| Backslash and newline ignored | \(1) |
++-------------------------+---------------------------------+-------+
+| ``\\`` | Backslash (``\``) | |
++-------------------------+---------------------------------+-------+
+| ``\'`` | Single quote (``'``) | |
++-------------------------+---------------------------------+-------+
+| ``\"`` | Double quote (``"``) | |
++-------------------------+---------------------------------+-------+
+| ``\a`` | ASCII Bell (BEL) | |
++-------------------------+---------------------------------+-------+
+| ``\b`` | ASCII Backspace (BS) | |
++-------------------------+---------------------------------+-------+
+| ``\f`` | ASCII Formfeed (FF) | |
++-------------------------+---------------------------------+-------+
+| ``\n`` | ASCII Linefeed (LF) | |
++-------------------------+---------------------------------+-------+
+| ``\r`` | ASCII Carriage Return (CR) | |
++-------------------------+---------------------------------+-------+
+| ``\t`` | ASCII Horizontal Tab (TAB) | |
++-------------------------+---------------------------------+-------+
+| ``\v`` | ASCII Vertical Tab (VT) | |
++-------------------------+---------------------------------+-------+
+| :samp:`\\{ooo}` | Character with octal value | (2,4) |
+| | *ooo* | |
++-------------------------+---------------------------------+-------+
+| :samp:`\\x{hh}` | Character with hex value *hh* | (3,4) |
++-------------------------+---------------------------------+-------+
Escape sequences only recognized in string literals are:
-+-----------------+---------------------------------+-------+
-| Escape Sequence | Meaning | Notes |
-+=================+=================================+=======+
-| ``\N{name}`` | Character named *name* in the | \(5) |
-| | Unicode database | |
-+-----------------+---------------------------------+-------+
-| ``\uxxxx`` | Character with 16-bit hex value | \(6) |
-| | *xxxx* | |
-+-----------------+---------------------------------+-------+
-| ``\Uxxxxxxxx`` | Character with 32-bit hex value | \(7) |
-| | *xxxxxxxx* | |
-+-----------------+---------------------------------+-------+
++-------------------------+---------------------------------+-------+
+| Escape Sequence | Meaning | Notes |
++=========================+=================================+=======+
+| :samp:`\\N\\{{name}\\}` | Character named *name* in the | \(5) |
+| | Unicode database | |
++-------------------------+---------------------------------+-------+
+| :samp:`\\u{xxxx}` | Character with 16-bit hex value | \(6) |
+| | *xxxx* | |
++-------------------------+---------------------------------+-------+
+| :samp:`\\U{xxxxxxxx}` | Character with 32-bit hex value | \(7) |
+| | *xxxxxxxx* | |
++-------------------------+---------------------------------+-------+
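+
+An illustrative interactive session for the escapes above (a minimal sketch,
+not part of the reference tables)::
+
+   >>> "\N{LATIN SMALL LETTER A}", "\u0041", "\U0001F40D"
+   ('a', 'A', '🐍')
+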
Notes:
@@ -1045,4 +1045,4 @@ occurrence outside string literals and comments is an unconditional error:
.. rubric:: Footnotes
-.. [#] https://www.unicode.org/Public/15.0.0/ucd/NameAliases.txt
+.. [#] https://www.unicode.org/Public/15.1.0/ucd/NameAliases.txt
diff --git a/Doc/requirements-oldest-sphinx.txt b/Doc/requirements-oldest-sphinx.txt
index 94611ca22f09fe..d3ef5bc17650ae 100644
--- a/Doc/requirements-oldest-sphinx.txt
+++ b/Doc/requirements-oldest-sphinx.txt
@@ -7,12 +7,10 @@ blurb
python-docs-theme>=2022.1
# Generated from:
-# pip install "Sphinx~=3.2.0" "docutils<0.17" "Jinja2<3" "MarkupSafe<2"
+# pip install "Sphinx~=4.2.0"
# pip freeze
#
-# Sphinx 3.2 comes from ``needs_sphinx = '3.2'`` in ``Doc/conf.py``.
-# Docutils<0.17, Jinja2<3, and MarkupSafe<2 are additionally specified as
-# Sphinx 3.2 is incompatible with newer releases of these packages.
+# Sphinx 4.2 comes from ``needs_sphinx = '4.2'`` in ``Doc/conf.py``.
alabaster==0.7.13
Babel==2.12.1
@@ -25,10 +23,10 @@ imagesize==1.4.1
Jinja2==2.11.3
MarkupSafe==1.1.1
packaging==23.1
-Pygments==2.15.1
+Pygments==2.16.1
requests==2.31.0
snowballstemmer==2.2.0
-Sphinx==3.2.1
+Sphinx==4.2.0
sphinxcontrib-applehelp==1.0.4
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.1
diff --git a/Doc/tools/.nitignore b/Doc/tools/.nitignore
index a6268048e143db..487652f4b51d4b 100644
--- a/Doc/tools/.nitignore
+++ b/Doc/tools/.nitignore
@@ -130,7 +130,6 @@ Doc/library/tkinter.scrolledtext.rst
Doc/library/tkinter.ttk.rst
Doc/library/traceback.rst
Doc/library/tty.rst
-Doc/library/turtle.rst
Doc/library/unittest.mock.rst
Doc/library/unittest.rst
Doc/library/urllib.parse.rst
@@ -140,7 +139,6 @@ Doc/library/wsgiref.rst
Doc/library/xml.dom.minidom.rst
Doc/library/xml.dom.pulldom.rst
Doc/library/xml.dom.rst
-Doc/library/xml.etree.elementtree.rst
Doc/library/xml.rst
Doc/library/xml.sax.handler.rst
Doc/library/xml.sax.reader.rst
diff --git a/Doc/tools/templates/indexcontent.html b/Doc/tools/templates/indexcontent.html
index a96746b69fd41b..1e3ab7cfe02fee 100644
--- a/Doc/tools/templates/indexcontent.html
+++ b/Doc/tools/templates/indexcontent.html
@@ -62,6 +62,7 @@ {{ docstitle|e }}
{% trans %}History and License of Python{% endtrans %}
{% trans %}Copyright{% endtrans %}
+ {% trans %}Download the documentation{% endtrans %}
|
{% endblock %}
diff --git a/Doc/using/configure.rst b/Doc/using/configure.rst
index fe35372603fdd8..763f9778776990 100644
--- a/Doc/using/configure.rst
+++ b/Doc/using/configure.rst
@@ -60,6 +60,29 @@ See also :pep:`7` "Style Guide for C Code" and :pep:`11` "CPython platform
support".
+Generated files
+===============
+
+To reduce build dependencies, Python source code contains multiple generated
+files. Commands to regenerate all generated files::
+
+ make regen-all
+ make regen-stdlib-module-names
+ make regen-limited-abi
+ make regen-configure
+
+The ``Makefile.pre.in`` file documents generated files, their inputs, and tools used
+to regenerate them. Search for ``regen-*`` make targets.
+
+The ``make regen-configure`` command runs the `tiran/cpython_autoconf
+<https://github.com/tiran/cpython_autoconf>`_ container for a reproducible
+build; see the container's ``entry.sh`` script. The container is optional;
+the following command can be run locally, but the generated files depend on
+the autoconf and aclocal versions::
+
+ autoreconf -ivf -Werror
+
+
.. _configure-options:
Configure Options
@@ -192,14 +215,69 @@ General Options
.. cmdoption:: --enable-pystats
- Turn on internal statistics gathering.
+ Turn on internal Python performance statistics gathering.
+
+ By default, statistics gathering is off. Use the ``python3 -X pystats``
+ command or set the ``PYTHONSTATS=1`` environment variable to turn on
+ statistics gathering at Python startup.
+
+ At Python exit, the statistics are dumped if statistics gathering was on
+ and has not been cleared.
+
+ Effects:
+
+ * Add :option:`-X pystats <-X>` command line option.
+ * Add :envvar:`!PYTHONSTATS` environment variable.
+ * Define the ``Py_STATS`` macro.
+ * Add functions to the :mod:`sys` module:
+
+ * :func:`!sys._stats_on`: Turns on statistics gathering.
+ * :func:`!sys._stats_off`: Turns off statistics gathering.
+ * :func:`!sys._stats_clear`: Clears the statistics.
+ * :func:`!sys._stats_dump`: Dumps statistics to a file, and clears the statistics.
The statistics will be dumped to an arbitrary (probably unique) file in
- ``/tmp/py_stats/``, or ``C:\temp\py_stats\`` on Windows. If that directory
- does not exist, results will be printed on stdout.
+ ``/tmp/py_stats/`` (Unix) or ``C:\temp\py_stats\`` (Windows). If that
+ directory does not exist, results will be printed on stderr.
Use ``Tools/scripts/summarize_stats.py`` to read the stats.
+ Statistics:
+
+ * Opcode:
+
+ * Specialization: success, failure, hit, deferred, miss, deopt, failures;
+ * Execution count;
+ * Pair count.
+
+ * Call:
+
+ * Inlined Python calls;
+ * PyEval calls;
+ * Frames pushed;
+ * Frame object created;
+ * Eval calls: vector, generator, legacy, function VECTORCALL, build class,
+ slot, function "ex", API, method.
+
+ * Object:
+
+ * incref and decref;
+ * interpreter incref and decref;
+ * allocations: all, 512 bytes, 4 kiB, big;
+ * free;
+ * to/from free lists;
+ * dictionary materialized/dematerialized;
+ * type cache;
+ * optimization attempts;
+ * optimization traces created/executed;
+ * uops executed.
+
+ * Garbage collector:
+
+ * Garbage collections;
+ * Objects visited;
+ * Objects collected.
+
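+ For example, a minimal sketch of driving the functions listed above from
+ Python code, assuming CPython was configured with ``--enable-pystats``::
+
+    import sys
+
+    if hasattr(sys, "_stats_on"):   # only present in --enable-pystats builds
+        sys._stats_clear()
+        sys._stats_on()
+        sum(range(10_000))          # run the code to be measured
+        sys._stats_off()
+        sys._stats_dump()           # writes to /tmp/py_stats/ or C:\temp\py_stats\
+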
.. versionadded:: 3.11
.. cmdoption:: --disable-gil
@@ -211,6 +289,136 @@ General Options
.. versionadded:: 3.13
+.. cmdoption:: PKG_CONFIG
+
+ Path to ``pkg-config`` utility.
+
+.. cmdoption:: PKG_CONFIG_LIBDIR
+.. cmdoption:: PKG_CONFIG_PATH
+
+ ``pkg-config`` options.
+
+
+C compiler options
+------------------
+
+.. cmdoption:: CC
+
+ C compiler command.
+
+.. cmdoption:: CFLAGS
+
+ C compiler flags.
+
+.. cmdoption:: CPP
+
+ C preprocessor command.
+
+.. cmdoption:: CPPFLAGS
+
+ C preprocessor flags, e.g. :samp:`-I{include_dir}`.
+
+
+Linker options
+--------------
+
+.. cmdoption:: LDFLAGS
+
+ Linker flags, e.g. :samp:`-L{library_directory}`.
+
+.. cmdoption:: LIBS
+
+ Libraries to pass to the linker, e.g. :samp:`-l{library}`.
+
+.. cmdoption:: MACHDEP
+
+ Name for machine-dependent library files.
+
+
+Options for third-party dependencies
+------------------------------------
+
+.. versionadded:: 3.11
+
+.. cmdoption:: BZIP2_CFLAGS
+.. cmdoption:: BZIP2_LIBS
+
+ C compiler and linker flags to link Python to ``libbz2``, used by :mod:`bz2`
+ module, overriding ``pkg-config``.
+
+.. cmdoption:: CURSES_CFLAGS
+.. cmdoption:: CURSES_LIBS
+
+ C compiler and linker flags for ``libncurses`` or ``libncursesw``, used by
+ :mod:`curses` module, overriding ``pkg-config``.
+
+.. cmdoption:: GDBM_CFLAGS
+.. cmdoption:: GDBM_LIBS
+
+ C compiler and linker flags for ``gdbm``.
+
+.. cmdoption:: LIBB2_CFLAGS
+.. cmdoption:: LIBB2_LIBS
+
+ C compiler and linker flags for ``libb2`` (:ref:`BLAKE2 <hashlib-blake2>`),
+ used by :mod:`hashlib` module, overriding ``pkg-config``.
+
+.. cmdoption:: LIBEDIT_CFLAGS
+.. cmdoption:: LIBEDIT_LIBS
+
+ C compiler and linker flags for ``libedit``, used by :mod:`readline` module,
+ overriding ``pkg-config``.
+
+.. cmdoption:: LIBFFI_CFLAGS
+.. cmdoption:: LIBFFI_LIBS
+
+ C compiler and linker flags for ``libffi``, used by :mod:`ctypes` module,
+ overriding ``pkg-config``.
+
+.. cmdoption:: LIBLZMA_CFLAGS
+.. cmdoption:: LIBLZMA_LIBS
+
+ C compiler and linker flags for ``liblzma``, used by :mod:`lzma` module,
+ overriding ``pkg-config``.
+
+.. cmdoption:: LIBREADLINE_CFLAGS
+.. cmdoption:: LIBREADLINE_LIBS
+
+ C compiler and linker flags for ``libreadline``, used by :mod:`readline`
+ module, overriding ``pkg-config``.
+
+.. cmdoption:: LIBSQLITE3_CFLAGS
+.. cmdoption:: LIBSQLITE3_LIBS
+
+ C compiler and linker flags for ``libsqlite3``, used by :mod:`sqlite3`
+ module, overriding ``pkg-config``.
+
+.. cmdoption:: LIBUUID_CFLAGS
+.. cmdoption:: LIBUUID_LIBS
+
+ C compiler and linker flags for ``libuuid``, used by :mod:`uuid` module,
+ overriding ``pkg-config``.
+
+.. cmdoption:: PANEL_CFLAGS
+.. cmdoption:: PANEL_LIBS
+
+ C compiler and linker flags for ``libpanel`` or ``libpanelw``, used by
+ :mod:`curses.panel` module, overriding ``pkg-config``.
+
+.. cmdoption:: TCLTK_CFLAGS
+.. cmdoption:: TCLTK_LIBS
+
+ C compiler and linker flags for Tcl/Tk, overriding ``pkg-config``.
+
+.. cmdoption:: ZLIB_CFLAGS
+.. cmdoption:: ZLIB_LIBS
+
+ C compiler and linker flags for ``libzlib``, used by :mod:`gzip` module,
+ overriding ``pkg-config``.
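+
+For example, a hypothetical ``configure`` invocation that overrides
+``pkg-config`` for a SQLite installation under ``/opt/sqlite`` (the paths are
+illustrative only)::
+
+   ./configure \
+       LIBSQLITE3_CFLAGS="-I/opt/sqlite/include" \
+       LIBSQLITE3_LIBS="-L/opt/sqlite/lib -lsqlite3"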
+
+
WebAssembly Options
-------------------
@@ -350,6 +558,19 @@ also be used to improve performance.
.. versionadded:: 3.12
+.. cmdoption:: BOLT_APPLY_FLAGS
+
+ Arguments to ``llvm-bolt`` when creating a `BOLT optimized binary
+ <https://github.com/llvm/llvm-project/tree/main/bolt>`_.
+
+ .. versionadded:: 3.12
+
+.. cmdoption:: BOLT_INSTRUMENT_FLAGS
+
+ Arguments to ``llvm-bolt`` when instrumenting binaries.
+
+ .. versionadded:: 3.12
+
.. cmdoption:: --with-computed-gotos
Enable computed gotos in evaluation loop (enabled by default on supported
@@ -697,6 +918,12 @@ the version of the cross compiled host Python.
ac_cv_file__dev_ptmx=yes
ac_cv_file__dev_ptc=no
+.. cmdoption:: HOSTRUNNER
+
+ Program to run CPython for the host platform for cross-compilation.
+
+ .. versionadded:: 3.11
+
Cross compiling example::
@@ -777,15 +1004,15 @@ Example on Linux x86-64::
At the beginning of the files, C extensions are built as built-in modules.
Extensions defined after the ``*shared*`` marker are built as dynamic libraries.
-The :c:macro:`PyAPI_FUNC()`, :c:macro:`PyAPI_DATA()` and
-:c:macro:`PyMODINIT_FUNC` macros of :file:`Include/pyport.h` are defined
+The :c:macro:`!PyAPI_FUNC()`, :c:macro:`!PyAPI_DATA()` and
+:c:macro:`PyMODINIT_FUNC` macros of :file:`Include/exports.h` are defined
differently depending if the ``Py_BUILD_CORE_MODULE`` macro is defined:
* Use ``Py_EXPORTED_SYMBOL`` if the ``Py_BUILD_CORE_MODULE`` is defined
* Use ``Py_IMPORTED_SYMBOL`` otherwise.
If the ``Py_BUILD_CORE_BUILTIN`` macro is used by mistake on a C extension
-built as a shared library, its ``PyInit_xxx()`` function is not exported,
+built as a shared library, its :samp:`PyInit_{xxx}()` function is not exported,
causing an :exc:`ImportError` on import.
@@ -806,8 +1033,8 @@ Preprocessor flags
.. envvar:: CPPFLAGS
- (Objective) C/C++ preprocessor flags, e.g. ``-I`` if you have
- headers in a nonstandard directory ````.
+ (Objective) C/C++ preprocessor flags, e.g. :samp:`-I{include_dir}` if you have
+ headers in a nonstandard directory *include_dir*.
Both :envvar:`CPPFLAGS` and :envvar:`LDFLAGS` need to contain the shell's
value to be able to build extension modules using the
@@ -996,8 +1223,8 @@ Linker flags
.. envvar:: LDFLAGS
- Linker flags, e.g. ``-L`` if you have libraries in a nonstandard
- directory ````.
+ Linker flags, e.g. :samp:`-L{lib_dir}` if you have libraries in a nonstandard
+ directory *lib_dir*.
Both :envvar:`CPPFLAGS` and :envvar:`LDFLAGS` need to contain the shell's
value to be able to build extension modules using the
diff --git a/Doc/using/unix.rst b/Doc/using/unix.rst
index 0044eb07f56eec..58838c28e6eb86 100644
--- a/Doc/using/unix.rst
+++ b/Doc/using/unix.rst
@@ -30,9 +30,9 @@ following links:
for Debian users
https://en.opensuse.org/Portal:Packaging
for OpenSuse users
- https://docs-old.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch-creating-rpms.html
+ https://docs.fedoraproject.org/en-US/package-maintainers/Packaging_Tutorial_GNU_Hello/
for Fedora users
- http://www.slackbook.org/html/package-management-making-packages.html
+ https://slackbook.org/html/package-management-making-packages.html
for Slackware users
diff --git a/Doc/using/venv-create.inc b/Doc/using/venv-create.inc
index 2fc90126482268..1cf438b198a9af 100644
--- a/Doc/using/venv-create.inc
+++ b/Doc/using/venv-create.inc
@@ -35,37 +35,48 @@ your :ref:`Python installation `::
The command, if run with ``-h``, will show the available options::
- usage: venv [-h] [--system-site-packages] [--symlinks | --copies] [--clear]
- [--upgrade] [--without-pip] [--prompt PROMPT] [--upgrade-deps]
- ENV_DIR [ENV_DIR ...]
-
- Creates virtual Python environments in one or more target directories.
-
- positional arguments:
- ENV_DIR A directory to create the environment in.
-
- optional arguments:
- -h, --help show this help message and exit
- --system-site-packages
- Give the virtual environment access to the system
- site-packages dir.
- --symlinks Try to use symlinks rather than copies, when symlinks
- are not the default for the platform.
- --copies Try to use copies rather than symlinks, even when
- symlinks are the default for the platform.
- --clear Delete the contents of the environment directory if it
- already exists, before environment creation.
- --upgrade Upgrade the environment directory to use this version
- of Python, assuming Python has been upgraded in-place.
- --without-pip Skips installing or upgrading pip in the virtual
- environment (pip is bootstrapped by default)
- --prompt PROMPT Provides an alternative prompt prefix for this
- environment.
- --upgrade-deps Upgrade core dependencies (pip) to the
- latest version in PyPI
-
- Once an environment has been created, you may wish to activate it, e.g. by
- sourcing an activate script in its bin directory.
+ usage: venv [-h] [--system-site-packages] [--symlinks | --copies] [--clear]
+ [--upgrade] [--without-pip] [--prompt PROMPT] [--upgrade-deps]
+ [--without-scm-ignore-file]
+ ENV_DIR [ENV_DIR ...]
+
+ Creates virtual Python environments in one or more target directories.
+
+ positional arguments:
+ ENV_DIR A directory to create the environment in.
+
+ options:
+ -h, --help show this help message and exit
+ --system-site-packages
+ Give the virtual environment access to the system
+ site-packages dir.
+ --symlinks Try to use symlinks rather than copies, when
+ symlinks are not the default for the platform.
+ --copies Try to use copies rather than symlinks, even when
+ symlinks are the default for the platform.
+ --clear Delete the contents of the environment directory if
+ it already exists, before environment creation.
+ --upgrade Upgrade the environment directory to use this
+ version of Python, assuming Python has been upgraded
+ in-place.
+ --without-pip Skips installing or upgrading pip in the virtual
+ environment (pip is bootstrapped by default)
+ --prompt PROMPT Provides an alternative prompt prefix for this
+ environment.
+ --upgrade-deps Upgrade core dependencies (pip) to the latest
+ version in PyPI
+ --without-scm-ignore-file
+ Skips adding the default SCM ignore file to the
+ environment directory (the default is a .gitignore
+ file).
+
+ Once an environment has been created, you may wish to activate it, e.g. by
+ sourcing an activate script in its bin directory.
+
+.. versionchanged:: 3.13
+
+ ``--without-scm-ignore-file`` was added, and an ignore file for ``git``
+ (``.gitignore``) is now created by default.
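+
+For example, to create an environment without the default SCM ignore file
+(the directory name ``.venv`` here is arbitrary)::
+
+   python -m venv --without-scm-ignore-file .venv
+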
.. versionchanged:: 3.12
diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst
index ca79c9d3a9d3a8..2476e60a26d485 100644
--- a/Doc/using/windows.rst
+++ b/Doc/using/windows.rst
@@ -889,7 +889,7 @@ minor version. I.e. ``/usr/bin/python3.7-32`` will request usage of the
The "-64" suffix is deprecated, and now implies "any architecture that is
not provably i386/32-bit". To request a specific environment, use the new
- ``-V:`` argument with the complete tag.
+ :samp:`-V:{TAG}` argument with the complete tag.
The ``/usr/bin/env`` form of shebang line has one further special property.
Before looking for installed Python interpreters, this form will search the
@@ -1192,7 +1192,7 @@ non-standard paths in the registry and user site-packages.
* Adds ``._pth`` file support and removes ``applocal`` option from
``pyvenv.cfg``.
- * Adds ``pythonXX.zip`` as a potential landmark when directly adjacent
+ * Adds :file:`python{XX}.zip` as a potential landmark when directly adjacent
to the executable.
.. deprecated::
diff --git a/Doc/whatsnew/2.0.rst b/Doc/whatsnew/2.0.rst
index 71f681881f446f..c2b0ae8c76302a 100644
--- a/Doc/whatsnew/2.0.rst
+++ b/Doc/whatsnew/2.0.rst
@@ -153,9 +153,9 @@ Lundh. A detailed explanation of the interface was written up as :pep:`100`,
significant points about the Unicode interfaces.
In Python source code, Unicode strings are written as ``u"string"``. Arbitrary
-Unicode characters can be written using a new escape sequence, ``\uHHHH``, where
+Unicode characters can be written using a new escape sequence, :samp:`\\u{HHHH}`, where
*HHHH* is a 4-digit hexadecimal number from 0000 to FFFF. The existing
-``\xHHHH`` escape sequence can also be used, and octal escapes can be used for
+:samp:`\\x{HH}` escape sequence can also be used, and octal escapes can be used for
characters up to U+01FF, which is represented by ``\777``.
Unicode strings, just like regular strings, are an immutable sequence type.
diff --git a/Doc/whatsnew/2.3.rst b/Doc/whatsnew/2.3.rst
index ba18ce343c35d1..ec1ca417ee139d 100644
--- a/Doc/whatsnew/2.3.rst
+++ b/Doc/whatsnew/2.3.rst
@@ -1889,7 +1889,7 @@ Changes to Python's build process and to the C API include:
* The :c:macro:`!DL_EXPORT` and :c:macro:`!DL_IMPORT` macros are now deprecated.
Initialization functions for Python extension modules should now be declared
using the new macro :c:macro:`PyMODINIT_FUNC`, while the Python core will
- generally use the :c:macro:`PyAPI_FUNC` and :c:macro:`PyAPI_DATA` macros.
+ generally use the :c:macro:`!PyAPI_FUNC` and :c:macro:`!PyAPI_DATA` macros.
* The interpreter can be compiled without any docstrings for the built-in
functions and modules by supplying :option:`!--without-doc-strings` to the
diff --git a/Doc/whatsnew/2.6.rst b/Doc/whatsnew/2.6.rst
index beba4428e67c3f..f3912d42180bfd 100644
--- a/Doc/whatsnew/2.6.rst
+++ b/Doc/whatsnew/2.6.rst
@@ -125,7 +125,7 @@ and to C extension code as :c:data:`!Py_Py3kWarningFlag`.
.. seealso::
- The 3xxx series of PEPs, which contains proposals for Python 3.0.
+ The 3\ *xxx* series of PEPs, which contains proposals for Python 3.0.
:pep:`3000` describes the development process for Python 3.0.
Start with :pep:`3100` that describes the general goals for Python
3.0, and then explore the higher-numbered PEPS that propose
diff --git a/Doc/whatsnew/3.10.rst b/Doc/whatsnew/3.10.rst
index 1e6e0befa9819a..20cabbd25cc686 100644
--- a/Doc/whatsnew/3.10.rst
+++ b/Doc/whatsnew/3.10.rst
@@ -878,7 +878,7 @@ Other Language Changes
(Contributed by Raymond Hettinger in :issue:`43475`.)
* A :exc:`SyntaxError` (instead of a :exc:`NameError`) will be raised when deleting
- the :const:`__debug__` constant. (Contributed by Dong-hee Na in :issue:`45000`.)
+ the :const:`__debug__` constant. (Contributed by Donghee Na in :issue:`45000`.)
* :exc:`SyntaxError` exceptions now have ``end_lineno`` and
``end_offset`` attributes. They will be ``None`` if not determined.
@@ -1255,7 +1255,7 @@ pipe. (Contributed by Pablo Galindo in :issue:`41625`.)
Add :const:`~os.O_EVTONLY`, :const:`~os.O_FSYNC`, :const:`~os.O_SYMLINK`
and :const:`~os.O_NOFOLLOW_ANY` for macOS.
-(Contributed by Dong-hee Na in :issue:`43106`.)
+(Contributed by Donghee Na in :issue:`43106`.)
os.path
-------
@@ -1582,7 +1582,7 @@ Optimizations
* The following built-in functions now support the faster :pep:`590` vectorcall calling convention:
:func:`map`, :func:`filter`, :func:`reversed`, :func:`bool` and :func:`float`.
- (Contributed by Dong-hee Na and Jeroen Demeyer in :issue:`43575`, :issue:`43287`, :issue:`41922`, :issue:`41873` and :issue:`41870`.)
+ (Contributed by Donghee Na and Jeroen Demeyer in :issue:`43575`, :issue:`43287`, :issue:`41922`, :issue:`41873` and :issue:`41870`.)
* :class:`BZ2File` performance is improved by removing internal ``RLock``.
This makes :class:`BZ2File` thread unsafe in the face of multiple simultaneous
@@ -1817,7 +1817,7 @@ Removed
scheduled to be removed in Python 3.6, but such removals were delayed until
after Python 2.7 EOL. Existing users should copy whatever classes they use
into their code.
- (Contributed by Dong-hee Na and Terry J. Reedy in :issue:`42299`.)
+ (Contributed by Donghee Na and Terry J. Reedy in :issue:`42299`.)
* Removed the :c:func:`!PyModule_GetWarningsModule` function that was useless
now that the :mod:`!_warnings` module was converted to a builtin module in 2.6.
diff --git a/Doc/whatsnew/3.11.rst b/Doc/whatsnew/3.11.rst
index cc5cfee08d2b32..257025da91a7ed 100644
--- a/Doc/whatsnew/3.11.rst
+++ b/Doc/whatsnew/3.11.rst
@@ -45,7 +45,7 @@
when researching a change.
This article explains the new features in Python 3.11, compared to 3.10.
-
+Python 3.11 was released on October 24, 2022.
For full details, see the :ref:`changelog `.
@@ -218,7 +218,7 @@ Windows ``py.exe`` launcher improvements
The copy of the :ref:`launcher` included with Python 3.11 has been significantly
updated. It now supports company/tag syntax as defined in :pep:`514` using the
-``-V:/`` argument instead of the limited ``-.``.
+:samp:`-V:{}/{}` argument instead of the limited :samp:`-{}.{}`.
This allows launching distributions other than ``PythonCore``,
the one hosted on `python.org <https://www.python.org/>`_.
@@ -227,8 +227,8 @@ installs will be searched. For example, ``-V:OtherPython/`` will select the
"best" tag registered for ``OtherPython``, while ``-V:3.11`` or ``-V:/3.11``
will select the "best" distribution with tag ``3.11``.
-When using the legacy ``-``, ``-.``,
-``--`` or ``-.-`` arguments,
+When using the legacy :samp:`-{}`, :samp:`-{}.{}`,
+:samp:`-{}-{}` or :samp:`-{}.{}-{}` arguments,
all existing behaviour should be preserved from past versions,
and only releases from ``PythonCore`` will be selected.
However, the ``-64`` suffix now implies "not 32-bit" (not necessarily x86-64),
@@ -499,7 +499,7 @@ Other CPython Implementation Changes
* The special methods :meth:`~object.__complex__` for :class:`complex`
and :meth:`~object.__bytes__` for :class:`bytes` are implemented to support
the :class:`typing.SupportsComplex` and :class:`typing.SupportsBytes` protocols.
- (Contributed by Mark Dickinson and Dong-hee Na in :issue:`24234`.)
+ (Contributed by Mark Dickinson and Donghee Na in :issue:`24234`.)
* ``siphash13`` is added as a new internal hashing algorithm.
It has similar security properties as ``siphash24``,
@@ -897,7 +897,7 @@ os
* On Windows, :func:`os.urandom` now uses ``BCryptGenRandom()``,
instead of ``CryptGenRandom()`` which is deprecated.
- (Contributed by Dong-hee Na in :issue:`44611`.)
+ (Contributed by Donghee Na in :issue:`44611`.)
.. _whatsnew311-pathlib:
@@ -1089,7 +1089,7 @@ time
`_
which has a resolution of 100 nanoseconds (10\ :sup:`-7` seconds). Previously,
it had a resolution of 1 millisecond (10\ :sup:`-3` seconds).
- (Contributed by Benjamin Szőke, Dong-hee Na, Eryk Sun and Victor Stinner in :issue:`21302` and :issue:`45429`.)
+ (Contributed by Benjamin Szőke, Donghee Na, Eryk Sun and Victor Stinner in :issue:`21302` and :issue:`45429`.)
.. _whatsnew311-tkinter:
@@ -1305,7 +1305,7 @@ This section covers specific optimizations independent of the
* :func:`unicodedata.normalize`
now normalizes pure-ASCII strings in constant time.
- (Contributed by Dong-hee Na in :issue:`44987`.)
+ (Contributed by Donghee Na in :issue:`44987`.)
.. _whatsnew311-faster-cpython:
@@ -1452,7 +1452,7 @@ Bucher, with additional help from Irit Katriel and Dennis Sweeney.)
| | | | (up to) | |
+===============+====================+=======================================================+===================+===================+
| Binary | ``x + x`` | Binary add, multiply and subtract for common types | 10% | Mark Shannon, |
-| operations | | such as :class:`int`, :class:`float` and :class:`str` | | Dong-hee Na, |
+| operations | | such as :class:`int`, :class:`float` and :class:`str` | | Donghee Na, |
| | ``x - x`` | take custom fast paths for their underlying types. | | Brandt Bucher, |
| | | | | Dennis Sweeney |
| | ``x * x`` | | | |
@@ -1839,7 +1839,7 @@ Standard Library
* :class:`!webbrowser.MacOSX` is deprecated and will be removed in Python 3.13.
It is untested, undocumented, and not used by :mod:`webbrowser` itself.
- (Contributed by Dong-hee Na in :issue:`42255`.)
+ (Contributed by Donghee Na in :issue:`42255`.)
* The behavior of returning a value from a :class:`~unittest.TestCase` and
:class:`~unittest.IsolatedAsyncioTestCase` test methods (other than the
@@ -1984,7 +1984,7 @@ Removed C APIs are :ref:`listed separately `.
:meth:`!NullTranslations.set_output_charset` methods,
and the *codeset* parameter of :func:`!translation` and :func:`!install`,
since they are only used for the :func:`!l*gettext` functions.
- (Contributed by Dong-hee Na and Serhiy Storchaka in :issue:`44235`.)
+ (Contributed by Donghee Na and Serhiy Storchaka in :issue:`44235`.)
* Removed from the :mod:`inspect` module:
@@ -2009,7 +2009,7 @@ Removed C APIs are :ref:`listed separately `.
* Removed the :class:`!MailmanProxy` class in the :mod:`smtpd` module,
as it is unusable without the external :mod:`!mailman` package.
- (Contributed by Dong-hee Na in :issue:`35800`.)
+ (Contributed by Donghee Na in :issue:`35800`.)
* Removed the deprecated :meth:`!split` method of :class:`!_tkinter.TkappType`.
(Contributed by Erlend E. Aasland in :issue:`38371`.)
@@ -2151,7 +2151,7 @@ Build Changes
* CPython can now be built with the
`ThinLTO <https://clang.llvm.org/docs/ThinLTO.html>`_ option
via passing ``thin`` to :option:`--with-lto`, i.e. ``--with-lto=thin``.
- (Contributed by Dong-hee Na and Brett Holman in :issue:`44340`.)
+ (Contributed by Donghee Na and Brett Holman in :issue:`44340`.)
* Freelists for object structs can now be disabled. A new :program:`configure`
option :option:`--without-freelists` can be used to disable all freelists
diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst
index fb31a07930da25..2a186e8c8484ec 100644
--- a/Doc/whatsnew/3.12.rst
+++ b/Doc/whatsnew/3.12.rst
@@ -67,21 +67,23 @@ Summary -- Release highlights
New grammar features:
-* :pep:`701`: Syntactic formalization of f-strings
+* :ref:`whatsnew312-pep701`
Interpreter improvements:
* :ref:`whatsnew312-pep684`
+* :ref:`whatsnew312-pep669`
+
New typing features:
-* :pep:`688`: Making the buffer protocol accessible in Python
+* :ref:`whatsnew312-pep688`
* :ref:`whatsnew312-pep692`
* :ref:`whatsnew312-pep695`
-* :pep:`698`: Override Decorator for Static Typing
+* :ref:`whatsnew312-pep698`
Important deprecations, removals or restrictions:
@@ -96,7 +98,7 @@ Improved Error Messages
* Modules from the standard library are now potentially suggested as part of
the error messages displayed by the interpreter when a :exc:`NameError` is
- raised to the top level. Contributed by Pablo Galindo in :gh:`98254`.
+ raised to the top level. (Contributed by Pablo Galindo in :gh:`98254`.)
>>> sys.version_info
Traceback (most recent call last):
@@ -107,7 +109,7 @@ Improved Error Messages
Now if a :exc:`NameError` is raised in a method and the instance has an
attribute that's exactly equal to the name in the exception, the suggestion
will include ``self.`` instead of the closest match in the method
- scope. Contributed by Pablo Galindo in :gh:`99139`.
+ scope. (Contributed by Pablo Galindo in :gh:`99139`.)
>>> class A:
... def __init__(self):
@@ -115,7 +117,7 @@ Improved Error Messages
...
... def foo(self):
... somethin = blech
-
+ ...
>>> A().foo()
Traceback (most recent call last):
File "", line 1
@@ -123,9 +125,8 @@ Improved Error Messages
^^^^^
NameError: name 'blech' is not defined. Did you mean: 'self.blech'?
-
* Improve the :exc:`SyntaxError` error message when the user types ``import x
- from y`` instead of ``from y import x``. Contributed by Pablo Galindo in :gh:`98931`.
+ from y`` instead of ``from y import x``. (Contributed by Pablo Galindo in :gh:`98931`.)
>>> import a.y.z from b.y.z
Traceback (most recent call last):
@@ -136,7 +137,7 @@ Improved Error Messages
* :exc:`ImportError` exceptions raised from failed ``from <module> import
 <name>`` statements now include suggestions for the value of ``<name>`` based on the
- available names in ``<module>``. Contributed by Pablo Galindo in :gh:`91058`.
+ available names in ``<module>``. (Contributed by Pablo Galindo in :gh:`91058`.)
>>> from collections import chainmap
Traceback (most recent call last):
@@ -152,12 +153,13 @@ New Features
PEP 701: Syntactic formalization of f-strings
---------------------------------------------
-:pep:`701` lifts some restrictions on the usage of f-strings. Expression components
-inside f-strings can now be any valid Python expression including backslashes,
-unicode escaped sequences, multi-line expressions, comments and strings reusing the
-same quote as the containing f-string. Let's cover these in detail:
+:pep:`701` lifts some restrictions on the usage of :term:`f-strings `.
+Expression components inside f-strings can now be any valid Python expression,
+including strings reusing the same quote as the containing f-string,
+multi-line expressions, comments, backslashes, and unicode escape sequences.
+Let's cover these in detail:
-* Quote reuse: in Python 3.11, reusing the same quotes as the containing f-string
+* Quote reuse: in Python 3.11, reusing the same quotes as the enclosing f-string
raises a :exc:`SyntaxError`, forcing the user to use other available
quotes (such as double quotes or triple quotes if the f-string uses single
quotes). In Python 3.12, you can now do things like this:
@@ -180,11 +182,12 @@ same quote as the containing f-string. Let's cover these in detail:
>>> f"{f"{f"{f"{f"{f"{1+1}"}"}"}"}"}"
'2'
-* Multi-line expressions and comments: In Python 3.11, f-strings expressions
- must be defined in a single line even if outside f-strings expressions could
- span multiple lines (like literal lists being defined over multiple lines),
- making them harder to read. In Python 3.12 you can now define expressions
- spanning multiple lines and include comments on them:
+* Multi-line expressions and comments: In Python 3.11, f-string expressions
+ must be defined in a single line, even if the expression within the f-string
+ could normally span multiple lines
+ (like literal lists being defined over multiple lines),
+ making them harder to read. In Python 3.12 you can now define f-strings
+ spanning multiple lines, and add inline comments:
>>> f"This is the playlist: {", ".join([
... 'Take me back to Eden', # My, my, those eyes like fire
@@ -194,10 +197,10 @@ same quote as the containing f-string. Let's cover these in detail:
'This is the playlist: Take me back to Eden, Alkaline, Ascensionism'
* Backslashes and unicode characters: before Python 3.12 f-string expressions
- couldn't contain any ``\`` character. This also affected unicode escaped
- sequences (such as ``\N{snowman}``) as these contain the ``\N`` part that
- previously could not be part of expression components of f-strings. Now, you
- can define expressions like this:
+ couldn't contain any ``\`` character. This also affected unicode :ref:`escape
+ sequences <escape-sequences>` (such as ``\N{snowman}``) as these contain
+ the ``\N`` part that previously could not be part of expression components of
+ f-strings. Now, you can define expressions like this:
>>> print(f"This is the playlist: {"\n".join(songs)}")
This is the playlist: Take me back to Eden
@@ -209,7 +212,7 @@ same quote as the containing f-string. Let's cover these in detail:
See :pep:`701` for more details.
As a positive side-effect of how this feature has been implemented (by parsing f-strings
-with the PEG parser (see :pep:`617`), now error messages for f-strings are more precise
+with :pep:`the PEG parser <617>`), now error messages for f-strings are more precise
and include the exact location of the error. For example, in Python 3.11, the following
f-string raises a :exc:`SyntaxError`:
@@ -244,17 +247,18 @@ PEP 709: Comprehension inlining
Dictionary, list, and set comprehensions are now inlined, rather than creating a
new single-use function object for each execution of the comprehension. This
-speeds up execution of a comprehension by up to 2x.
+speeds up execution of a comprehension by up to two times.
+See :pep:`709` for further details.
-Comprehension iteration variables remain isolated; they don't overwrite a
+Comprehension iteration variables remain isolated and don't overwrite a
variable of the same name in the outer scope, nor are they visible after the
-comprehension. This isolation is now maintained via stack/locals manipulation,
-not via separate function scope.
-
-Inlining does result in a few visible behavior changes:
+comprehension. Inlining does result in a few visible behavior changes:
* There is no longer a separate frame for the comprehension in tracebacks,
and tracing/profiling no longer shows the comprehension as a function call.
+* The :mod:`symtable` module will no longer produce child symbol tables for each
+ comprehension; instead, the comprehension's locals will be included in the
+ parent function's symbol table.
* Calling :func:`locals` inside a comprehension now includes variables
from outside the comprehension, and no longer includes the synthetic ``.0``
variable for the comprehension "argument".
@@ -265,7 +269,9 @@ Inlining does result in a few visible behavior changes:
create a list of keys to iterate over: ``keys = list(locals()); [k for k in
keys]``.
-Contributed by Carl Meyer and Vladimir Matveev in :pep:`709`.
+(Contributed by Carl Meyer and Vladimir Matveev in :pep:`709`.)
+
+.. _whatsnew312-pep688:
PEP 688: Making the buffer protocol accessible in Python
--------------------------------------------------------
@@ -285,9 +291,11 @@ can be used to customize buffer creation.
PEP 684: A Per-Interpreter GIL
------------------------------
-Sub-interpreters may now be created with a unique GIL per interpreter.
+:pep:`684` introduces a per-interpreter :term:`GIL <global interpreter lock>`,
+so that sub-interpreters may now be created with a unique GIL per interpreter.
This allows Python programs to take full advantage of multiple CPU
-cores.
+cores. This is currently only available through the C-API,
+though a Python API is :pep:`anticipated for 3.13 <554>`.
Use the new :c:func:`Py_NewInterpreterFromConfig` function to
create an interpreter with its own GIL::
@@ -301,19 +309,32 @@ create an interpreter with its own GIL::
if (PyStatus_Exception(status)) {
return -1;
}
- /* The new interpeter is now active in the current thread. */
+ /* The new interpreter is now active in the current thread. */
For further examples of how to use the C-API for sub-interpreters with a
per-interpreter GIL, see :source:`Modules/_xxsubinterpretersmodule.c`.
-A Python API is anticipated for 3.13. (See :pep:`554`.)
-
(Contributed by Eric Snow in :gh:`104210`, etc.)
+.. _whatsnew312-pep669:
+
+PEP 669: Low impact monitoring for CPython
+------------------------------------------
+
+:pep:`669` defines a new :mod:`API <sys.monitoring>` for profilers,
+debuggers, and other tools to monitor events in CPython.
+It covers a wide range of events, including calls,
+returns, lines, exceptions, jumps, and more.
+This means that you only pay for what you use, providing support
+for near-zero overhead debuggers and coverage tools.
+See :mod:`sys.monitoring` for details.
+
+(Contributed by Mark Shannon in :gh:`103083`.)
+
New Features Related to Type Hints
==================================
-This section covers major changes affecting :pep:`484` type hints and
+This section covers major changes affecting :pep:`type hints <484>` and
the :mod:`typing` module.
.. _whatsnew312-pep692:
@@ -325,7 +346,7 @@ Typing ``**kwargs`` in a function signature as introduced by :pep:`484` allowed
for valid annotations only in cases where all of the ``**kwargs`` were of the
same type.
-This PEP specifies a more precise way of typing ``**kwargs`` by relying on
+:pep:`692` specifies a more precise way of typing ``**kwargs`` by relying on
typed dictionaries::
from typing import TypedDict, Unpack
@@ -340,6 +361,8 @@ See :pep:`692` for more details.
(Contributed by Franek Magiera in :gh:`103629`.)
+.. _whatsnew312-pep698:
+
PEP 698: Override Decorator for Static Typing
---------------------------------------------
@@ -367,6 +390,8 @@ Example::
def get_colour(self) -> str:
return "red"
+See :pep:`698` for more details.
+
(Contributed by Steven Troxler in :gh:`101561`.)
.. _whatsnew312-pep695:
@@ -412,8 +437,8 @@ parameters with bounds or constraints::
The value of type aliases and the bound and constraints of type variables
created through this syntax are evaluated only on demand (see
-:ref:`lazy-evaluation`). This means type aliases are able to refer to other
-types defined later in the file.
+:ref:`lazy evaluation <lazy-evaluation>`). This means type aliases are able to
+refer to other types defined later in the file.
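+
+A minimal sketch of this lazy evaluation (the names are purely illustrative)::
+
+   type Pair = tuple[First, Second]   # First and Second are defined later
+
+   class First: ...
+   class Second: ...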
Type parameters declared through a type parameter list are visible within the
scope of the declaration and any nested scopes, but not in the outer scope. For
@@ -436,12 +461,12 @@ and others in :gh:`103764`.)
Other Language Changes
======================
-* Add :ref:`perf_profiling` through the new
- environment variable :envvar:`PYTHONPERFSUPPORT`,
- the new command-line option :option:`-X perf <-X>`,
+* Add :ref:`support for the perf profiler <perf_profiling>` through the new
+ environment variable :envvar:`PYTHONPERFSUPPORT`
+ and command-line option :option:`-X perf <-X>`,
as well as the new :func:`sys.activate_stack_trampoline`,
:func:`sys.deactivate_stack_trampoline`,
- and :func:`sys.is_stack_trampoline_active` APIs.
+ and :func:`sys.is_stack_trampoline_active` functions.
(Design by Pablo Galindo. Contributed by Pablo Galindo and Christian Heimes
with contributions from Gregory P. Smith [Google] and Mark Shannon
in :gh:`96123`.)
@@ -450,7 +475,7 @@ Other Language Changes
have a new *filter* argument that allows limiting tar features that may be
surprising or dangerous, such as creating files outside the destination
directory.
- See :ref:`tarfile-extraction-filter` for details.
+ See :ref:`tarfile extraction filters <tarfile-extraction-filter>` for details.
In Python 3.14, the default will switch to ``'data'``.
(Contributed by Petr Viktorin in :pep:`706`.)
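+
+ For example, a minimal sketch of the new *filter* argument (the archive name
+ is a placeholder)::
+
+    import tarfile
+
+    with tarfile.open("example.tar.gz") as tar:
+        # The "data" filter rejects absolute paths, links pointing outside
+        # the destination tree, and other surprising tar features.
+        tar.extractall(filter="data")
+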
@@ -459,7 +484,7 @@ Other Language Changes
(Contributed by Serhiy Storchaka in :gh:`87995`.)
* :class:`memoryview` now supports the half-float type (the "e" format code).
- (Contributed by Dong-hee Na and Antoine Pitrou in :gh:`90751`.)
+ (Contributed by Donghee Na and Antoine Pitrou in :gh:`90751`.)
* The parser now raises :exc:`SyntaxError` when parsing source code containing
null bytes. (Contributed by Pablo Galindo in :gh:`96670`.)
@@ -478,8 +503,8 @@ Other Language Changes
* A backslash-character pair that is not a valid escape sequence now generates
a :exc:`SyntaxWarning`, instead of :exc:`DeprecationWarning`.
For example, ``re.compile("\d+\.\d+")`` now emits a :exc:`SyntaxWarning`
- (``"\d"`` is an invalid escape sequence), use raw strings for regular
- expression: ``re.compile(r"\d+\.\d+")``.
+ (``"\d"`` is an invalid escape sequence, use raw strings for regular
+ expression: ``re.compile(r"\d+\.\d+")``).
In a future Python version, :exc:`SyntaxError` will eventually be raised,
instead of :exc:`SyntaxWarning`.
(Contributed by Victor Stinner in :gh:`98401`.)
@@ -508,7 +533,7 @@ Other Language Changes
when summing floats or mixed ints and floats.
(Contributed by Raymond Hettinger in :gh:`100425`.)
-* Exceptions raised in a typeobject's ``__set_name__`` method are no longer
+* Exceptions raised in a class or type's ``__set_name__`` method are no longer
wrapped by a :exc:`RuntimeError`. Context information is added to the
exception as a :pep:`678` note. (Contributed by Irit Katriel in :gh:`77757`.)
@@ -544,20 +569,20 @@ asyncio
* Added :func:`asyncio.eager_task_factory` and :func:`asyncio.create_eager_task_factory`
functions to allow opting an event loop in to eager task execution,
making some use-cases 2x to 5x faster.
- (Contributed by Jacob Bower & Itamar O in :gh:`102853`, :gh:`104140`, and :gh:`104138`)
+ (Contributed by Jacob Bower & Itamar Oren in :gh:`102853`, :gh:`104140`, and :gh:`104138`)
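+
+ For example, a minimal sketch of opting a running event loop in to eager
+ task execution::
+
+    import asyncio
+
+    async def main():
+        loop = asyncio.get_running_loop()
+        loop.set_task_factory(asyncio.eager_task_factory)
+        await asyncio.gather(*(asyncio.sleep(0) for _ in range(10)))
+
+    asyncio.run(main())
+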
-* On Linux, :mod:`asyncio` uses :class:`~asyncio.PidfdChildWatcher` by default
+* On Linux, :mod:`asyncio` uses :class:`asyncio.PidfdChildWatcher` by default
if :func:`os.pidfd_open` is available and functional instead of
- :class:`~asyncio.ThreadedChildWatcher`.
+ :class:`asyncio.ThreadedChildWatcher`.
(Contributed by Kumar Aditya in :gh:`98024`.)
-* The child watcher classes :class:`~asyncio.MultiLoopChildWatcher`,
- :class:`~asyncio.FastChildWatcher`, :class:`~asyncio.AbstractChildWatcher`
- and :class:`~asyncio.SafeChildWatcher` are deprecated and
+* The child watcher classes :class:`asyncio.MultiLoopChildWatcher`,
+ :class:`asyncio.FastChildWatcher`, :class:`asyncio.AbstractChildWatcher`
+ and :class:`asyncio.SafeChildWatcher` are deprecated and
will be removed in Python 3.14. It is recommended to not manually
configure a child watcher as the event loop now uses the best available
- child watcher for each platform (:class:`~asyncio.PidfdChildWatcher`
- if supported and :class:`~asyncio.ThreadedChildWatcher` otherwise).
+ child watcher for each platform (:class:`asyncio.PidfdChildWatcher`
+ if supported and :class:`asyncio.ThreadedChildWatcher` otherwise).
(Contributed by Kumar Aditya in :gh:`94597`.)
* :func:`asyncio.set_child_watcher`, :func:`asyncio.get_child_watcher`,
@@ -571,7 +596,7 @@ asyncio
(Contributed by Kumar Aditya in :gh:`99388`.)
* Add C implementation of :func:`asyncio.current_task` for 4x-6x speedup.
- (Contributed by Itamar Ostricher and Pranav Thulasiram Bhat in :gh:`100344`.)
+ (Contributed by Itamar Oren and Pranav Thulasiram Bhat in :gh:`100344`.)
* :func:`asyncio.iscoroutine` now returns ``False`` for generators as
:mod:`asyncio` does not support legacy generator-based coroutines.
@@ -584,15 +609,16 @@ asyncio
calendar
--------
-* Add enums :data:`~calendar.Month` and :data:`~calendar.Day`.
+* Add enums :data:`calendar.Month` and :data:`calendar.Day`
+ defining months of the year and days of the week.
(Contributed by Prince Roshan in :gh:`103636`.)
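+
+ A minimal sketch of the new enums::
+
+    from calendar import Day, Month
+
+    assert Month.JANUARY == 1   # months run 1-12
+    assert Day.MONDAY == 0      # days run 0-6, Monday first
+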
csv
---
-* Add :const:`~csv.QUOTE_NOTNULL` and :const:`~csv.QUOTE_STRINGS` flags to
+* Add :const:`csv.QUOTE_NOTNULL` and :const:`csv.QUOTE_STRINGS` flags to
provide finer grained control of ``None`` and empty strings by
- :class:`~csv.writer` objects.
+ :class:`csv.writer` objects.
dis
---
@@ -602,7 +628,7 @@ dis
:mod:`dis` module.
:opcode:`HAVE_ARGUMENT` is still relevant to real opcodes,
but it is not useful for pseudo instructions. Use the new
- :data:`~dis.hasarg` collection instead.
+ :data:`dis.hasarg` collection instead.
(Contributed by Irit Katriel in :gh:`94216`.)
fractions
@@ -611,6 +637,12 @@ fractions
* Objects of type :class:`fractions.Fraction` now support float-style
formatting. (Contributed by Mark Dickinson in :gh:`100161`.)
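+
+  For example, a minimal sketch of the new formatting support::
+
+     from fractions import Fraction
+
+     print(f"{Fraction(1, 3):.3f}")   # prints 0.333
+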
+importlib.resources
+-------------------
+
+* :func:`importlib.resources.as_file` now supports resource directories.
+ (Contributed by Jason R. Coombs in :gh:`97930`.)
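+
+  A minimal sketch of the new behaviour (``mypkg`` and its ``data`` directory
+  are hypothetical)::
+
+     from importlib.resources import as_file, files
+
+     with as_file(files("mypkg") / "data") as path:   # a directory, not a file
+         print(sorted(entry.name for entry in path.iterdir()))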
+
inspect
-------
@@ -682,11 +714,11 @@ pathlib
-------
* Add support for subclassing :class:`pathlib.PurePath` and
- :class:`~pathlib.Path`, plus their Posix- and Windows-specific variants.
- Subclasses may override the :meth:`~pathlib.PurePath.with_segments` method
+ :class:`pathlib.Path`, plus their Posix- and Windows-specific variants.
+ Subclasses may override the :meth:`pathlib.PurePath.with_segments` method
to pass information between path instances.
-* Add :meth:`~pathlib.Path.walk` for walking the directory trees and generating
+* Add :meth:`pathlib.Path.walk` for walking the directory trees and generating
all file or directory names within them, similar to :func:`os.walk`.
(Contributed by Stanislav Zmiev in :gh:`90385`.)
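+
+  A minimal sketch, walking the current directory::
+
+     from pathlib import Path
+
+     # Path.walk() yields (dirpath, dirnames, filenames), like os.walk(),
+     # except that dirpath is a Path object.
+     for dirpath, dirnames, filenames in Path(".").walk():
+         print(dirpath, len(filenames))
+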
@@ -755,20 +787,20 @@ sqlite3
* Add a :ref:`command-line interface <sqlite3-cli>`.
(Contributed by Erlend E. Aasland in :gh:`77617`.)
-* Add the :attr:`~sqlite3.Connection.autocommit` attribute
- to :class:`~sqlite3.Connection`
- and the *autocommit* parameter to :func:`~sqlite3.connect`
+* Add the :attr:`sqlite3.Connection.autocommit` attribute
+ to :class:`sqlite3.Connection`
+ and the *autocommit* parameter to :func:`sqlite3.connect`
to control :pep:`249`-compliant
:ref:`transaction handling `.
(Contributed by Erlend E. Aasland in :gh:`83638`.)
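+
+ A minimal sketch of the new *autocommit* parameter, using an in-memory
+ database for illustration::
+
+    import sqlite3
+
+    con = sqlite3.connect(":memory:", autocommit=False)  # PEP 249-style transactions
+    con.execute("CREATE TABLE t (x)")
+    con.commit()
+    con.close()
+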
* Add *entrypoint* keyword-only parameter to
- :meth:`~sqlite3.Connection.load_extension`,
+ :meth:`sqlite3.Connection.load_extension`,
for overriding the SQLite extension entry point.
(Contributed by Erlend E. Aasland in :gh:`103015`.)
-* Add :meth:`~sqlite3.Connection.getconfig` and
- :meth:`~sqlite3.Connection.setconfig` to :class:`~sqlite3.Connection`
+* Add :meth:`sqlite3.Connection.getconfig` and
+ :meth:`sqlite3.Connection.setconfig` to :class:`sqlite3.Connection`
to make configuration changes to a database connection.
(Contributed by Erlend E. Aasland in :gh:`103489`.)
@@ -838,8 +870,8 @@ tkinter
tokenize
--------
-* The :mod:`tokenize` module includes the changes introduced in :pep:`701`. (
- Contributed by Marta Gómez Macías and Pablo Galindo in :gh:`102856`.)
+* The :mod:`tokenize` module includes the changes introduced in :pep:`701`.
+ (Contributed by Marta Gómez Macías and Pablo Galindo in :gh:`102856`.)
See :ref:`whatsnew312-porting-to-python312` for more information on the
changes to the :mod:`tokenize` module.
@@ -947,7 +979,7 @@ Optimizations
* Added experimental support for using the BOLT binary optimizer in the build
process, which improves performance by 1-5%.
- (Contributed by Kevin Modzelewski in :gh:`90536` and tuned by Dong-hee Na in :gh:`101525`)
+ (Contributed by Kevin Modzelewski in :gh:`90536` and tuned by Donghee Na in :gh:`101525`)
* Speed up the regular expression substitution (functions :func:`re.sub` and
:func:`re.subn` and corresponding :class:`!re.Pattern` methods) for
@@ -955,7 +987,7 @@ Optimizations
(Contributed by Serhiy Storchaka in :gh:`91524`.)
* Speed up :class:`asyncio.Task` creation by deferring expensive string formatting.
- (Contributed by Itamar O in :gh:`103793`.)
+ (Contributed by Itamar Oren in :gh:`103793`.)
* The :func:`tokenize.tokenize` and :func:`tokenize.generate_tokens` functions are
up to 64% faster as a side effect of the changes required to cover :pep:`701` in
@@ -970,14 +1002,17 @@ Optimizations
CPython bytecode changes
========================
-* Remove the :opcode:`LOAD_METHOD` instruction. It has been merged into
+* Remove the :opcode:`!LOAD_METHOD` instruction. It has been merged into
:opcode:`LOAD_ATTR`. :opcode:`LOAD_ATTR` will now behave like the old
- :opcode:`LOAD_METHOD` instruction if the low bit of its oparg is set.
+ :opcode:`!LOAD_METHOD` instruction if the low bit of its oparg is set.
(Contributed by Ken Jin in :gh:`93429`.)
* Remove the :opcode:`!JUMP_IF_FALSE_OR_POP` and :opcode:`!JUMP_IF_TRUE_OR_POP`
instructions. (Contributed by Irit Katriel in :gh:`102859`.)
+* Removed the :opcode:`!PRECALL` instruction. (Contributed by Mark Shannon in
+ :gh:`92925`.)
+
* Add the :opcode:`LOAD_FAST_AND_CLEAR` instruction as part of the
implementation of :pep:`709`. (Contributed by Carl Meyer in :gh:`101441`.)
@@ -1030,6 +1065,18 @@ Deprecated
contain the creation time, which is also available in the new ``st_birthtime``
field. (Contributed by Steve Dower in :gh:`99726`.)
+* :mod:`os`: On POSIX platforms, :func:`os.fork` can now raise a
+ :exc:`DeprecationWarning` when it can detect being called from a
+ multithreaded process. There has always been a fundamental incompatibility
+ with the POSIX platform when doing so, even if such code *appeared* to work.
+ We added the warning to raise awareness, as issues encountered by code doing
+ this are becoming more frequent. See the :func:`os.fork` documentation for
+ more details.
+
+ When this warning appears due to usage of :mod:`multiprocessing` or
+ :mod:`concurrent.futures`, the fix is to use a different
+ :mod:`multiprocessing` start method such as ``"spawn"`` or ``"forkserver"``.
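+
+ For example, a minimal sketch of selecting a different start method (using
+ the builtin :func:`abs` as a trivial worker)::
+
+    import multiprocessing
+
+    if __name__ == "__main__":
+        multiprocessing.set_start_method("spawn")   # avoid fork() under threads
+        with multiprocessing.Pool(2) as pool:
+            print(pool.map(abs, [-1, -2]))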
+
+* :mod:`shutil`: The *onerror* argument of :func:`shutil.rmtree` is deprecated and will be removed
in Python 3.14. Use *onexc* instead. (Contributed by Irit Katriel in :gh:`102828`.)
@@ -1246,13 +1293,19 @@ although there is currently no date scheduled for their removal.
Removed
=======
-* ``asynchat`` and ``asyncore``: These two modules have been removed
+asynchat and asyncore
+---------------------
+
+* These two modules have been removed
according to the schedule in :pep:`594`,
having been deprecated in Python 3.6.
Use :mod:`asyncio` instead.
(Contributed by Nikita Sobolev in :gh:`96580`.)
-* :mod:`configparser`: Several names deprecated in the :mod:`configparser` way back in 3.2 have
+configparser
+------------
+
+* Several names deprecated in the :mod:`configparser` module way back in 3.2 have
been removed per :gh:`89336`:
* :class:`configparser.ParsingError` no longer has a ``filename`` attribute
@@ -1262,13 +1315,19 @@ Removed
* :class:`configparser.ConfigParser` no longer has a ``readfp`` method.
Use :meth:`~configparser.ConfigParser.read_file` instead.
-* ``distutils``: Remove the ``distutils`` package. It was deprecated in Python 3.10 by
+distutils
+---------
+
+* Remove the :py:mod:`!distutils` package. It was deprecated in Python 3.10 by
:pep:`632` "Deprecate distutils module". For projects still using
``distutils`` and cannot be updated to something else, the ``setuptools``
project can be installed: it still provides ``distutils``.
(Contributed by Victor Stinner in :gh:`92584`.)
-* :mod:`ensurepip`: Remove the bundled setuptools wheel from :mod:`ensurepip`,
+ensurepip
+---------
+
+* Remove the bundled setuptools wheel from :mod:`ensurepip`,
and stop installing setuptools in environments created by :mod:`venv`.
``pip (>= 22.1)`` does not require setuptools to be installed in the
@@ -1286,27 +1345,42 @@ Removed
(Contributed by Pradyun Gedam in :gh:`95299`.)
-* :mod:`enum`: Remove ``EnumMeta.__getattr__``, which is no longer needed for
+enum
+----
+
+* Remove :mod:`enum`'s ``EnumMeta.__getattr__``, which is no longer needed for
enum attribute access.
(Contributed by Ethan Furman in :gh:`95083`.)
-* :mod:`ftplib`: Remove the ``FTP_TLS.ssl_version`` class attribute: use the
+ftplib
+------
+
+* Remove :mod:`ftplib`'s ``FTP_TLS.ssl_version`` class attribute: use the
*context* parameter instead.
(Contributed by Victor Stinner in :gh:`94172`.)
-* :mod:`gzip`: Remove the ``filename`` attribute of :class:`gzip.GzipFile`,
+gzip
+----
+
+* Remove the ``filename`` attribute of :mod:`gzip`'s :class:`gzip.GzipFile`,
deprecated since Python 2.6; use the :attr:`~gzip.GzipFile.name` attribute
instead. In write mode, the ``filename`` attribute added the ``'.gz'`` file
extension if it was not present.
(Contributed by Victor Stinner in :gh:`94196`.)
-* :mod:`hashlib`: Remove the pure Python implementation of
+hashlib
+-------
+
+* Remove the pure Python implementation of :mod:`hashlib`'s
:func:`hashlib.pbkdf2_hmac()`, deprecated in Python 3.10. Python 3.10 and
newer requires OpenSSL 1.1.1 (:pep:`644`): this OpenSSL version provides
a C implementation of :func:`~hashlib.pbkdf2_hmac()` which is faster.
(Contributed by Victor Stinner in :gh:`94199`.)
-* :mod:`importlib`: Many previously deprecated cleanups in :mod:`importlib` have now been
+importlib
+---------
+
+* Many previously deprecated cleanups in :mod:`importlib` have now been
completed:
* References to, and support for :meth:`!module_repr()` has been removed.
@@ -1322,10 +1396,13 @@ Removed
* ``importlib.abc.Finder``, ``pkgutil.ImpImporter``, and ``pkgutil.ImpLoader``
have been removed. (Contributed by Barry Warsaw in :gh:`98040`.)
- * The :mod:`!imp` module has been removed. (Contributed by Barry Warsaw in
- :gh:`98040`.)
+imp
+---
+
+* The :mod:`!imp` module has been removed. (Contributed by Barry Warsaw in
+ :gh:`98040`.)
- * Replace removed :mod:`!imp` functions with :mod:`importlib` functions:
+* Replace removed :mod:`!imp` functions with :mod:`importlib` functions:
================================= =======================================
imp importlib
@@ -1342,7 +1419,7 @@ Removed
``imp.source_from_cache()`` :func:`importlib.util.source_from_cache`
================================= =======================================
- * Replace ``imp.load_source()`` with::
+* Replace ``imp.load_source()`` with::
import importlib.util
import importlib.machinery
@@ -1357,28 +1434,34 @@ Removed
loader.exec_module(module)
return module
- * Removed :mod:`!imp` functions and attributes with no replacements:
+* Removed :mod:`!imp` functions and attributes with no replacements:
- * undocumented functions:
+ * undocumented functions:
- * ``imp.init_builtin()``
- * ``imp.load_compiled()``
- * ``imp.load_dynamic()``
- * ``imp.load_package()``
+ * ``imp.init_builtin()``
+ * ``imp.load_compiled()``
+ * ``imp.load_dynamic()``
+ * ``imp.load_package()``
- * ``imp.lock_held()``, ``imp.acquire_lock()``, ``imp.release_lock()``:
- the locking scheme has changed in Python 3.3 to per-module locks.
- * ``imp.find_module()`` constants: ``SEARCH_ERROR``, ``PY_SOURCE``,
- ``PY_COMPILED``, ``C_EXTENSION``, ``PY_RESOURCE``, ``PKG_DIRECTORY``,
- ``C_BUILTIN``, ``PY_FROZEN``, ``PY_CODERESOURCE``, ``IMP_HOOK``.
+ * ``imp.lock_held()``, ``imp.acquire_lock()``, ``imp.release_lock()``:
+ the locking scheme has changed in Python 3.3 to per-module locks.
+ * ``imp.find_module()`` constants: ``SEARCH_ERROR``, ``PY_SOURCE``,
+ ``PY_COMPILED``, ``C_EXTENSION``, ``PY_RESOURCE``, ``PKG_DIRECTORY``,
+ ``C_BUILTIN``, ``PY_FROZEN``, ``PY_CODERESOURCE``, ``IMP_HOOK``.
-* :mod:`io`: Remove ``io.OpenWrapper`` and ``_pyio.OpenWrapper``, deprecated in Python
+io
+--
+
+* Remove :mod:`io`'s ``io.OpenWrapper`` and ``_pyio.OpenWrapper``, deprecated in Python
3.10: just use :func:`open` instead. The :func:`open` (:func:`io.open`)
function is a built-in function. Since Python 3.10, :func:`!_pyio.open` is
also a static method.
(Contributed by Victor Stinner in :gh:`94169`.)
-* :mod:`locale`: Remove the :func:`!locale.format` function, deprecated in Python 3.7:
+locale
+------
+
+* Remove :mod:`locale`'s :func:`!locale.format` function, deprecated in Python 3.7:
use :func:`locale.format_string` instead.
(Contributed by Victor Stinner in :gh:`94226`.)
@@ -1390,7 +1473,10 @@ Removed
.. _aiosmtpd: https://pypi.org/project/aiosmtpd/
-* :mod:`sqlite3`: The following undocumented :mod:`sqlite3` features, deprecated in Python
+sqlite3
+-------
+
+* The following undocumented :mod:`sqlite3` features, deprecated in Python
3.10, are now removed:
* ``sqlite3.enable_shared_cache()``
@@ -1406,30 +1492,34 @@ Removed
(Contributed by Erlend E. Aasland in :gh:`92548`.)
-* :mod:`ssl`:
+ssl
+---
- * Remove the :func:`!ssl.RAND_pseudo_bytes` function, deprecated in Python 3.6:
- use :func:`os.urandom` or :func:`ssl.RAND_bytes` instead.
- (Contributed by Victor Stinner in :gh:`94199`.)
+* Remove :mod:`ssl`'s :func:`!ssl.RAND_pseudo_bytes` function, deprecated in Python 3.6:
+ use :func:`os.urandom` or :func:`ssl.RAND_bytes` instead.
+ (Contributed by Victor Stinner in :gh:`94199`.)
- * Remove the :func:`!ssl.match_hostname` function.
- It was deprecated in Python 3.7. OpenSSL performs
- hostname matching since Python 3.7, Python no longer uses the
- :func:`!ssl.match_hostname` function.
- (Contributed by Victor Stinner in :gh:`94199`.)
+* Remove the :func:`!ssl.match_hostname` function.
+  It was deprecated in Python 3.7. Since Python 3.7, OpenSSL itself performs
+  hostname matching, so Python no longer uses the
+  :func:`!ssl.match_hostname` function.
+ (Contributed by Victor Stinner in :gh:`94199`.)
- * Remove the :func:`!ssl.wrap_socket` function, deprecated in Python 3.7:
- instead, create a :class:`ssl.SSLContext` object and call its
- :class:`ssl.SSLContext.wrap_socket` method. Any package that still uses
- :func:`!ssl.wrap_socket` is broken and insecure. The function neither sends a
- SNI TLS extension nor validates server hostname. Code is subject to `CWE-295
- <https://cwe.mitre.org/data/definitions/295.html>`_: Improper Certificate
- Validation.
- (Contributed by Victor Stinner in :gh:`94199`.)
+* Remove the :func:`!ssl.wrap_socket` function, deprecated in Python 3.7:
+ instead, create a :class:`ssl.SSLContext` object and call its
+ :class:`ssl.SSLContext.wrap_socket` method. Any package that still uses
+  :func:`!ssl.wrap_socket` is broken and insecure. The function neither sends an
+  SNI TLS extension nor validates the server hostname. Code is subject to `CWE-295
+  <https://cwe.mitre.org/data/definitions/295.html>`_: Improper Certificate
+ Validation.
+ (Contributed by Victor Stinner in :gh:`94199`.)
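+
+  A minimal sketch of the recommended replacement (the host name is
+  illustrative)::
+
+      import socket
+      import ssl
+
+      hostname = 'www.python.org'
+      context = ssl.create_default_context()
+      with socket.create_connection((hostname, 443)) as sock:
+          with context.wrap_socket(sock, server_hostname=hostname) as ssock:
+              print(ssock.version())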
+
+unittest
+--------
-* :mod:`unittest`: Removed many old deprecated :mod:`unittest` features:
+* Removed many old deprecated :mod:`unittest` features:
- - A number of :class:`~unittest.TestCase` method aliases:
+ * A number of :class:`~unittest.TestCase` method aliases:
============================ =============================== ===============
Deprecated alias Method Name Deprecated in
@@ -1454,34 +1544,46 @@ Removed
You can use https://github.com/isidentical/teyit to automatically modernise
your unit tests (a manual rewrite is also sketched below).
- - Undocumented and broken :class:`~unittest.TestCase` method
+ * Undocumented and broken :class:`~unittest.TestCase` method
``assertDictContainsSubset`` (deprecated in Python 3.2).
- - Undocumented :meth:`TestLoader.loadTestsFromModule
+ * Undocumented :meth:`TestLoader.loadTestsFromModule
<unittest.TestLoader.loadTestsFromModule>` parameter *use_load_tests*
(deprecated and ignored since Python 3.2).
- - An alias of the :class:`~unittest.TextTestResult` class:
+ * An alias of the :class:`~unittest.TextTestResult` class:
``_TextTestResult`` (deprecated in Python 3.2).
(Contributed by Serhiy Storchaka in :issue:`45162`.)
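
For the removed :class:`~unittest.TestCase` aliases, a hedged sketch of the
kind of manual rewrite required (``assertEquals`` was one of the removed
aliases)::

    import unittest

    class ExampleTest(unittest.TestCase):
        def test_sum(self):
            # Was: self.assertEquals(1 + 1, 2) -- the alias no longer exists.
            self.assertEqual(1 + 1, 2)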
-* :mod:`webbrowser`: Remove support for obsolete browsers from :mod:`webbrowser`.
+webbrowser
+----------
+
+* Remove support for obsolete browsers from :mod:`webbrowser`.
Removed browsers include: Grail, Mosaic, Netscape, Galeon, Skipstone,
Iceape, Firebird, and Firefox versions 35 and below (:gh:`102871`).
-* :mod:`xml.etree.ElementTree`: Remove the ``ElementTree.Element.copy()`` method of the
+xml.etree.ElementTree
+---------------------
+
+* Remove the ``ElementTree.Element.copy()`` method of the
  pure Python implementation, deprecated in Python 3.10: use the
:func:`copy.copy` function instead. The C implementation of :mod:`xml.etree.ElementTree`
has no ``copy()`` method, only a ``__copy__()`` method.
(Contributed by Victor Stinner in :gh:`94383`.)
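  For example::

      import copy
      import xml.etree.ElementTree as ET

      elem = ET.Element('item', {'id': '1'})
      duplicate = copy.copy(elem)   # instead of the removed elem.copy()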
-* :mod:`zipimport`: Remove ``find_loader()`` and ``find_module()`` methods,
+zipimport
+---------
+
+* Remove :mod:`zipimport`'s ``find_loader()`` and ``find_module()`` methods,
deprecated in Python 3.10: use the ``find_spec()`` method instead. See
:pep:`451` for the rationale.
(Contributed by Victor Stinner in :gh:`94379`.)
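  A hedged migration sketch (the archive and module names are illustrative;
  the archive must exist for the importer to be created)::

      import zipimport

      importer = zipimport.zipimporter('plugins.zip')
      spec = importer.find_spec('plugin_module')   # replaces find_module()
      if spec is not None:
          print(spec.origin)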
-* Removed the ``suspicious`` rule from the documentation Makefile, and
+Others
+------
+
+* Removed the ``suspicious`` rule from the documentation :file:`Makefile`, and
removed ``Doc/tools/rstlint.py``, both in favor of `sphinx-lint
<https://github.com/sphinx-contrib/sphinx-lint>`_.
(Contributed by Julien Palard in :gh:`98179`.)
@@ -1547,7 +1649,7 @@ Changes in the Python API
so only a very small set of users might be affected.
This change helps with interpreter isolation. Furthermore, :mod:`syslog` is a wrapper
around process-global resources, which are best managed from the main interpreter.
- (Contributed by Dong-hee Na in :gh:`99127`.)
+ (Contributed by Donghee Na in :gh:`99127`.)
* The undocumented locking behavior of :func:`~functools.cached_property`
is removed, because it locked across all instances of the class, leading to high
@@ -1571,7 +1673,7 @@ Changes in the Python API
functions is now changed due to the changes introduced in :pep:`701`. This
means that ``STRING`` tokens are not emitted any more for f-strings and the
tokens described in :pep:`701` are now produced instead: ``FSTRING_START``,
- ``FSRING_MIDDLE`` and ``FSTRING_END`` are now emitted for f-string "string"
+ ``FSTRING_MIDDLE`` and ``FSTRING_END`` are now emitted for f-string "string"
parts in addition to the appropriate tokens for the tokenization in the
expression components. For example for the f-string ``f"start {1+1} end"``
the old version of the tokenizer emitted::
@@ -1608,9 +1710,9 @@ Changes in the Python API
Build Changes
=============
-* Python no longer uses ``setup.py`` to build shared C extension modules.
+* Python no longer uses :file:`setup.py` to build shared C extension modules.
Build parameters like headers and libraries are detected in ``configure``
- script. Extensions are built by ``Makefile``. Most extensions use
+ script. Extensions are built by :file:`Makefile`. Most extensions use
``pkg-config`` and fall back to manual detection.
(Contributed by Christian Heimes in :gh:`93939`.)
@@ -1621,9 +1723,9 @@ Build Changes
* CPython now uses the ThinLTO option as the default link time optimization policy
if the Clang compiler accepts the flag.
- (Contributed by Dong-hee Na in :gh:`89536`.)
+ (Contributed by Donghee Na in :gh:`89536`.)
-* Add ``COMPILEALL_OPTS`` variable in Makefile to override :mod:`compileall`
+* Add ``COMPILEALL_OPTS`` variable in :file:`Makefile` to override :mod:`compileall`
options (default: ``-j0``) in ``make install``. Also merged the 3
``compileall`` commands into a single command to build .pyc files for all
optimization levels (0, 1, 2) at once.
@@ -1750,7 +1852,7 @@ New Features
* Added :c:func:`PyCode_AddWatcher` and :c:func:`PyCode_ClearWatcher`
APIs to register callbacks to receive notification on creation and
destruction of code objects.
- (Contributed by Itamar Ostricher in :gh:`91054`.)
+ (Contributed by Itamar Oren in :gh:`91054`.)
* Add :c:func:`PyFrame_GetVar` and :c:func:`PyFrame_GetVarString` functions to
get a frame variable by its name.
@@ -1790,7 +1892,7 @@ New Features
- ``SSTATE_INTERNED_IMMORTAL_STATIC`` An identifier for interned unicode
objects that are immortal and static
- ``sys.getunicodeinternedsize`` This returns the total number of unicode
- objects that have been interned. This is now needed for refleak.py to
+ objects that have been interned. This is now needed for :file:`refleak.py` to
correctly track reference counts and allocated blocks
(Contributed by Eddie Elizondo in :gh:`84436`.)
@@ -1981,10 +2083,10 @@ Deprecated
* Creating immutable types (:c:macro:`Py_TPFLAGS_IMMUTABLETYPE`) with mutable
bases is deprecated and will be disabled in Python 3.14.
-* The ``structmember.h`` header is deprecated, though it continues to be
+* The :file:`structmember.h` header is deprecated, though it continues to be
available and there are no plans to remove it.
- Its contents are now available just by including ``Python.h``,
+ Its contents are now available just by including :file:`Python.h`,
with a ``Py`` prefix added if it was missing:
- :c:struct:`PyMemberDef`, :c:func:`PyMember_GetOne` and
@@ -1994,14 +2096,14 @@ Deprecated
- The flags :c:macro:`Py_READONLY` (previously ``READONLY``) and
:c:macro:`Py_AUDIT_READ` (previously all uppercase)
- Several items are not exposed from ``Python.h``:
+ Several items are not exposed from :file:`Python.h`:
- :c:macro:`T_OBJECT` (use :c:macro:`Py_T_OBJECT_EX`)
- :c:macro:`T_NONE` (previously undocumented, and pretty quirky)
- The macro ``WRITE_RESTRICTED`` which does nothing.
- The macros ``RESTRICTED`` and ``READ_RESTRICTED``, equivalents of
:c:macro:`Py_AUDIT_READ`.
- - In some configurations, ``<stddef.h>`` is not included from ``Python.h``.
+ - In some configurations, ``<stddef.h>`` is not included from :file:`Python.h`.
It should be included manually when using ``offsetof()``.
The deprecated header continues to provide its original
@@ -2031,8 +2133,8 @@ Deprecated
Removed
-------
-* Remove the ``token.h`` header file. There was never any public tokenizer C
- API. The ``token.h`` header file was only designed to be used by Python
+* Remove the :file:`token.h` header file. There was never any public tokenizer C
+ API. The :file:`token.h` header file was only designed to be used by Python
internals.
(Contributed by Victor Stinner in :gh:`92651`.)
diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst
index ffded6ea8350f7..a4b0f69d67b617 100644
--- a/Doc/whatsnew/3.13.rst
+++ b/Doc/whatsnew/3.13.rst
@@ -115,12 +115,24 @@ array
It can be used instead of ``'u'`` type code, which is deprecated.
(Contributed by Inada Naoki in :gh:`80480`.)
+copy
+----
+
+* Add the :func:`copy.replace` function, which creates a modified copy of
+  an object; this is especially useful for immutable objects.
+ It supports named tuples created with the factory function
+ :func:`collections.namedtuple`, :class:`~dataclasses.dataclass` instances,
+ various :mod:`datetime` objects, :class:`~inspect.Signature` objects,
+  :class:`~inspect.Parameter` objects, :ref:`code object <code-objects>`, and
+ any user classes which define the :meth:`!__replace__` method.
+ (Contributed by Serhiy Storchaka in :gh:`108751`.)
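+
+  A minimal sketch with a named tuple (the ``Point`` type is illustrative)::
+
+      import copy
+      from collections import namedtuple
+
+      Point = namedtuple('Point', ['x', 'y'])
+      p = Point(1, 2)
+      q = copy.replace(p, y=3)   # Point(x=1, y=3); p is left unchanged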
+
dbm
---
* Add :meth:`dbm.gnu.gdbm.clear` and :meth:`dbm.ndbm.ndbm.clear` methods that remove all items
from the database.
- (Contributed by Dong-hee Na in :gh:`107122`.)
+ (Contributed by Donghee Na in :gh:`107122`.)
doctest
-------
@@ -176,6 +188,10 @@ pdb
the new ``exceptions [exc_number]`` command for Pdb. (Contributed by Matthias
Bussonnier in :gh:`106676`.)
+* Expressions/Statements whose prefix is a pdb command are now correctly
+ identified and executed.
+ (Contributed by Tian Gao in :gh:`108464`.)
+
sqlite3
-------
@@ -195,7 +211,7 @@ tkinter
traceback
---------
-* Add *show_group* paramter to :func:`traceback.TracebackException.format_exception_only`
+* Add *show_group* parameter to :func:`traceback.TracebackException.format_exception_only`
to format the nested exceptions of a :exc:`BaseExceptionGroup` instance, recursively.
(Contributed by Irit Katriel in :gh:`105292`.)
@@ -207,6 +223,16 @@ typing
check whether a class is a :class:`typing.Protocol`. (Contributed by Jelle Zijlstra in
:gh:`104873`.)
+venv
+----
+
+* Add support for adding source control management (SCM) ignore files to a
+ virtual environment's directory. By default, Git is supported. This is
+ implemented as opt-in via the API which can be extended to support other SCMs
+ (:class:`venv.EnvBuilder` and :func:`venv.create`), and opt-out via the CLI
+ (using ``--without-scm-ignore-files``). (Contributed by Brett Cannon in
+ :gh:`108125`.)
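+
+  A hedged sketch of the opt-in API, assuming the keyword parameter is named
+  ``scm_ignore_files`` (check the :mod:`venv` documentation for the exact
+  signature)::
+
+      import venv
+
+      # Ask for a .gitignore file to be written into the new environment.
+      venv.create('.venv', with_pip=True, scm_ignore_files={'git'})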
+
Optimizations
=============
@@ -235,10 +261,19 @@ Deprecated
practice.
(Contributed by Victor Stinner in :gh:`106535`.)
-* :mod:`typing`: Creating a :class:`typing.NamedTuple` class using keyword arguments to denote
- the fields (``NT = NamedTuple("NT", x=int, y=int)``) is deprecated, and will
- be disallowed in Python 3.15. Use the class-based syntax or the functional
- syntax instead. (Contributed by Alex Waygood in :gh:`105566`.)
+* :mod:`http.server`: :class:`http.server.CGIHTTPRequestHandler` now emits a
+  :exc:`DeprecationWarning` as it will be removed in 3.15. Process-based CGI
+  HTTP servers have been out of favor for a very long time. This code was
+ outdated, unmaintained, and rarely used. It has a high potential for both
+ security and functionality bugs. This includes removal of the ``--cgi``
+ flag to the ``python -m http.server`` command line in 3.15.
+
+* :mod:`typing`:
+
+ * Creating a :class:`typing.NamedTuple` class using keyword arguments to denote
+ the fields (``NT = NamedTuple("NT", x=int, y=int)``) is deprecated, and will
+ be disallowed in Python 3.15. Use the class-based syntax or the functional
+ syntax instead. (Contributed by Alex Waygood in :gh:`105566`.)
* When using the functional syntax to create a :class:`typing.NamedTuple`
class or a :class:`typing.TypedDict` class, failing to pass a value to the
@@ -288,6 +323,11 @@ Deprecated
(Contributed by Erlend E. Aasland in :gh:`107948` and :gh:`108278`.)
+* The ``dis.HAVE_ARGUMENT`` separator is deprecated. Check membership
+ in :data:`~dis.hasarg` instead.
+ (Contributed by Irit Katriel in :gh:`109319`.)
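+
+  For example, a membership test replaces the numeric comparison (a minimal
+  sketch)::
+
+      import dis
+
+      opcode = dis.opmap['LOAD_CONST']
+      # Was: opcode >= dis.HAVE_ARGUMENT
+      print(opcode in dis.hasarg)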
+
+
Pending Removal in Python 3.14
------------------------------
@@ -396,6 +436,11 @@ Pending Removal in Python 3.14
Pending Removal in Python 3.15
------------------------------
+* :class:`http.server.CGIHTTPRequestHandler` will be removed along with its
+ related ``--cgi`` flag to ``python -m http.server``. It was obsolete and
+ rarely used. No direct replacement exists. *Anything* is better than CGI
+ to interface a web server with a request handler.
+
* :class:`typing.NamedTuple`:
* The undocumented keyword argument syntax for creating NamedTuple classes
@@ -886,6 +931,18 @@ New Features
be treated as a failure.
(Contributed by Serhiy Storchaka in :gh:`106307`.)
+* Add fixed variants of functions which silently ignore errors:
+
+ - :c:func:`PyObject_HasAttrWithError` replaces :c:func:`PyObject_HasAttr`.
+ - :c:func:`PyObject_HasAttrStringWithError` replaces :c:func:`PyObject_HasAttrString`.
+ - :c:func:`PyMapping_HasKeyWithError` replaces :c:func:`PyMapping_HasKey`.
+ - :c:func:`PyMapping_HasKeyStringWithError` replaces :c:func:`PyMapping_HasKeyString`.
+
+  The new functions return not only ``1`` for true and ``0`` for false, but
+  also ``-1`` on error.
+
+ (Contributed by Serhiy Storchaka in :gh:`108511`.)
+
* If Python is built in :ref:`debug mode <debug-build>` or :option:`with
assertions <--with-assertions>`, :c:func:`PyTuple_SET_ITEM` and
:c:func:`PyList_SET_ITEM` now check the index argument with an assertion.
@@ -937,6 +994,21 @@ Porting to Python 3.13
functions: ``close()``, ``getpagesize()``, ``getpid()`` and ``sysconf()``.
(Contributed by Victor Stinner in :gh:`108765`.)
+* ``Python.h`` no longer includes these standard header files: ``<time.h>``,
+  ``<sys/select.h>`` and ``<sys/time.h>``. If needed, they should now be
+  included explicitly. For example, ``<time.h>`` provides the ``clock()`` and
+  ``gmtime()`` functions, ``<sys/select.h>`` provides the ``select()``
+  function, and ``<sys/time.h>`` provides the ``futimes()``, ``gettimeofday()``
+  and ``setitimer()`` functions.
+ (Contributed by Victor Stinner in :gh:`108765`.)
+
+* ``Python.h`` no longer includes the ``<ctype.h>`` standard header file. If
+  needed, it should now be included explicitly. For example, it provides the
+  ``isalpha()`` and ``tolower()`` functions, which are locale-dependent. Python
+  provides locale-independent functions, like :c:func:`!Py_ISALPHA` and
+ :c:func:`!Py_TOLOWER`.
+ (Contributed by Victor Stinner in :gh:`108765`.)
+
Deprecated
----------
diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst
index 3f98c82c2fa556..e440193d6f3d29 100644
--- a/Doc/whatsnew/3.3.rst
+++ b/Doc/whatsnew/3.3.rst
@@ -2067,7 +2067,7 @@ The :pep:`418` added new functions to the :mod:`time` module:
Other new functions:
* :func:`~time.clock_getres`, :func:`~time.clock_gettime` and
- :func:`~time.clock_settime` functions with ``CLOCK_xxx`` constants.
+ :func:`~time.clock_settime` functions with :samp:`CLOCK_{xxx}` constants.
(Contributed by Victor Stinner in :issue:`10278`.)
To improve cross platform consistency, :func:`~time.sleep` now raises a
diff --git a/Doc/whatsnew/3.4.rst b/Doc/whatsnew/3.4.rst
index a36e9fa852723a..2ddab76814369e 100644
--- a/Doc/whatsnew/3.4.rst
+++ b/Doc/whatsnew/3.4.rst
@@ -2085,7 +2085,7 @@ Deprecations in the Python API
:meth:`importlib.abc.MetaPathFinder.find_spec`;
:meth:`!importlib.abc.PathEntryFinder.find_loader` and
:meth:`!find_module` are replaced by
- :meth:`importlib.abc.PathEntryFinder.find_spec`; all of the ``xxxLoader`` ABC
+ :meth:`importlib.abc.PathEntryFinder.find_spec`; all of the :samp:`{xxx}Loader` ABC
``load_module`` methods (:meth:`!importlib.abc.Loader.load_module`,
:meth:`!importlib.abc.InspectLoader.load_module`,
:meth:`!importlib.abc.FileLoader.load_module`,
diff --git a/Doc/whatsnew/3.5.rst b/Doc/whatsnew/3.5.rst
index 0c45a42d1a7c17..ae6affcab664c6 100644
--- a/Doc/whatsnew/3.5.rst
+++ b/Doc/whatsnew/3.5.rst
@@ -921,7 +921,7 @@ and improves their substitutability for lists.
Docstrings produced by :func:`~collections.namedtuple` can now be updated::
Point = namedtuple('Point', ['x', 'y'])
- Point.__doc__ += ': Cartesian coodinate'
+ Point.__doc__ += ': Cartesian coordinate'
Point.x.__doc__ = 'abscissa'
Point.y.__doc__ = 'ordinate'
diff --git a/Doc/whatsnew/3.8.rst b/Doc/whatsnew/3.8.rst
index 7946bf910af2c4..e15180c89f594c 100644
--- a/Doc/whatsnew/3.8.rst
+++ b/Doc/whatsnew/3.8.rst
@@ -404,7 +404,7 @@ Other Language Changes
or :meth:`~object.__complex__` is not available.
(Contributed by Serhiy Storchaka in :issue:`20092`.)
-* Added support of ``\N{name}`` escapes in :mod:`regular expressions <re>`::
+* Added support of :samp:`\\N\\{{name}\\}` escapes in :mod:`regular expressions <re>`::
>>> notice = 'Copyright © 2019'
>>> copyright_year_pattern = re.compile(r'\N{copyright sign}\s*(\d{4})')
@@ -947,7 +947,7 @@ This made it difficult to update, experiment with, or teach the various
logging configuration options using the interactive prompt or a Jupyter
notebook.
-(Suggested by Raymond Hettinger, implemented by Dong-hee Na, and
+(Suggested by Raymond Hettinger, implemented by Donghee Na, and
reviewed by Vinay Sajip in :issue:`33897`.)
@@ -1714,7 +1714,7 @@ Deprecated
* The :meth:`~threading.Thread.isAlive()` method of :class:`threading.Thread`
has been deprecated.
- (Contributed by Dong-hee Na in :issue:`35283`.)
+ (Contributed by Donghee Na in :issue:`35283`.)
* Many builtin and extension functions that take integer arguments will
now emit a deprecation warning for :class:`~decimal.Decimal`\ s,
diff --git a/Doc/whatsnew/3.9.rst b/Doc/whatsnew/3.9.rst
index 8e2df19419bfc2..cb2482ee48d7fa 100644
--- a/Doc/whatsnew/3.9.rst
+++ b/Doc/whatsnew/3.9.rst
@@ -44,7 +44,6 @@
This article explains the new features in Python 3.9, compared to 3.8.
Python 3.9 was released on October 5, 2020.
-
For full details, see the :ref:`changelog `.
.. seealso::
@@ -415,7 +414,7 @@ datetime
The :meth:`~datetime.date.isocalendar()` of :class:`datetime.date`
and :meth:`~datetime.datetime.isocalendar()` of :class:`datetime.datetime`
methods now returns a :func:`~collections.namedtuple` instead of a :class:`tuple`.
-(Contributed by Dong-hee Na in :issue:`24416`.)
+(Contributed by Donghee Na in :issue:`24416`.)
distutils
---------
@@ -429,14 +428,14 @@ fcntl
Added constants :const:`~fcntl.F_OFD_GETLK`, :const:`~fcntl.F_OFD_SETLK`
and :const:`~fcntl.F_OFD_SETLKW`.
-(Contributed by Dong-hee Na in :issue:`38602`.)
+(Contributed by Donghee Na in :issue:`38602`.)
ftplib
-------
:class:`~ftplib.FTP` and :class:`~ftplib.FTP_TLS` now raise a :class:`ValueError`
if the given timeout for their constructor is zero to prevent the creation of
-a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.)
+a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.)
gc
--
@@ -468,7 +467,7 @@ http
----
HTTP status codes ``103 EARLY_HINTS``, ``418 IM_A_TEAPOT`` and ``425 TOO_EARLY`` are added to
-:class:`http.HTTPStatus`. (Contributed by Dong-hee Na in :issue:`39509` and Ross Rhodes in :issue:`39507`.)
+:class:`http.HTTPStatus`. (Contributed by Donghee Na in :issue:`39509` and Ross Rhodes in :issue:`39507`.)
IDLE and idlelib
----------------
@@ -509,14 +508,14 @@ an optional *timeout* parameter for their constructors.
Also, the :meth:`~imaplib.IMAP4.open` method now has an optional *timeout* parameter
with this change. The overridden methods of :class:`~imaplib.IMAP4_SSL` and
:class:`~imaplib.IMAP4_stream` were applied to this change.
-(Contributed by Dong-hee Na in :issue:`38615`.)
+(Contributed by Donghee Na in :issue:`38615`.)
:meth:`imaplib.IMAP4.unselect` is added.
:meth:`imaplib.IMAP4.unselect` frees server's resources associated with the
selected mailbox and returns the server to the authenticated
state. This command performs the same actions as :meth:`imaplib.IMAP4.close`, except
that no messages are permanently removed from the currently
-selected mailbox. (Contributed by Dong-hee Na in :issue:`40375`.)
+selected mailbox. (Contributed by Donghee Na in :issue:`40375`.)
importlib
---------
@@ -588,13 +587,13 @@ nntplib
:class:`~!nntplib.NNTP` and :class:`~!nntplib.NNTP_SSL` now raise a :class:`ValueError`
if the given timeout for their constructor is zero to prevent the creation of
-a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.)
+a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.)
os
--
Added :const:`~os.CLD_KILLED` and :const:`~os.CLD_STOPPED` for :attr:`si_code`.
-(Contributed by Dong-hee Na in :issue:`38493`.)
+(Contributed by Donghee Na in :issue:`38493`.)
Exposed the Linux-specific :func:`os.pidfd_open` (:issue:`38692`) and
:const:`os.P_PIDFD` (:issue:`38713`) for process management with file
@@ -629,7 +628,7 @@ poplib
:class:`~poplib.POP3` and :class:`~poplib.POP3_SSL` now raise a :class:`ValueError`
if the given timeout for their constructor is zero to prevent the creation of
-a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.)
+a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.)
pprint
------
@@ -661,10 +660,10 @@ smtplib
:class:`~smtplib.SMTP` and :class:`~smtplib.SMTP_SSL` now raise a :class:`ValueError`
if the given timeout for their constructor is zero to prevent the creation of
-a non-blocking socket. (Contributed by Dong-hee Na in :issue:`39259`.)
+a non-blocking socket. (Contributed by Donghee Na in :issue:`39259`.)
:class:`~smtplib.LMTP` constructor now has an optional *timeout* parameter.
-(Contributed by Dong-hee Na in :issue:`39329`.)
+(Contributed by Donghee Na in :issue:`39329`.)
socket
------
@@ -777,7 +776,7 @@ Optimizations
* A number of Python builtins (:class:`range`, :class:`tuple`, :class:`set`,
:class:`frozenset`, :class:`list`, :class:`dict`) are now sped up by using
:pep:`590` vectorcall protocol.
- (Contributed by Dong-hee Na, Mark Shannon, Jeroen Demeyer and Petr Viktorin in :issue:`37207`.)
+ (Contributed by Donghee Na, Mark Shannon, Jeroen Demeyer and Petr Viktorin in :issue:`37207`.)
* Optimized :func:`~set.difference_update` for the case when the other set
is much larger than the base set.
@@ -791,7 +790,7 @@ Optimizations
* :term:`floor division` of float operation now has a better performance. Also
the message of :exc:`ZeroDivisionError` for this operation is updated.
- (Contributed by Dong-hee Na in :issue:`39434`.)
+ (Contributed by Donghee Na in :issue:`39434`.)
* Decoding short ASCII strings with UTF-8 and ascii codecs is now about
15% faster. (Contributed by Inada Naoki in :issue:`37348`.)
@@ -961,7 +960,7 @@ Removed
are not supported or not enabled by NNTP server administrators.
For ``xgtitle()``, please use :meth:`!nntplib.NNTP.descriptions` or
:meth:`!nntplib.NNTP.description` instead.
- (Contributed by Dong-hee Na in :issue:`39366`.)
+ (Contributed by Donghee Na in :issue:`39366`.)
* :class:`array.array`: ``tostring()`` and ``fromstring()`` methods have been
removed. They were aliases to ``tobytes()`` and ``frombytes()``, deprecated
@@ -994,7 +993,7 @@ Removed
* The :meth:`~threading.Thread.isAlive()` method of :class:`threading.Thread`
has been removed. It was deprecated since Python 3.8.
Use :meth:`~threading.Thread.is_alive()` instead.
- (Contributed by Dong-hee Na in :issue:`37804`.)
+ (Contributed by Donghee Na in :issue:`37804`.)
* Methods ``getchildren()`` and ``getiterator()`` of classes
:class:`~xml.etree.ElementTree.ElementTree` and
@@ -1315,7 +1314,7 @@ New Features
* The :c:func:`PyModule_AddType` function is added to help adding a type
to a module.
- (Contributed by Dong-hee Na in :issue:`40024`.)
+ (Contributed by Donghee Na in :issue:`40024`.)
* Added the functions :c:func:`PyObject_GC_IsTracked` and
:c:func:`PyObject_GC_IsFinalized` to the public API to allow to query if
diff --git a/Grammar/python.gram b/Grammar/python.gram
index e7c817856d514b..73aaa796b075bc 100644
--- a/Grammar/python.gram
+++ b/Grammar/python.gram
@@ -19,8 +19,6 @@ _PyPegen_parse(Parser *p)
result = eval_rule(p);
} else if (p->start_rule == Py_func_type_input) {
result = func_type_rule(p);
- } else if (p->start_rule == Py_fstring_input) {
- result = fstring_rule(p);
}
return result;
@@ -89,7 +87,6 @@ file[mod_ty]: a=[statements] ENDMARKER { _PyPegen_make_module(p, a) }
interactive[mod_ty]: a=statement_newline { _PyAST_Interactive(a, p->arena) }
eval[mod_ty]: a=expressions NEWLINE* ENDMARKER { _PyAST_Expression(a, p->arena) }
func_type[mod_ty]: '(' a=[type_expressions] ')' '->' b=expression NEWLINE* ENDMARKER { _PyAST_FunctionType(a, b, p->arena) }
-fstring[expr_ty]: star_expressions
# GENERAL STATEMENTS
# ==================
@@ -647,20 +644,20 @@ type_param_seq[asdl_type_param_seq*]: a[asdl_type_param_seq*]=','.type_param+ ['
type_param[type_param_ty] (memo):
| a=NAME b=[type_param_bound] { _PyAST_TypeVar(a->v.Name.id, b, EXTRA) }
- | '*' a=NAME colon=":" e=expression {
+ | '*' a=NAME colon=':' e=expression {
RAISE_SYNTAX_ERROR_STARTING_FROM(colon, e->kind == Tuple_kind
? "cannot use constraints with TypeVarTuple"
: "cannot use bound with TypeVarTuple")
}
| '*' a=NAME { _PyAST_TypeVarTuple(a->v.Name.id, EXTRA) }
- | '**' a=NAME colon=":" e=expression {
+ | '**' a=NAME colon=':' e=expression {
RAISE_SYNTAX_ERROR_STARTING_FROM(colon, e->kind == Tuple_kind
? "cannot use constraints with ParamSpec"
: "cannot use bound with ParamSpec")
}
| '**' a=NAME { _PyAST_ParamSpec(a->v.Name.id, EXTRA) }
-type_param_bound[expr_ty]: ":" e=expression { e }
+type_param_bound[expr_ty]: ':' e=expression { e }
# EXPRESSIONS
# -----------
@@ -915,7 +912,7 @@ fstring_middle[expr_ty]:
| fstring_replacement_field
| t=FSTRING_MIDDLE { _PyPegen_constant_from_token(p, t) }
fstring_replacement_field[expr_ty]:
- | '{' a=(yield_expr | star_expressions) debug_expr="="? conversion=[fstring_conversion] format=[fstring_full_format_spec] rbrace='}' {
+ | '{' a=(yield_expr | star_expressions) debug_expr='='? conversion=[fstring_conversion] format=[fstring_full_format_spec] rbrace='}' {
_PyPegen_formatted_value(p, a, debug_expr, conversion, format, rbrace, EXTRA) }
| invalid_replacement_field
fstring_conversion[ResultTokenWithMetadata*]:
@@ -1170,7 +1167,7 @@ invalid_expression:
_PyPegen_check_legacy_stmt(p, a) ? NULL : p->tokens[p->mark-1]->level == 0 ? NULL :
RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "invalid syntax. Perhaps you forgot a comma?") }
| a=disjunction 'if' b=disjunction !('else'|':') { RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "expected 'else' after 'if' expression") }
- | a='lambda' [lambda_params] b=':' &(FSTRING_MIDDLE | fstring_replacement_field) {
+ | a='lambda' [lambda_params] b=':' &FSTRING_MIDDLE {
RAISE_SYNTAX_ERROR_KNOWN_RANGE(a, b, "f-string: lambda expressions are not allowed without parentheses") }
invalid_named_expression(memo):
diff --git a/Include/Python.h b/Include/Python.h
index 4cc72bb23ce7a3..7312cc87d5cc33 100644
--- a/Include/Python.h
+++ b/Include/Python.h
@@ -17,7 +17,6 @@
// Include standard header files
#include <assert.h>               // assert()
-#include <ctype.h>                // tolower()
+#include <inttypes.h>             // uintptr_t
#include <limits.h>               // INT_MAX
#include <math.h>                 // HUGE_VAL
@@ -49,6 +48,7 @@
#include "pytypedefs.h"
#include "pybuffer.h"
#include "pystats.h"
+#include "pyatomic.h"
#include "object.h"
#include "objimpl.h"
#include "typeslots.h"
diff --git a/Include/abstract.h b/Include/abstract.h
index dd915004e7834e..bd12a54963c13f 100644
--- a/Include/abstract.h
+++ b/Include/abstract.h
@@ -50,6 +50,25 @@ extern "C" {
This function always succeeds. */
+
+/* Implemented elsewhere:
+
+ int PyObject_HasAttrStringWithError(PyObject *o, const char *attr_name);
+
+ Returns 1 if object 'o' has the attribute attr_name, and 0 otherwise.
+ This is equivalent to the Python expression: hasattr(o,attr_name).
+ Returns -1 on failure. */
+
+
+/* Implemented elsewhere:
+
+ int PyObject_HasAttrWithError(PyObject *o, PyObject *attr_name);
+
+ Returns 1 if o has the attribute attr_name, and 0 otherwise.
+ This is equivalent to the Python expression: hasattr(o,attr_name).
+ Returns -1 on failure. */
+
+
/* Implemented elsewhere:
PyObject* PyObject_GetAttr(PyObject *o, PyObject *attr_name);
@@ -821,6 +840,18 @@ PyAPI_FUNC(int) PyMapping_HasKeyString(PyObject *o, const char *key);
This function always succeeds. */
PyAPI_FUNC(int) PyMapping_HasKey(PyObject *o, PyObject *key);
+/* Return 1 if the mapping object has the key 'key', and 0 otherwise.
+ This is equivalent to the Python expression: key in o.
+ On failure, return -1. */
+
+PyAPI_FUNC(int) PyMapping_HasKeyWithError(PyObject *o, PyObject *key);
+
+/* Return 1 if the mapping object has the key 'key', and 0 otherwise.
+ This is equivalent to the Python expression: key in o.
+ On failure, return -1. */
+
+PyAPI_FUNC(int) PyMapping_HasKeyStringWithError(PyObject *o, const char *key);
+
/* On success, return a list or tuple of the keys in mapping object 'o'.
On failure, return NULL. */
PyAPI_FUNC(PyObject *) PyMapping_Keys(PyObject *o);
diff --git a/Include/compile.h b/Include/compile.h
index 3c5acd7209f763..52d0bc76c9fca4 100644
--- a/Include/compile.h
+++ b/Include/compile.h
@@ -10,9 +10,6 @@ extern "C" {
#define Py_eval_input 258
#define Py_func_type_input 345
-/* This doesn't need to match anything */
-#define Py_fstring_input 800
-
#ifndef Py_LIMITED_API
# define Py_CPYTHON_COMPILE_H
# include "cpython/compile.h"
diff --git a/Include/cpython/code.h b/Include/cpython/code.h
index 24c5ec23590c94..45b09a1265df80 100644
--- a/Include/cpython/code.h
+++ b/Include/cpython/code.h
@@ -8,16 +8,21 @@
extern "C" {
#endif
-
+/* Count of all local monitoring events */
+#define _PY_MONITORING_LOCAL_EVENTS 10
/* Count of all "real" monitoring events (not derived from other events) */
#define _PY_MONITORING_UNGROUPED_EVENTS 15
/* Count of all monitoring events */
#define _PY_MONITORING_EVENTS 17
-/* Table of which tools are active for each monitored event. */
-typedef struct _Py_Monitors {
+/* Tables of which tools are active for each monitored event. */
+typedef struct _Py_LocalMonitors {
+ uint8_t tools[_PY_MONITORING_LOCAL_EVENTS];
+} _Py_LocalMonitors;
+
+typedef struct _Py_GlobalMonitors {
uint8_t tools[_PY_MONITORING_UNGROUPED_EVENTS];
-} _Py_Monitors;
+} _Py_GlobalMonitors;
/* Each instruction in a code object is a fixed-width value,
* currently 2 bytes: 1-byte opcode + 1-byte oparg. The EXTENDED_ARG
@@ -88,9 +93,9 @@ typedef struct {
*/
typedef struct {
/* Monitoring specific to this code object */
- _Py_Monitors local_monitors;
+ _Py_LocalMonitors local_monitors;
/* Monitoring that is active on this code object */
- _Py_Monitors active_monitors;
+ _Py_LocalMonitors active_monitors;
/* The tools that are to be notified for events for the matching code unit */
uint8_t *tools;
/* Information to support line events */
diff --git a/Include/cpython/import.h b/Include/cpython/import.h
index cdfdd15bfa48d2..7daf0b84fcf71b 100644
--- a/Include/cpython/import.h
+++ b/Include/cpython/import.h
@@ -17,7 +17,6 @@ struct _frozen {
const unsigned char *code;
int size;
int is_package;
- PyObject *(*get_code)(void);
};
/* Embedding apps may change this pointer to point to their favorite
diff --git a/Include/cpython/initconfig.h b/Include/cpython/initconfig.h
index 7fb7a9868be926..ee130467824daa 100644
--- a/Include/cpython/initconfig.h
+++ b/Include/cpython/initconfig.h
@@ -215,6 +215,11 @@ typedef struct PyConfig {
// If non-zero, we believe we're running from a source tree.
int _is_python_build;
+
+#ifdef Py_STATS
+ // If non-zero, turns on statistics gathering.
+ int _pystats;
+#endif
} PyConfig;
PyAPI_FUNC(void) PyConfig_InitPythonConfig(PyConfig *config);
diff --git a/Include/cpython/optimizer.h b/Include/cpython/optimizer.h
index 10457afc180a00..47536108a9665e 100644
--- a/Include/cpython/optimizer.h
+++ b/Include/cpython/optimizer.h
@@ -40,7 +40,7 @@ PyAPI_FUNC(_PyOptimizerObject *) PyUnstable_GetOptimizer(void);
PyAPI_FUNC(_PyExecutorObject *) PyUnstable_GetExecutor(PyCodeObject *code, int offset);
-struct _PyInterpreterFrame *
+int
_PyOptimizer_BackEdge(struct _PyInterpreterFrame *frame, _Py_CODEUNIT *src, _Py_CODEUNIT *dest, PyObject **stack_pointer);
extern _PyOptimizerObject _PyOptimizer_Default;
diff --git a/Include/cpython/pyatomic.h b/Include/cpython/pyatomic.h
index 73712db847087d..ab182381b39f00 100644
--- a/Include/cpython/pyatomic.h
+++ b/Include/cpython/pyatomic.h
@@ -83,9 +83,9 @@
// # release
// ...
-#ifndef Py_ATOMIC_H
-#define Py_ATOMIC_H
-
+#ifndef Py_CPYTHON_ATOMIC_H
+# error "this header file must not be included directly"
+#endif
// --- _Py_atomic_add --------------------------------------------------------
// Atomically adds `value` to `obj` and returns the previous value
@@ -501,6 +501,3 @@ static inline void _Py_atomic_fence_release(void);
#else
# error "no available pyatomic implementation for this platform/compiler"
#endif
-
-#endif /* Py_ATOMIC_H */
-
diff --git a/Include/cpython/pyatomic_msc.h b/Include/cpython/pyatomic_msc.h
index c88bb03cc8f94a..287ed43b5714cd 100644
--- a/Include/cpython/pyatomic_msc.h
+++ b/Include/cpython/pyatomic_msc.h
@@ -906,7 +906,7 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
#if defined(_M_X64) || defined(_M_IX86)
*(void * volatile *)obj = value;
#elif defined(_M_ARM64)
- __stlr64(obj, (uintptr_t)value);
+ __stlr64((unsigned __int64 volatile *)obj, (uintptr_t)value);
#else
# error "no implementation of _Py_atomic_store_ptr_release"
#endif
diff --git a/Include/cpython/pyerrors.h b/Include/cpython/pyerrors.h
index 9633a5407f28a6..da96eec4b35aab 100644
--- a/Include/cpython/pyerrors.h
+++ b/Include/cpython/pyerrors.h
@@ -116,10 +116,6 @@ PyAPI_FUNC(PyObject *) PyErr_ProgramTextObject(
PyObject *filename,
int lineno);
-PyAPI_FUNC(void) _PyErr_WriteUnraisableMsg(
- const char *err_msg,
- PyObject *obj);
-
PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalErrorFunc(
const char *func,
const char *message);
diff --git a/Include/cpython/pystate.h b/Include/cpython/pystate.h
index fc5f58db86dbe8..5e184d0ca0944b 100644
--- a/Include/cpython/pystate.h
+++ b/Include/cpython/pystate.h
@@ -194,18 +194,17 @@ struct _ts {
};
-/* WASI has limited call stack. Python's recursion limit depends on code
- layout, optimization, and WASI runtime. Wasmtime can handle about 700
- recursions, sometimes less. 500 is a more conservative limit. */
-#ifndef C_RECURSION_LIMIT
-# ifdef __wasi__
-# define C_RECURSION_LIMIT 500
-# else
- // This value is duplicated in Lib/test/support/__init__.py
-# define C_RECURSION_LIMIT 1500
-# endif
+#ifdef __wasi__
+ // WASI has limited call stack. Python's recursion limit depends on code
+ // layout, optimization, and WASI runtime. Wasmtime can handle about 700
+ // recursions, sometimes less. 500 is a more conservative limit.
+# define Py_C_RECURSION_LIMIT 500
+#else
+ // This value is duplicated in Lib/test/support/__init__.py
+# define Py_C_RECURSION_LIMIT 1500
#endif
+
/* other API */
/* Similar to PyThreadState_Get(), but don't issue a fatal error
@@ -310,6 +309,7 @@ PyAPI_FUNC(void) _PyCrossInterpreterData_Clear(
PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *);
PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *);
PyAPI_FUNC(int) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *);
+PyAPI_FUNC(int) _PyCrossInterpreterData_ReleaseAndRawFree(_PyCrossInterpreterData *);
PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *);
diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h
new file mode 100644
index 00000000000000..150e16faa96ca1
--- /dev/null
+++ b/Include/cpython/pystats.h
@@ -0,0 +1,120 @@
+// Statistics on Python performance.
+//
+// API:
+//
+// - _Py_INCREF_STAT_INC() and _Py_DECREF_STAT_INC() used by Py_INCREF()
+// and Py_DECREF().
+// - _Py_stats variable
+//
+// Functions of the sys module:
+//
+// - sys._stats_on()
+// - sys._stats_off()
+// - sys._stats_clear()
+// - sys._stats_dump()
+//
+// Python must be built with ./configure --enable-pystats to define the
+// Py_STATS macro.
+//
+// Define _PY_INTERPRETER macro to increment interpreter_increfs and
+// interpreter_decrefs. Otherwise, increment increfs and decrefs.
+
+#ifndef Py_CPYTHON_PYSTATS_H
+# error "this header file must not be included directly"
+#endif
+
+#define SPECIALIZATION_FAILURE_KINDS 36
+
+/* Stats for determining who is calling PyEval_EvalFrame */
+#define EVAL_CALL_TOTAL 0
+#define EVAL_CALL_VECTOR 1
+#define EVAL_CALL_GENERATOR 2
+#define EVAL_CALL_LEGACY 3
+#define EVAL_CALL_FUNCTION_VECTORCALL 4
+#define EVAL_CALL_BUILD_CLASS 5
+#define EVAL_CALL_SLOT 6
+#define EVAL_CALL_FUNCTION_EX 7
+#define EVAL_CALL_API 8
+#define EVAL_CALL_METHOD 9
+
+#define EVAL_CALL_KINDS 10
+
+typedef struct _specialization_stats {
+ uint64_t success;
+ uint64_t failure;
+ uint64_t hit;
+ uint64_t deferred;
+ uint64_t miss;
+ uint64_t deopt;
+ uint64_t failure_kinds[SPECIALIZATION_FAILURE_KINDS];
+} SpecializationStats;
+
+typedef struct _opcode_stats {
+ SpecializationStats specialization;
+ uint64_t execution_count;
+ uint64_t pair_count[256];
+} OpcodeStats;
+
+typedef struct _call_stats {
+ uint64_t inlined_py_calls;
+ uint64_t pyeval_calls;
+ uint64_t frames_pushed;
+ uint64_t frame_objects_created;
+ uint64_t eval_calls[EVAL_CALL_KINDS];
+} CallStats;
+
+typedef struct _object_stats {
+ uint64_t increfs;
+ uint64_t decrefs;
+ uint64_t interpreter_increfs;
+ uint64_t interpreter_decrefs;
+ uint64_t allocations;
+ uint64_t allocations512;
+ uint64_t allocations4k;
+ uint64_t allocations_big;
+ uint64_t frees;
+ uint64_t to_freelist;
+ uint64_t from_freelist;
+ uint64_t new_values;
+ uint64_t dict_materialized_on_request;
+ uint64_t dict_materialized_new_key;
+ uint64_t dict_materialized_too_big;
+ uint64_t dict_materialized_str_subclass;
+ uint64_t dict_dematerialized;
+ uint64_t type_cache_hits;
+ uint64_t type_cache_misses;
+ uint64_t type_cache_dunder_hits;
+ uint64_t type_cache_dunder_misses;
+ uint64_t type_cache_collisions;
+ uint64_t optimization_attempts;
+ uint64_t optimization_traces_created;
+ uint64_t optimization_traces_executed;
+ uint64_t optimization_uops_executed;
+ /* Temporary value used during GC */
+ uint64_t object_visits;
+} ObjectStats;
+
+typedef struct _gc_stats {
+ uint64_t collections;
+ uint64_t object_visits;
+ uint64_t objects_collected;
+} GCStats;
+
+typedef struct _stats {
+ OpcodeStats opcode_stats[256];
+ CallStats call_stats;
+ ObjectStats object_stats;
+ GCStats *gc_stats;
+} PyStats;
+
+
+// Export for shared extensions like 'math'
+PyAPI_DATA(PyStats*) _Py_stats;
+
+#ifdef _PY_INTERPRETER
+# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_increfs++; } while (0)
+# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_decrefs++; } while (0)
+#else
+# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.increfs++; } while (0)
+# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.decrefs++; } while (0)
+#endif
diff --git a/Include/exports.h b/Include/exports.h
index 59373c39ff757c..ce601216f17156 100644
--- a/Include/exports.h
+++ b/Include/exports.h
@@ -1,6 +1,29 @@
#ifndef Py_EXPORTS_H
#define Py_EXPORTS_H
+/* Declarations for symbol visibility.
+
+ PyAPI_FUNC(type): Declares a public Python API function and return type
+ PyAPI_DATA(type): Declares public Python data and its type
+ PyMODINIT_FUNC: A Python module init function. If these functions are
+ inside the Python core, they are private to the core.
+ If in an extension module, it may be declared with
+ external linkage depending on the platform.
+
+ As a number of platforms support/require "__declspec(dllimport/dllexport)",
+ we support a HAVE_DECLSPEC_DLL macro to save duplication.
+*/
+
+/*
+ All windows ports, except cygwin, are handled in PC/pyconfig.h.
+
+ Cygwin is the only other autoconf platform requiring special
+ linkage handling and it uses __declspec().
+*/
+#if defined(__CYGWIN__)
+# define HAVE_DECLSPEC_DLL
+#endif
+
#if defined(_WIN32) || defined(__CYGWIN__)
#if defined(Py_ENABLE_SHARED)
#define Py_IMPORTED_SYMBOL __declspec(dllimport)
@@ -33,4 +56,53 @@
#endif
#endif
+/* only get special linkage if built as shared or platform is Cygwin */
+#if defined(Py_ENABLE_SHARED) || defined(__CYGWIN__)
+# if defined(HAVE_DECLSPEC_DLL)
+# if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
+# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
+# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
+ /* module init functions inside the core need no external linkage */
+ /* except for Cygwin to handle embedding */
+# if defined(__CYGWIN__)
+# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
+# else /* __CYGWIN__ */
+# define PyMODINIT_FUNC PyObject*
+# endif /* __CYGWIN__ */
+# else /* Py_BUILD_CORE */
+ /* Building an extension module, or an embedded situation */
+ /* public Python functions and data are imported */
+ /* Under Cygwin, auto-import functions to prevent compilation */
+ /* failures similar to those described at the bottom of 4.1: */
+ /* http://docs.python.org/extending/windows.html#a-cookbook-approach */
+# if !defined(__CYGWIN__)
+# define PyAPI_FUNC(RTYPE) Py_IMPORTED_SYMBOL RTYPE
+# endif /* !__CYGWIN__ */
+# define PyAPI_DATA(RTYPE) extern Py_IMPORTED_SYMBOL RTYPE
+ /* module init functions outside the core must be exported */
+# if defined(__cplusplus)
+# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
+# else /* __cplusplus */
+# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
+# endif /* __cplusplus */
+# endif /* Py_BUILD_CORE */
+# endif /* HAVE_DECLSPEC_DLL */
+#endif /* Py_ENABLE_SHARED */
+
+/* If no external linkage macros defined by now, create defaults */
+#ifndef PyAPI_FUNC
+# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
+#endif
+#ifndef PyAPI_DATA
+# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
+#endif
+#ifndef PyMODINIT_FUNC
+# if defined(__cplusplus)
+# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
+# else /* __cplusplus */
+# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
+# endif /* __cplusplus */
+#endif
+
+
#endif /* Py_EXPORTS_H */
diff --git a/Include/fileutils.h b/Include/fileutils.h
index ba5acc84fcb185..1509198e45f0ca 100644
--- a/Include/fileutils.h
+++ b/Include/fileutils.h
@@ -1,5 +1,41 @@
#ifndef Py_FILEUTILS_H
#define Py_FILEUTILS_H
+
+/*******************************
+ * stat() and fstat() fiddling *
+ *******************************/
+
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>            // S_ISREG()
+#elif defined(HAVE_STAT_H)
+# include <stat.h>                // S_ISREG()
+#endif
+
+#ifndef S_IFMT
+ // VisualAge C/C++ Failed to Define MountType Field in sys/stat.h.
+# define S_IFMT 0170000
+#endif
+#ifndef S_IFLNK
+ // Windows doesn't define S_IFLNK, but posixmodule.c maps
+ // IO_REPARSE_TAG_SYMLINK to S_IFLNK.
+# define S_IFLNK 0120000
+#endif
+#ifndef S_ISREG
+# define S_ISREG(x) (((x) & S_IFMT) == S_IFREG)
+#endif
+#ifndef S_ISDIR
+# define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR)
+#endif
+#ifndef S_ISCHR
+# define S_ISCHR(x) (((x) & S_IFMT) == S_IFCHR)
+#endif
+#ifndef S_ISLNK
+# define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK)
+#endif
+
+
+// Move this down here since some C++ #include's don't like to be included
+// inside an extern "C".
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/Include/internal/pycore_atomic_funcs.h b/Include/internal/pycore_atomic_funcs.h
deleted file mode 100644
index a708789cea733b..00000000000000
--- a/Include/internal/pycore_atomic_funcs.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* Atomic functions: similar to pycore_atomic.h, but don't need
- to declare variables as atomic.
-
- Py_ssize_t type:
-
- * value = _Py_atomic_size_get(&var)
- * _Py_atomic_size_set(&var, value)
-
- Use sequentially-consistent ordering (__ATOMIC_SEQ_CST memory order):
- enforce total ordering with all other atomic functions.
-*/
-#ifndef Py_ATOMIC_FUNC_H
-#define Py_ATOMIC_FUNC_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef Py_BUILD_CORE
-# error "this header requires Py_BUILD_CORE define"
-#endif
-
-#if defined(_MSC_VER)
-# include <intrin.h>              // _InterlockedExchange()
-#endif
-
-
-// Use builtin atomic operations in GCC >= 4.7 and clang
-#ifdef HAVE_BUILTIN_ATOMIC
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
- return __atomic_load_n(var, __ATOMIC_SEQ_CST);
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
- __atomic_store_n(var, value, __ATOMIC_SEQ_CST);
-}
-
-#elif defined(_MSC_VER)
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
-#if SIZEOF_VOID_P == 8
- Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
- volatile __int64 *volatile_var = (volatile __int64 *)var;
- __int64 old;
- do {
- old = *volatile_var;
- } while(_InterlockedCompareExchange64(volatile_var, old, old) != old);
-#else
- Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
- volatile long *volatile_var = (volatile long *)var;
- long old;
- do {
- old = *volatile_var;
- } while(_InterlockedCompareExchange(volatile_var, old, old) != old);
-#endif
- return old;
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
-#if SIZEOF_VOID_P == 8
- Py_BUILD_ASSERT(sizeof(__int64) == sizeof(*var));
- volatile __int64 *volatile_var = (volatile __int64 *)var;
- _InterlockedExchange64(volatile_var, value);
-#else
- Py_BUILD_ASSERT(sizeof(long) == sizeof(*var));
- volatile long *volatile_var = (volatile long *)var;
- _InterlockedExchange(volatile_var, value);
-#endif
-}
-
-#else
-// Fallback implementation using volatile
-
-static inline Py_ssize_t _Py_atomic_size_get(Py_ssize_t *var)
-{
- volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
- return *volatile_var;
-}
-
-static inline void _Py_atomic_size_set(Py_ssize_t *var, Py_ssize_t value)
-{
- volatile Py_ssize_t *volatile_var = (volatile Py_ssize_t *)var;
- *volatile_var = value;
-}
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-#endif /* Py_ATOMIC_FUNC_H */
diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h
index e9535023cec46b..23d0fa399d7e6f 100644
--- a/Include/internal/pycore_ceval.h
+++ b/Include/internal/pycore_ceval.h
@@ -44,7 +44,7 @@ extern void _PyEval_SignalReceived(PyInterpreterState *interp);
// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(int) _PyEval_AddPendingCall(
PyInterpreterState *interp,
- int (*func)(void *),
+ _Py_pending_call_func func,
void *arg,
int mainthreadonly);
diff --git a/Include/internal/pycore_ceval_state.h b/Include/internal/pycore_ceval_state.h
index 6e3d669dc646af..d0af5b542233e0 100644
--- a/Include/internal/pycore_ceval_state.h
+++ b/Include/internal/pycore_ceval_state.h
@@ -11,6 +11,8 @@ extern "C" {
#include "pycore_gil.h" // struct _gil_runtime_state
+typedef int (*_Py_pending_call_func)(void *);
+
struct _pending_calls {
int busy;
PyThread_type_lock lock;
@@ -22,7 +24,7 @@ struct _pending_calls {
int async_exc;
#define NPENDINGCALLS 32
struct _pending_call {
- int (*func)(void *);
+ _Py_pending_call_func func;
void *arg;
} calls[NPENDINGCALLS];
int first;
diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h
index f5127a81144353..a77fa11baf8413 100644
--- a/Include/internal/pycore_code.h
+++ b/Include/internal/pycore_code.h
@@ -233,6 +233,9 @@ extern void _PyLineTable_InitAddressRange(
extern int _PyLineTable_NextAddressRange(PyCodeAddressRange *range);
extern int _PyLineTable_PreviousAddressRange(PyCodeAddressRange *range);
+/** API for executors */
+extern void _PyCode_Clear_Executors(PyCodeObject *code);
+
#define ENABLE_SPECIALIZATION 1
/* Specialization functions */
@@ -250,7 +253,7 @@ extern void _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container,
extern void _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub,
_Py_CODEUNIT *instr);
extern void _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
- int nargs, PyObject *kwnames);
+ int nargs);
extern void _Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
int oparg, PyObject **locals);
extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs,
@@ -268,17 +271,17 @@ extern int _PyStaticCode_Init(PyCodeObject *co);
#ifdef Py_STATS
-#define STAT_INC(opname, name) do { if (_py_stats) _py_stats->opcode_stats[opname].specialization.name++; } while (0)
-#define STAT_DEC(opname, name) do { if (_py_stats) _py_stats->opcode_stats[opname].specialization.name--; } while (0)
-#define OPCODE_EXE_INC(opname) do { if (_py_stats) _py_stats->opcode_stats[opname].execution_count++; } while (0)
-#define CALL_STAT_INC(name) do { if (_py_stats) _py_stats->call_stats.name++; } while (0)
-#define OBJECT_STAT_INC(name) do { if (_py_stats) _py_stats->object_stats.name++; } while (0)
+#define STAT_INC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name++; } while (0)
+#define STAT_DEC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name--; } while (0)
+#define OPCODE_EXE_INC(opname) do { if (_Py_stats) _Py_stats->opcode_stats[opname].execution_count++; } while (0)
+#define CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.name++; } while (0)
+#define OBJECT_STAT_INC(name) do { if (_Py_stats) _Py_stats->object_stats.name++; } while (0)
#define OBJECT_STAT_INC_COND(name, cond) \
- do { if (_py_stats && cond) _py_stats->object_stats.name++; } while (0)
-#define EVAL_CALL_STAT_INC(name) do { if (_py_stats) _py_stats->call_stats.eval_calls[name]++; } while (0)
+ do { if (_Py_stats && cond) _Py_stats->object_stats.name++; } while (0)
+#define EVAL_CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) \
- do { if (_py_stats && PyFunction_Check(callable)) _py_stats->call_stats.eval_calls[name]++; } while (0)
-#define GC_STAT_ADD(gen, name, n) do { if (_py_stats) _py_stats->gc_stats[(gen)].name += (n); } while (0)
+ do { if (_Py_stats && PyFunction_Check(callable)) _Py_stats->call_stats.eval_calls[name]++; } while (0)
+#define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0)
// Export for '_opcode' shared extension
PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void);
@@ -462,8 +465,6 @@ adaptive_counter_backoff(uint16_t counter) {
return adaptive_counter_bits(value, backoff);
}
-extern uint32_t _Py_next_func_version;
-
/* Comparison bit masks. */
diff --git a/Include/internal/pycore_emscripten_signal.h b/Include/internal/pycore_emscripten_signal.h
index d1bcb9a92c7726..754193e21dec5a 100644
--- a/Include/internal/pycore_emscripten_signal.h
+++ b/Include/internal/pycore_emscripten_signal.h
@@ -18,6 +18,7 @@ _Py_CheckEmscriptenSignalsPeriodically(void);
#define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY() _Py_CheckEmscriptenSignalsPeriodically()
extern int Py_EMSCRIPTEN_SIGNAL_HANDLING;
+extern int _Py_emscripten_signal_clock;
#else
diff --git a/Include/internal/pycore_emscripten_trampoline.h b/Include/internal/pycore_emscripten_trampoline.h
new file mode 100644
index 00000000000000..e519c99ad86cce
--- /dev/null
+++ b/Include/internal/pycore_emscripten_trampoline.h
@@ -0,0 +1,81 @@
+#ifndef Py_EMSCRIPTEN_TRAMPOLINE_H
+#define Py_EMSCRIPTEN_TRAMPOLINE_H
+
+#include "pycore_runtime.h" // _PyRuntimeState
+
+/**
+ * C function call trampolines to mitigate bad function pointer casts.
+ *
+ * Section 6.3.2.3, paragraph 8 reads:
+ *
+ * A pointer to a function of one type may be converted to a pointer to a
+ * function of another type and back again; the result shall compare equal to
+ * the original pointer. If a converted pointer is used to call a function
+ * whose type is not compatible with the pointed-to type, the behavior is
+ * undefined.
+ *
+ * Typical native ABIs ignore additional arguments or fill in missing values
+ * with 0/NULL in function pointer cast. Compilers do not show warnings when a
+ * function pointer is explicitly casted to an incompatible type.
+ *
+ * Bad fpcasts are an issue in WebAssembly. WASM's indirect_call has strict
+ * function signature checks. Argument count, types, and return type must match.
+ *
+ * Third party code unintentionally rely on problematic fpcasts. The call
+ * trampoline mitigates common occurrences of bad fpcasts on Emscripten.
+ */
+
+#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
+
+void _Py_EmscriptenTrampoline_Init(_PyRuntimeState *runtime);
+
+PyObject*
+_PyEM_TrampolineCall_JavaScript(PyCFunctionWithKeywords func,
+ PyObject* self,
+ PyObject* args,
+ PyObject* kw);
+
+PyObject*
+_PyEM_TrampolineCall_Reflection(PyCFunctionWithKeywords func,
+ PyObject* self,
+ PyObject* args,
+ PyObject* kw);
+
+#define _PyEM_TrampolineCall(meth, self, args, kw) \
+ ((_PyRuntime.wasm_type_reflection_available) ? \
+ (_PyEM_TrampolineCall_Reflection((PyCFunctionWithKeywords)(meth), (self), (args), (kw))) : \
+ (_PyEM_TrampolineCall_JavaScript((PyCFunctionWithKeywords)(meth), (self), (args), (kw))))
+
+#define _PyCFunction_TrampolineCall(meth, self, args) \
+ _PyEM_TrampolineCall( \
+ (*(PyCFunctionWithKeywords)(void(*)(void))(meth)), (self), (args), NULL)
+
+#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
+ _PyEM_TrampolineCall((meth), (self), (args), (kw))
+
+#define descr_set_trampoline_call(set, obj, value, closure) \
+ ((int)_PyEM_TrampolineCall((PyCFunctionWithKeywords)(set), (obj), (value), (PyObject*)(closure)))
+
+#define descr_get_trampoline_call(get, obj, closure) \
+ _PyEM_TrampolineCall((PyCFunctionWithKeywords)(get), (obj), (PyObject*)(closure), NULL)
+
+
+#else // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
+
+#define _Py_EmscriptenTrampoline_Init(runtime)
+
+#define _PyCFunction_TrampolineCall(meth, self, args) \
+ (meth)((self), (args))
+
+#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
+ (meth)((self), (args), (kw))
+
+#define descr_set_trampoline_call(set, obj, value, closure) \
+ (set)((obj), (value), (closure))
+
+#define descr_get_trampoline_call(get, obj, closure) \
+ (get)((obj), (closure))
+
+#endif // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
+
+#endif // ndef Py_EMSCRIPTEN_SIGNAL_H
diff --git a/Include/internal/pycore_fileutils.h b/Include/internal/pycore_fileutils.h
index 9236e5907a48d5..2f89da2c6ecd91 100644
--- a/Include/internal/pycore_fileutils.h
+++ b/Include/internal/pycore_fileutils.h
@@ -318,6 +318,8 @@ PyAPI_FUNC(int) _PyLong_FileDescriptor_Converter(PyObject *, void *);
// Export for test_peg_generator
PyAPI_FUNC(char*) _Py_UniversalNewlineFgetsWithSize(char *, int, FILE*, PyObject *, size_t*);
+extern int _PyFile_Flush(PyObject *);
+
#ifdef __cplusplus
}
#endif
diff --git a/Include/internal/pycore_instruments.h b/Include/internal/pycore_instruments.h
index 43214aef7f7a1b..97dcfb9f8672f7 100644
--- a/Include/internal/pycore_instruments.h
+++ b/Include/internal/pycore_instruments.h
@@ -5,7 +5,6 @@
# error "this header requires Py_BUILD_CORE define"
#endif
-#include "pycore_bitutils.h" // _Py_popcount32
#include "pycore_frame.h" // _PyInterpreterFrame
#ifdef __cplusplus
@@ -29,7 +28,7 @@ extern "C" {
#define PY_MONITORING_EVENT_STOP_ITERATION 9
#define PY_MONITORING_IS_INSTRUMENTED_EVENT(ev) \
- ((ev) <= PY_MONITORING_EVENT_STOP_ITERATION)
+ ((ev) < _PY_MONITORING_LOCAL_EVENTS)
/* Other events, mainly exceptions */
diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h
index f171c546efd53c..ba5764e943e676 100644
--- a/Include/internal/pycore_interp.h
+++ b/Include/internal/pycore_interp.h
@@ -186,8 +186,9 @@ struct _is {
_PyOptimizerObject *optimizer;
uint16_t optimizer_resume_threshold;
uint16_t optimizer_backedge_threshold;
+ uint32_t next_func_version;
- _Py_Monitors monitors;
+ _Py_GlobalMonitors monitors;
bool f_opcode_trace_set;
bool sys_profile_initialized;
bool sys_trace_initialized;
diff --git a/Include/internal/pycore_llist.h b/Include/internal/pycore_llist.h
new file mode 100644
index 00000000000000..5fd261da05fa5d
--- /dev/null
+++ b/Include/internal/pycore_llist.h
@@ -0,0 +1,107 @@
+// A doubly-linked list that can be embedded in a struct.
+//
+// Usage:
+// struct llist_node head = LLIST_INIT(head);
+// typedef struct {
+// ...
+// struct llist_node node;
+// ...
+// } MyObj;
+//
+// llist_insert_tail(&head, &obj->node);
+// llist_remove(&obj->node);
+//
+// struct llist_node *node;
+// llist_for_each(node, &head) {
+// MyObj *obj = llist_data(node, MyObj, node);
+// ...
+// }
+//
+
+#ifndef Py_INTERNAL_LLIST_H
+#define Py_INTERNAL_LLIST_H
+
+#include <stddef.h>               // offsetof()
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "Py_BUILD_CORE must be defined to include this header"
+#endif
+
+struct llist_node {
+ struct llist_node *next;
+ struct llist_node *prev;
+};
+
+// Get the struct containing a node.
+#define llist_data(node, type, member) \
+ (type*)((char*)node - offsetof(type, member))
+
+// Iterate over a list.
+#define llist_for_each(node, head) \
+ for (node = (head)->next; node != (head); node = node->next)
+
+// Iterate over a list, but allow removal of the current node.
+#define llist_for_each_safe(node, head) \
+ for (struct llist_node *_next = (node = (head)->next, node->next); \
+ node != (head); node = _next, _next = node->next)
+
+#define LLIST_INIT(head) { &head, &head }
+
+static inline void
+llist_init(struct llist_node *head)
+{
+ head->next = head;
+ head->prev = head;
+}
+
+// Returns 1 if the list is empty, 0 otherwise.
+static inline int
+llist_empty(struct llist_node *head)
+{
+ return head->next == head;
+}
+
+// Appends to the tail of the list.
+static inline void
+llist_insert_tail(struct llist_node *head, struct llist_node *node)
+{
+ node->prev = head->prev;
+ node->next = head;
+ head->prev->next = node;
+ head->prev = node;
+}
+
+// Remove a node from the list.
+static inline void
+llist_remove(struct llist_node *node)
+{
+ struct llist_node *prev = node->prev;
+ struct llist_node *next = node->next;
+ prev->next = next;
+ next->prev = prev;
+ node->prev = NULL;
+ node->next = NULL;
+}
+
+// Append all nodes from head2 onto head1. head2 is left empty.
+static inline void
+llist_concat(struct llist_node *head1, struct llist_node *head2)
+{
+ if (!llist_empty(head2)) {
+ head1->prev->next = head2->next;
+ head2->next->prev = head1->prev;
+
+ head1->prev = head2->prev;
+ head2->prev->next = head1;
+ llist_init(head2);
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_LLIST_H */
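A hedged usage sketch of the list helpers above (not part of the patch; the `task` struct and its fields are hypothetical, and a Py_BUILD_CORE translation unit is assumed):

    struct task {
        int id;
        struct llist_node node;   // embedded list linkage
    };

    static void
    drain_tasks(struct llist_node *head)
    {
        struct llist_node *node;
        // The _safe variant caches the next pointer, so removing the current
        // node during iteration is allowed.
        llist_for_each_safe(node, head) {
            struct task *t = llist_data(node, struct task, node);
            llist_remove(&t->node);
            // ... process or free t ...
        }
    }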
diff --git a/Include/internal/pycore_lock.h b/Include/internal/pycore_lock.h
new file mode 100644
index 00000000000000..c4bb76a40e7b12
--- /dev/null
+++ b/Include/internal/pycore_lock.h
@@ -0,0 +1,158 @@
+// Lightweight locks and other synchronization mechanisms.
+//
+// These implementations are based on WebKit's WTF::Lock. See
+// https://webkit.org/blog/6161/locking-in-webkit/ for a description of the
+// design.
+#ifndef Py_INTERNAL_LOCK_H
+#define Py_INTERNAL_LOCK_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_time.h" // _PyTime_t
+
+
+// A mutex that occupies one byte. The lock can be zero initialized.
+//
+// Only the two least significant bits are used. The remaining bits should be
+// zero:
+// 0b00: unlocked
+// 0b01: locked
+// 0b10: unlocked and has parked threads
+// 0b11: locked and has parked threads
+//
+// Typical initialization:
+// PyMutex m = (PyMutex){0};
+//
+// Typical usage:
+// PyMutex_Lock(&m);
+// ...
+// PyMutex_Unlock(&m);
+typedef struct _PyMutex {
+ uint8_t v;
+} PyMutex;
+
+#define _Py_UNLOCKED 0
+#define _Py_LOCKED 1
+#define _Py_HAS_PARKED 2
+
+// (private) slow path for locking the mutex
+PyAPI_FUNC(void) _PyMutex_LockSlow(PyMutex *m);
+
+// (private) slow path for unlocking the mutex
+PyAPI_FUNC(void) _PyMutex_UnlockSlow(PyMutex *m);
+
+// Locks the mutex.
+//
+// If the mutex is currently locked, the calling thread will be parked until
+// the mutex is unlocked. If the current thread holds the GIL, then the GIL
+// will be released while the thread is parked.
+static inline void
+PyMutex_Lock(PyMutex *m)
+{
+ uint8_t expected = _Py_UNLOCKED;
+ if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_LOCKED)) {
+ _PyMutex_LockSlow(m);
+ }
+}
+
+// Unlocks the mutex.
+static inline void
+PyMutex_Unlock(PyMutex *m)
+{
+ uint8_t expected = _Py_LOCKED;
+ if (!_Py_atomic_compare_exchange_uint8(&m->v, &expected, _Py_UNLOCKED)) {
+ _PyMutex_UnlockSlow(m);
+ }
+}
+
+// Checks if the mutex is currently locked.
+static inline int
+PyMutex_IsLocked(PyMutex *m)
+{
+ return (_Py_atomic_load_uint8(&m->v) & _Py_LOCKED) != 0;
+}
+
+typedef enum _PyLockFlags {
+ // Do not detach/release the GIL when waiting on the lock.
+ _Py_LOCK_DONT_DETACH = 0,
+
+ // Detach/release the GIL while waiting on the lock.
+ _PY_LOCK_DETACH = 1,
+
+ // Handle signals if interrupted while waiting on the lock.
+ _PY_LOCK_HANDLE_SIGNALS = 2,
+} _PyLockFlags;
+
+// Lock a mutex with an optional timeout and additional options. See
+// _PyLockFlags for details.
+extern PyLockStatus
+_PyMutex_LockTimed(PyMutex *m, _PyTime_t timeout_ns, _PyLockFlags flags);
+
+// Unlocks a mutex; returns 0 if the mutex is not locked (used for improved
+// error messages).
+extern int _PyMutex_TryUnlock(PyMutex *m);
+
+
+// PyEvent is a one-time event notification
+typedef struct {
+ uint8_t v;
+} PyEvent;
+
+// Set the event and notify any waiting threads.
+// Export for '_testinternalcapi' shared extension
+PyAPI_FUNC(void) _PyEvent_Notify(PyEvent *evt);
+
+// Wait for the event to be set. If the event is already set, then this returns
+// immediately.
+PyAPI_FUNC(void) PyEvent_Wait(PyEvent *evt);
+
+// Wait for the event to be set, or until the timeout expires. If the event is
+// already set, then this returns immediately. Returns 1 if the event was set,
+// and 0 if the timeout expired or thread was interrupted.
+PyAPI_FUNC(int) PyEvent_WaitTimed(PyEvent *evt, _PyTime_t timeout_ns);
+
+
+// _PyRawMutex implements a word-sized mutex that does not depend on the
+// parking lot API, and therefore can be used in the parking lot
+// implementation.
+//
+// The mutex uses a packed representation: the least significant bit is used to
+// indicate whether the mutex is locked or not. The remaining bits are either
+// zero or a pointer to a `struct raw_mutex_entry` (see lock.c).
+typedef struct {
+ uintptr_t v;
+} _PyRawMutex;
+
+// Slow paths for lock/unlock
+extern void _PyRawMutex_LockSlow(_PyRawMutex *m);
+extern void _PyRawMutex_UnlockSlow(_PyRawMutex *m);
+
+static inline void
+_PyRawMutex_Lock(_PyRawMutex *m)
+{
+ uintptr_t unlocked = _Py_UNLOCKED;
+ if (_Py_atomic_compare_exchange_uintptr(&m->v, &unlocked, _Py_LOCKED)) {
+ return;
+ }
+ _PyRawMutex_LockSlow(m);
+}
+
+static inline void
+_PyRawMutex_Unlock(_PyRawMutex *m)
+{
+ uintptr_t locked = _Py_LOCKED;
+ if (_Py_atomic_compare_exchange_uintptr(&m->v, &locked, _Py_UNLOCKED)) {
+ return;
+ }
+ _PyRawMutex_UnlockSlow(m);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_LOCK_H */
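As a hedged usage sketch of the mutex API above (not part of the patch; a Py_BUILD_CORE translation unit is assumed, and the counter is purely illustrative):

    // Zero-initialization is the documented way to get an unlocked PyMutex.
    static PyMutex counter_mutex;
    static int counter;

    static void
    increment_counter(void)
    {
        PyMutex_Lock(&counter_mutex);     // fast path: a single byte-wide CAS
        counter++;
        PyMutex_Unlock(&counter_mutex);   // slow path only if a waiter parked
    }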
diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h
index daa06ebfbf91a4..2d50f42c9c614d 100644
--- a/Include/internal/pycore_object.h
+++ b/Include/internal/pycore_object.h
@@ -10,6 +10,7 @@ extern "C" {
 #include <stdbool.h>
#include "pycore_gc.h" // _PyObject_GC_IS_TRACKED()
+#include "pycore_emscripten_trampoline.h" // _PyCFunction_TrampolineCall()
#include "pycore_interp.h" // PyInterpreterState.gc
#include "pycore_pystate.h" // _PyInterpreterState_GET()
diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h
index aaf3541e095468..bb37e9a1d1b6b6 100644
--- a/Include/internal/pycore_opcode_metadata.h
+++ b/Include/internal/pycore_opcode_metadata.h
@@ -25,8 +25,8 @@
((OP) == POP_BLOCK) || \
0)
-#define EXIT_TRACE 300
-#define SAVE_IP 301
+#define _EXIT_TRACE 300
+#define _SET_IP 301
#define _GUARD_BOTH_INT 302
#define _BINARY_OP_MULTIPLY_INT 303
#define _BINARY_OP_ADD_INT 304
@@ -39,40 +39,38 @@
#define _BINARY_OP_ADD_UNICODE 311
#define _BINARY_OP_INPLACE_ADD_UNICODE 312
#define _POP_FRAME 313
-#define _LOAD_LOCALS 314
-#define _LOAD_FROM_DICT_OR_GLOBALS 315
-#define _GUARD_GLOBALS_VERSION 316
-#define _GUARD_BUILTINS_VERSION 317
-#define _LOAD_GLOBAL_MODULE 318
-#define _LOAD_GLOBAL_BUILTINS 319
-#define _GUARD_TYPE_VERSION 320
-#define _CHECK_MANAGED_OBJECT_HAS_VALUES 321
-#define _LOAD_ATTR_INSTANCE_VALUE 322
-#define IS_NONE 323
-#define _ITER_CHECK_LIST 324
-#define _ITER_JUMP_LIST 325
-#define _IS_ITER_EXHAUSTED_LIST 326
-#define _ITER_NEXT_LIST 327
-#define _ITER_CHECK_TUPLE 328
-#define _ITER_JUMP_TUPLE 329
-#define _IS_ITER_EXHAUSTED_TUPLE 330
-#define _ITER_NEXT_TUPLE 331
-#define _ITER_CHECK_RANGE 332
-#define _ITER_JUMP_RANGE 333
-#define _IS_ITER_EXHAUSTED_RANGE 334
-#define _ITER_NEXT_RANGE 335
-#define _CHECK_CALL_BOUND_METHOD_EXACT_ARGS 336
-#define _INIT_CALL_BOUND_METHOD_EXACT_ARGS 337
-#define _CHECK_PEP_523 338
-#define _CHECK_FUNCTION_EXACT_ARGS 339
-#define _CHECK_STACK_SPACE 340
-#define _INIT_CALL_PY_EXACT_ARGS 341
-#define _PUSH_FRAME 342
-#define _POP_JUMP_IF_FALSE 343
-#define _POP_JUMP_IF_TRUE 344
-#define JUMP_TO_TOP 345
-#define SAVE_CURRENT_IP 346
-#define INSERT 347
+#define _GUARD_GLOBALS_VERSION 314
+#define _GUARD_BUILTINS_VERSION 315
+#define _LOAD_GLOBAL_MODULE 316
+#define _LOAD_GLOBAL_BUILTINS 317
+#define _GUARD_TYPE_VERSION 318
+#define _CHECK_MANAGED_OBJECT_HAS_VALUES 319
+#define _LOAD_ATTR_INSTANCE_VALUE 320
+#define _IS_NONE 321
+#define _ITER_CHECK_LIST 322
+#define _ITER_JUMP_LIST 323
+#define _IS_ITER_EXHAUSTED_LIST 324
+#define _ITER_NEXT_LIST 325
+#define _ITER_CHECK_TUPLE 326
+#define _ITER_JUMP_TUPLE 327
+#define _IS_ITER_EXHAUSTED_TUPLE 328
+#define _ITER_NEXT_TUPLE 329
+#define _ITER_CHECK_RANGE 330
+#define _ITER_JUMP_RANGE 331
+#define _IS_ITER_EXHAUSTED_RANGE 332
+#define _ITER_NEXT_RANGE 333
+#define _CHECK_CALL_BOUND_METHOD_EXACT_ARGS 334
+#define _INIT_CALL_BOUND_METHOD_EXACT_ARGS 335
+#define _CHECK_PEP_523 336
+#define _CHECK_FUNCTION_EXACT_ARGS 337
+#define _CHECK_STACK_SPACE 338
+#define _INIT_CALL_PY_EXACT_ARGS 339
+#define _PUSH_FRAME 340
+#define _POP_JUMP_IF_FALSE 341
+#define _POP_JUMP_IF_TRUE 342
+#define _JUMP_TO_TOP 343
+#define _SAVE_CURRENT_IP 344
+#define _INSERT 345
extern int _PyOpcode_num_popped(int opcode, int oparg, bool jump);
#ifdef NEED_OPCODE_METADATA
@@ -82,6 +80,8 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 0;
case RESUME:
return 0;
+ case RESUME_CHECK:
+ return 0;
case INSTRUMENTED_RESUME:
return 0;
case LOAD_CLOSURE:
@@ -170,6 +170,8 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 2;
case BINARY_OP_ADD_UNICODE:
return 2;
+ case _BINARY_OP_INPLACE_ADD_UNICODE:
+ return 2;
case BINARY_OP_INPLACE_ADD_UNICODE:
return 2;
case BINARY_SUBSCR:
@@ -266,16 +268,12 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 1;
case DELETE_GLOBAL:
return 0;
- case _LOAD_LOCALS:
- return 0;
case LOAD_LOCALS:
return 0;
- case _LOAD_FROM_DICT_OR_GLOBALS:
+ case LOAD_FROM_DICT_OR_GLOBALS:
return 1;
case LOAD_NAME:
return 0;
- case LOAD_FROM_DICT_OR_GLOBALS:
- return 1;
case LOAD_GLOBAL:
return 0;
case _GUARD_GLOBALS_VERSION:
@@ -406,7 +404,7 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 1;
case POP_JUMP_IF_TRUE:
return 1;
- case IS_NONE:
+ case _IS_NONE:
return 1;
case POP_JUMP_IF_NONE:
return 1;
@@ -434,6 +432,8 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 0;
case _ITER_CHECK_LIST:
return 1;
+ case _ITER_JUMP_LIST:
+ return 1;
case _IS_ITER_EXHAUSTED_LIST:
return 1;
case _ITER_NEXT_LIST:
@@ -442,6 +442,8 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 1;
case _ITER_CHECK_TUPLE:
return 1;
+ case _ITER_JUMP_TUPLE:
+ return 1;
case _IS_ITER_EXHAUSTED_TUPLE:
return 1;
case _ITER_NEXT_TUPLE:
@@ -450,6 +452,8 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 1;
case _ITER_CHECK_RANGE:
return 1;
+ case _ITER_JUMP_RANGE:
+ return 1;
case _IS_ITER_EXHAUSTED_RANGE:
return 1;
case _ITER_NEXT_RANGE:
@@ -484,8 +488,6 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 1;
case LOAD_ATTR_METHOD_LAZY_DICT:
return 1;
- case KW_NAMES:
- return 0;
case INSTRUMENTED_CALL:
return 0;
case CALL:
@@ -510,38 +512,42 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return oparg + 2;
case CALL_PY_WITH_DEFAULTS:
return oparg + 2;
- case CALL_NO_KW_TYPE_1:
+ case CALL_TYPE_1:
return oparg + 2;
- case CALL_NO_KW_STR_1:
+ case CALL_STR_1:
return oparg + 2;
- case CALL_NO_KW_TUPLE_1:
+ case CALL_TUPLE_1:
return oparg + 2;
- case CALL_NO_KW_ALLOC_AND_ENTER_INIT:
+ case CALL_ALLOC_AND_ENTER_INIT:
return oparg + 2;
case EXIT_INIT_CHECK:
return 1;
case CALL_BUILTIN_CLASS:
return oparg + 2;
- case CALL_NO_KW_BUILTIN_O:
+ case CALL_BUILTIN_O:
return oparg + 2;
- case CALL_NO_KW_BUILTIN_FAST:
+ case CALL_BUILTIN_FAST:
return oparg + 2;
case CALL_BUILTIN_FAST_WITH_KEYWORDS:
return oparg + 2;
- case CALL_NO_KW_LEN:
+ case CALL_LEN:
return oparg + 2;
- case CALL_NO_KW_ISINSTANCE:
+ case CALL_ISINSTANCE:
return oparg + 2;
- case CALL_NO_KW_LIST_APPEND:
+ case CALL_LIST_APPEND:
return oparg + 2;
- case CALL_NO_KW_METHOD_DESCRIPTOR_O:
+ case CALL_METHOD_DESCRIPTOR_O:
return oparg + 2;
case CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS:
return oparg + 2;
- case CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS:
+ case CALL_METHOD_DESCRIPTOR_NOARGS:
return oparg + 2;
- case CALL_NO_KW_METHOD_DESCRIPTOR_FAST:
+ case CALL_METHOD_DESCRIPTOR_FAST:
return oparg + 2;
+ case INSTRUMENTED_CALL_KW:
+ return 0;
+ case CALL_KW:
+ return oparg + 3;
case INSTRUMENTED_CALL_FUNCTION_EX:
return 0;
case CALL_FUNCTION_EX:
@@ -590,15 +596,15 @@ int _PyOpcode_num_popped(int opcode, int oparg, bool jump) {
return 1;
case _POP_JUMP_IF_TRUE:
return 1;
- case JUMP_TO_TOP:
+ case _JUMP_TO_TOP:
return 0;
- case SAVE_IP:
+ case _SET_IP:
return 0;
- case SAVE_CURRENT_IP:
+ case _SAVE_CURRENT_IP:
return 0;
- case EXIT_TRACE:
+ case _EXIT_TRACE:
return 0;
- case INSERT:
+ case _INSERT:
return oparg + 1;
default:
return -1;
@@ -614,6 +620,8 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 0;
case RESUME:
return 0;
+ case RESUME_CHECK:
+ return 0;
case INSTRUMENTED_RESUME:
return 0;
case LOAD_CLOSURE:
@@ -702,6 +710,8 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 1;
case BINARY_OP_ADD_UNICODE:
return 1;
+ case _BINARY_OP_INPLACE_ADD_UNICODE:
+ return 0;
case BINARY_OP_INPLACE_ADD_UNICODE:
return 0;
case BINARY_SUBSCR:
@@ -798,16 +808,12 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 0;
case DELETE_GLOBAL:
return 0;
- case _LOAD_LOCALS:
- return 1;
case LOAD_LOCALS:
return 1;
- case _LOAD_FROM_DICT_OR_GLOBALS:
+ case LOAD_FROM_DICT_OR_GLOBALS:
return 1;
case LOAD_NAME:
return 1;
- case LOAD_FROM_DICT_OR_GLOBALS:
- return 1;
case LOAD_GLOBAL:
return ((oparg & 1) ? 1 : 0) + 1;
case _GUARD_GLOBALS_VERSION:
@@ -938,7 +944,7 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 0;
case POP_JUMP_IF_TRUE:
return 0;
- case IS_NONE:
+ case _IS_NONE:
return 1;
case POP_JUMP_IF_NONE:
return 0;
@@ -966,6 +972,8 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 0;
case _ITER_CHECK_LIST:
return 1;
+ case _ITER_JUMP_LIST:
+ return 1;
case _IS_ITER_EXHAUSTED_LIST:
return 2;
case _ITER_NEXT_LIST:
@@ -974,6 +982,8 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 2;
case _ITER_CHECK_TUPLE:
return 1;
+ case _ITER_JUMP_TUPLE:
+ return 1;
case _IS_ITER_EXHAUSTED_TUPLE:
return 2;
case _ITER_NEXT_TUPLE:
@@ -982,6 +992,8 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 2;
case _ITER_CHECK_RANGE:
return 1;
+ case _ITER_JUMP_RANGE:
+ return 1;
case _IS_ITER_EXHAUSTED_RANGE:
return 2;
case _ITER_NEXT_RANGE:
@@ -1016,8 +1028,6 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 1;
case LOAD_ATTR_METHOD_LAZY_DICT:
return 2;
- case KW_NAMES:
- return 0;
case INSTRUMENTED_CALL:
return 0;
case CALL:
@@ -1042,37 +1052,41 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 1;
case CALL_PY_WITH_DEFAULTS:
return 1;
- case CALL_NO_KW_TYPE_1:
+ case CALL_TYPE_1:
return 1;
- case CALL_NO_KW_STR_1:
+ case CALL_STR_1:
return 1;
- case CALL_NO_KW_TUPLE_1:
+ case CALL_TUPLE_1:
return 1;
- case CALL_NO_KW_ALLOC_AND_ENTER_INIT:
+ case CALL_ALLOC_AND_ENTER_INIT:
return 1;
case EXIT_INIT_CHECK:
return 0;
case CALL_BUILTIN_CLASS:
return 1;
- case CALL_NO_KW_BUILTIN_O:
+ case CALL_BUILTIN_O:
return 1;
- case CALL_NO_KW_BUILTIN_FAST:
+ case CALL_BUILTIN_FAST:
return 1;
case CALL_BUILTIN_FAST_WITH_KEYWORDS:
return 1;
- case CALL_NO_KW_LEN:
+ case CALL_LEN:
return 1;
- case CALL_NO_KW_ISINSTANCE:
+ case CALL_ISINSTANCE:
return 1;
- case CALL_NO_KW_LIST_APPEND:
+ case CALL_LIST_APPEND:
return 1;
- case CALL_NO_KW_METHOD_DESCRIPTOR_O:
+ case CALL_METHOD_DESCRIPTOR_O:
return 1;
case CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS:
return 1;
- case CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS:
+ case CALL_METHOD_DESCRIPTOR_NOARGS:
return 1;
- case CALL_NO_KW_METHOD_DESCRIPTOR_FAST:
+ case CALL_METHOD_DESCRIPTOR_FAST:
+ return 1;
+ case INSTRUMENTED_CALL_KW:
+ return 0;
+ case CALL_KW:
return 1;
case INSTRUMENTED_CALL_FUNCTION_EX:
return 0;
@@ -1122,15 +1136,15 @@ int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) {
return 0;
case _POP_JUMP_IF_TRUE:
return 0;
- case JUMP_TO_TOP:
+ case _JUMP_TO_TOP:
return 0;
- case SAVE_IP:
+ case _SET_IP:
return 0;
- case SAVE_CURRENT_IP:
+ case _SAVE_CURRENT_IP:
return 0;
- case EXIT_TRACE:
+ case _EXIT_TRACE:
return 0;
- case INSERT:
+ case _INSERT:
return oparg + 1;
default:
return -1;
@@ -1192,7 +1206,7 @@ struct opcode_macro_expansion {
#define OPARG_CACHE_4 4
#define OPARG_TOP 5
#define OPARG_BOTTOM 6
-#define OPARG_SAVE_IP 7
+#define OPARG_SET_IP 7
#define OPCODE_METADATA_FMT(OP) (_PyOpcode_opcode_metadata[(OP)].instr_format)
#define SAME_OPCODE_METADATA(OP1, OP2) \
@@ -1207,6 +1221,7 @@ extern const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SI
const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = {
[NOP] = { true, INSTR_FMT_IX, 0 },
[RESUME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG },
+ [RESUME_CHECK] = { true, INSTR_FMT_IX, HAS_DEOPT_FLAG },
[INSTRUMENTED_RESUME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG },
[LOAD_CLOSURE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG },
[LOAD_FAST_CHECK] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_LOCAL_FLAG | HAS_ERROR_FLAG },
@@ -1300,11 +1315,9 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = {
[DELETE_ATTR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
[STORE_GLOBAL] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
[DELETE_GLOBAL] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
- [_LOAD_LOCALS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG },
[LOAD_LOCALS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG },
- [_LOAD_FROM_DICT_OR_GLOBALS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
- [LOAD_NAME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
[LOAD_FROM_DICT_OR_GLOBALS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
+ [LOAD_NAME] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
[LOAD_GLOBAL] = { true, INSTR_FMT_IBC000, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG },
[_GUARD_GLOBALS_VERSION] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG },
[_GUARD_BUILTINS_VERSION] = { true, INSTR_FMT_IXC, HAS_DEOPT_FLAG },
@@ -1368,11 +1381,11 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = {
[JUMP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[JUMP_NO_INTERRUPT] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[ENTER_EXECUTOR] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG },
- [POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
- [POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
- [IS_NONE] = { true, INSTR_FMT_IX, 0 },
- [POP_JUMP_IF_NONE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
- [POP_JUMP_IF_NOT_NONE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
+ [POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG },
+ [POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG },
+ [_IS_NONE] = { true, INSTR_FMT_IX, 0 },
+ [POP_JUMP_IF_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG },
+ [POP_JUMP_IF_NOT_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[JUMP_BACKWARD_NO_INTERRUPT] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_JUMP_FLAG },
[GET_LEN] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG },
[MATCH_CLASS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG },
@@ -1412,7 +1425,6 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = {
[LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
[LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
[LOAD_ATTR_METHOD_LAZY_DICT] = { true, INSTR_FMT_IBC00000000, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
- [KW_NAMES] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_CONST_FLAG },
[INSTRUMENTED_CALL] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG },
[CALL] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG },
[_CHECK_CALL_BOUND_METHOD_EXACT_ARGS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
@@ -1421,26 +1433,28 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = {
[_CHECK_FUNCTION_EXACT_ARGS] = { true, INSTR_FMT_IBC0, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
[_CHECK_STACK_SPACE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
[_INIT_CALL_PY_EXACT_ARGS] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
- [_PUSH_FRAME] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG },
- [CALL_BOUND_METHOD_EXACT_ARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_PY_EXACT_ARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [_PUSH_FRAME] = { true, INSTR_FMT_IX, 0 },
+ [CALL_BOUND_METHOD_EXACT_ARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
+ [CALL_PY_EXACT_ARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
[CALL_PY_WITH_DEFAULTS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
- [CALL_NO_KW_TYPE_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
- [CALL_NO_KW_STR_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_TUPLE_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_ALLOC_AND_ENTER_INIT] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_TYPE_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG },
+ [CALL_STR_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_TUPLE_1] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_ALLOC_AND_ENTER_INIT] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
[EXIT_INIT_CHECK] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG },
[CALL_BUILTIN_CLASS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_BUILTIN_O] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_BUILTIN_FAST] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_BUILTIN_O] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_BUILTIN_FAST] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
[CALL_BUILTIN_FAST_WITH_KEYWORDS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_LEN] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_ISINSTANCE] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_LIST_APPEND] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_METHOD_DESCRIPTOR_O] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_LEN] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_ISINSTANCE] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_LIST_APPEND] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_METHOD_DESCRIPTOR_O] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
[CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
- [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_METHOD_DESCRIPTOR_NOARGS] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [CALL_METHOD_DESCRIPTOR_FAST] = { true, INSTR_FMT_IBC00, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_DEOPT_FLAG | HAS_ERROR_FLAG },
+ [INSTRUMENTED_CALL_KW] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_ERROR_FLAG },
+ [CALL_KW] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG },
[INSTRUMENTED_CALL_FUNCTION_EX] = { true, INSTR_FMT_IX, 0 },
[CALL_FUNCTION_EX] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG | HAS_ERROR_FLAG },
[MAKE_FUNCTION] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG },
@@ -1456,20 +1470,20 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[OPCODE_METADATA_SIZE] = {
[INSTRUMENTED_INSTRUCTION] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG },
[INSTRUMENTED_JUMP_FORWARD] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
[INSTRUMENTED_JUMP_BACKWARD] = { true, INSTR_FMT_IB, HAS_ARG_FLAG | HAS_EVAL_BREAK_FLAG },
- [INSTRUMENTED_POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
- [INSTRUMENTED_POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
- [INSTRUMENTED_POP_JUMP_IF_NONE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
- [INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
+ [INSTRUMENTED_POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG },
+ [INSTRUMENTED_POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG },
+ [INSTRUMENTED_POP_JUMP_IF_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG },
+ [INSTRUMENTED_POP_JUMP_IF_NOT_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG },
[EXTENDED_ARG] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
[CACHE] = { true, INSTR_FMT_IX, 0 },
[RESERVED] = { true, INSTR_FMT_IX, 0 },
[_POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
[_POP_JUMP_IF_TRUE] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
- [JUMP_TO_TOP] = { true, INSTR_FMT_IX, HAS_EVAL_BREAK_FLAG },
- [SAVE_IP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
- [SAVE_CURRENT_IP] = { true, INSTR_FMT_IX, 0 },
- [EXIT_TRACE] = { true, INSTR_FMT_IX, 0 },
- [INSERT] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
+ [_JUMP_TO_TOP] = { true, INSTR_FMT_IX, HAS_EVAL_BREAK_FLAG },
+ [_SET_IP] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
+ [_SAVE_CURRENT_IP] = { true, INSTR_FMT_IX, 0 },
+ [_EXIT_TRACE] = { true, INSTR_FMT_IX, 0 },
+ [_INSERT] = { true, INSTR_FMT_IB, HAS_ARG_FLAG },
};
#endif // NEED_OPCODE_METADATA
@@ -1477,7 +1491,7 @@ extern const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACR
#ifdef NEED_OPCODE_METADATA
const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPANSION_SIZE] = {
[NOP] = { .nuops = 1, .uops = { { NOP, 0, 0 } } },
- [RESUME] = { .nuops = 1, .uops = { { RESUME, 0, 0 } } },
+ [RESUME_CHECK] = { .nuops = 1, .uops = { { RESUME_CHECK, 0, 0 } } },
[LOAD_FAST_CHECK] = { .nuops = 1, .uops = { { LOAD_FAST_CHECK, 0, 0 } } },
[LOAD_FAST] = { .nuops = 1, .uops = { { LOAD_FAST, 0, 0 } } },
[LOAD_FAST_AND_CLEAR] = { .nuops = 1, .uops = { { LOAD_FAST_AND_CLEAR, 0, 0 } } },
@@ -1522,8 +1536,8 @@ const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPAN
[DELETE_SUBSCR] = { .nuops = 1, .uops = { { DELETE_SUBSCR, 0, 0 } } },
[CALL_INTRINSIC_1] = { .nuops = 1, .uops = { { CALL_INTRINSIC_1, 0, 0 } } },
[CALL_INTRINSIC_2] = { .nuops = 1, .uops = { { CALL_INTRINSIC_2, 0, 0 } } },
- [RETURN_VALUE] = { .nuops = 3, .uops = { { SAVE_IP, 7, 0 }, { SAVE_CURRENT_IP, 0, 0 }, { _POP_FRAME, 0, 0 } } },
- [RETURN_CONST] = { .nuops = 4, .uops = { { LOAD_CONST, 0, 0 }, { SAVE_IP, 7, 0 }, { SAVE_CURRENT_IP, 0, 0 }, { _POP_FRAME, 0, 0 } } },
+ [RETURN_VALUE] = { .nuops = 3, .uops = { { _SET_IP, 7, 0 }, { _SAVE_CURRENT_IP, 0, 0 }, { _POP_FRAME, 0, 0 } } },
+ [RETURN_CONST] = { .nuops = 4, .uops = { { LOAD_CONST, 0, 0 }, { _SET_IP, 7, 0 }, { _SAVE_CURRENT_IP, 0, 0 }, { _POP_FRAME, 0, 0 } } },
[GET_AITER] = { .nuops = 1, .uops = { { GET_AITER, 0, 0 } } },
[GET_ANEXT] = { .nuops = 1, .uops = { { GET_ANEXT, 0, 0 } } },
[GET_AWAITABLE] = { .nuops = 1, .uops = { { GET_AWAITABLE, 0, 0 } } },
@@ -1541,9 +1555,9 @@ const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPAN
[DELETE_ATTR] = { .nuops = 1, .uops = { { DELETE_ATTR, 0, 0 } } },
[STORE_GLOBAL] = { .nuops = 1, .uops = { { STORE_GLOBAL, 0, 0 } } },
[DELETE_GLOBAL] = { .nuops = 1, .uops = { { DELETE_GLOBAL, 0, 0 } } },
- [LOAD_LOCALS] = { .nuops = 1, .uops = { { _LOAD_LOCALS, 0, 0 } } },
- [LOAD_NAME] = { .nuops = 2, .uops = { { _LOAD_LOCALS, 0, 0 }, { _LOAD_FROM_DICT_OR_GLOBALS, 0, 0 } } },
- [LOAD_FROM_DICT_OR_GLOBALS] = { .nuops = 1, .uops = { { _LOAD_FROM_DICT_OR_GLOBALS, 0, 0 } } },
+ [LOAD_LOCALS] = { .nuops = 1, .uops = { { LOAD_LOCALS, 0, 0 } } },
+ [LOAD_FROM_DICT_OR_GLOBALS] = { .nuops = 1, .uops = { { LOAD_FROM_DICT_OR_GLOBALS, 0, 0 } } },
+ [LOAD_NAME] = { .nuops = 1, .uops = { { LOAD_NAME, 0, 0 } } },
[LOAD_GLOBAL] = { .nuops = 1, .uops = { { LOAD_GLOBAL, 0, 0 } } },
[LOAD_GLOBAL_MODULE] = { .nuops = 2, .uops = { { _GUARD_GLOBALS_VERSION, 1, 1 }, { _LOAD_GLOBAL_MODULE, 1, 3 } } },
[LOAD_GLOBAL_BUILTIN] = { .nuops = 3, .uops = { { _GUARD_GLOBALS_VERSION, 1, 1 }, { _GUARD_BUILTINS_VERSION, 1, 2 }, { _LOAD_GLOBAL_BUILTINS, 1, 3 } } },
@@ -1586,19 +1600,22 @@ const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPAN
[GET_YIELD_FROM_ITER] = { .nuops = 1, .uops = { { GET_YIELD_FROM_ITER, 0, 0 } } },
[WITH_EXCEPT_START] = { .nuops = 1, .uops = { { WITH_EXCEPT_START, 0, 0 } } },
[PUSH_EXC_INFO] = { .nuops = 1, .uops = { { PUSH_EXC_INFO, 0, 0 } } },
- [CALL_BOUND_METHOD_EXACT_ARGS] = { .nuops = 9, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _INIT_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { SAVE_IP, 7, 3 }, { SAVE_CURRENT_IP, 0, 0 }, { _PUSH_FRAME, 0, 0 } } },
- [CALL_PY_EXACT_ARGS] = { .nuops = 7, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { SAVE_IP, 7, 3 }, { SAVE_CURRENT_IP, 0, 0 }, { _PUSH_FRAME, 0, 0 } } },
- [CALL_NO_KW_TYPE_1] = { .nuops = 1, .uops = { { CALL_NO_KW_TYPE_1, 0, 0 } } },
- [CALL_NO_KW_STR_1] = { .nuops = 1, .uops = { { CALL_NO_KW_STR_1, 0, 0 } } },
- [CALL_NO_KW_TUPLE_1] = { .nuops = 1, .uops = { { CALL_NO_KW_TUPLE_1, 0, 0 } } },
+ [CALL_BOUND_METHOD_EXACT_ARGS] = { .nuops = 9, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _INIT_CALL_BOUND_METHOD_EXACT_ARGS, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { _SET_IP, 7, 3 }, { _SAVE_CURRENT_IP, 0, 0 }, { _PUSH_FRAME, 0, 0 } } },
+ [CALL_PY_EXACT_ARGS] = { .nuops = 7, .uops = { { _CHECK_PEP_523, 0, 0 }, { _CHECK_FUNCTION_EXACT_ARGS, 2, 1 }, { _CHECK_STACK_SPACE, 0, 0 }, { _INIT_CALL_PY_EXACT_ARGS, 0, 0 }, { _SET_IP, 7, 3 }, { _SAVE_CURRENT_IP, 0, 0 }, { _PUSH_FRAME, 0, 0 } } },
+ [CALL_TYPE_1] = { .nuops = 1, .uops = { { CALL_TYPE_1, 0, 0 } } },
+ [CALL_STR_1] = { .nuops = 1, .uops = { { CALL_STR_1, 0, 0 } } },
+ [CALL_TUPLE_1] = { .nuops = 1, .uops = { { CALL_TUPLE_1, 0, 0 } } },
[EXIT_INIT_CHECK] = { .nuops = 1, .uops = { { EXIT_INIT_CHECK, 0, 0 } } },
- [CALL_NO_KW_BUILTIN_O] = { .nuops = 1, .uops = { { CALL_NO_KW_BUILTIN_O, 0, 0 } } },
- [CALL_NO_KW_BUILTIN_FAST] = { .nuops = 1, .uops = { { CALL_NO_KW_BUILTIN_FAST, 0, 0 } } },
- [CALL_NO_KW_LEN] = { .nuops = 1, .uops = { { CALL_NO_KW_LEN, 0, 0 } } },
- [CALL_NO_KW_ISINSTANCE] = { .nuops = 1, .uops = { { CALL_NO_KW_ISINSTANCE, 0, 0 } } },
- [CALL_NO_KW_METHOD_DESCRIPTOR_O] = { .nuops = 1, .uops = { { CALL_NO_KW_METHOD_DESCRIPTOR_O, 0, 0 } } },
- [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = { .nuops = 1, .uops = { { CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS, 0, 0 } } },
- [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = { .nuops = 1, .uops = { { CALL_NO_KW_METHOD_DESCRIPTOR_FAST, 0, 0 } } },
+ [CALL_BUILTIN_CLASS] = { .nuops = 1, .uops = { { CALL_BUILTIN_CLASS, 0, 0 } } },
+ [CALL_BUILTIN_O] = { .nuops = 1, .uops = { { CALL_BUILTIN_O, 0, 0 } } },
+ [CALL_BUILTIN_FAST] = { .nuops = 1, .uops = { { CALL_BUILTIN_FAST, 0, 0 } } },
+ [CALL_BUILTIN_FAST_WITH_KEYWORDS] = { .nuops = 1, .uops = { { CALL_BUILTIN_FAST_WITH_KEYWORDS, 0, 0 } } },
+ [CALL_LEN] = { .nuops = 1, .uops = { { CALL_LEN, 0, 0 } } },
+ [CALL_ISINSTANCE] = { .nuops = 1, .uops = { { CALL_ISINSTANCE, 0, 0 } } },
+ [CALL_METHOD_DESCRIPTOR_O] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_O, 0, 0 } } },
+ [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS, 0, 0 } } },
+ [CALL_METHOD_DESCRIPTOR_NOARGS] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_NOARGS, 0, 0 } } },
+ [CALL_METHOD_DESCRIPTOR_FAST] = { .nuops = 1, .uops = { { CALL_METHOD_DESCRIPTOR_FAST, 0, 0 } } },
[MAKE_FUNCTION] = { .nuops = 1, .uops = { { MAKE_FUNCTION, 0, 0 } } },
[SET_FUNCTION_ATTRIBUTE] = { .nuops = 1, .uops = { { SET_FUNCTION_ATTRIBUTE, 0, 0 } } },
[BUILD_SLICE] = { .nuops = 1, .uops = { { BUILD_SLICE, 0, 0 } } },
@@ -1614,8 +1631,8 @@ const struct opcode_macro_expansion _PyOpcode_macro_expansion[OPCODE_MACRO_EXPAN
extern const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE];
#ifdef NEED_OPCODE_METADATA
const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE] = {
- [EXIT_TRACE] = "EXIT_TRACE",
- [SAVE_IP] = "SAVE_IP",
+ [_EXIT_TRACE] = "_EXIT_TRACE",
+ [_SET_IP] = "_SET_IP",
[_GUARD_BOTH_INT] = "_GUARD_BOTH_INT",
[_BINARY_OP_MULTIPLY_INT] = "_BINARY_OP_MULTIPLY_INT",
[_BINARY_OP_ADD_INT] = "_BINARY_OP_ADD_INT",
@@ -1628,8 +1645,6 @@ const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE] = {
[_BINARY_OP_ADD_UNICODE] = "_BINARY_OP_ADD_UNICODE",
[_BINARY_OP_INPLACE_ADD_UNICODE] = "_BINARY_OP_INPLACE_ADD_UNICODE",
[_POP_FRAME] = "_POP_FRAME",
- [_LOAD_LOCALS] = "_LOAD_LOCALS",
- [_LOAD_FROM_DICT_OR_GLOBALS] = "_LOAD_FROM_DICT_OR_GLOBALS",
[_GUARD_GLOBALS_VERSION] = "_GUARD_GLOBALS_VERSION",
[_GUARD_BUILTINS_VERSION] = "_GUARD_BUILTINS_VERSION",
[_LOAD_GLOBAL_MODULE] = "_LOAD_GLOBAL_MODULE",
@@ -1637,7 +1652,7 @@ const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE] = {
[_GUARD_TYPE_VERSION] = "_GUARD_TYPE_VERSION",
[_CHECK_MANAGED_OBJECT_HAS_VALUES] = "_CHECK_MANAGED_OBJECT_HAS_VALUES",
[_LOAD_ATTR_INSTANCE_VALUE] = "_LOAD_ATTR_INSTANCE_VALUE",
- [IS_NONE] = "IS_NONE",
+ [_IS_NONE] = "_IS_NONE",
[_ITER_CHECK_LIST] = "_ITER_CHECK_LIST",
[_ITER_JUMP_LIST] = "_ITER_JUMP_LIST",
[_IS_ITER_EXHAUSTED_LIST] = "_IS_ITER_EXHAUSTED_LIST",
@@ -1659,9 +1674,9 @@ const char * const _PyOpcode_uop_name[OPCODE_UOP_NAME_SIZE] = {
[_PUSH_FRAME] = "_PUSH_FRAME",
[_POP_JUMP_IF_FALSE] = "_POP_JUMP_IF_FALSE",
[_POP_JUMP_IF_TRUE] = "_POP_JUMP_IF_TRUE",
- [JUMP_TO_TOP] = "JUMP_TO_TOP",
- [SAVE_CURRENT_IP] = "SAVE_CURRENT_IP",
- [INSERT] = "INSERT",
+ [_JUMP_TO_TOP] = "_JUMP_TO_TOP",
+ [_SAVE_CURRENT_IP] = "_SAVE_CURRENT_IP",
+ [_INSERT] = "_INSERT",
};
#endif // NEED_OPCODE_METADATA
@@ -1673,21 +1688,9 @@ const char *const _PyOpcode_OpName[268] = {
[RESUME] = "RESUME",
[BEFORE_ASYNC_WITH] = "BEFORE_ASYNC_WITH",
[BEFORE_WITH] = "BEFORE_WITH",
- [BINARY_OP_ADD_FLOAT] = "BINARY_OP_ADD_FLOAT",
- [BINARY_OP_ADD_INT] = "BINARY_OP_ADD_INT",
- [BINARY_OP_ADD_UNICODE] = "BINARY_OP_ADD_UNICODE",
[BINARY_OP_INPLACE_ADD_UNICODE] = "BINARY_OP_INPLACE_ADD_UNICODE",
- [BINARY_OP_MULTIPLY_FLOAT] = "BINARY_OP_MULTIPLY_FLOAT",
- [BINARY_OP_MULTIPLY_INT] = "BINARY_OP_MULTIPLY_INT",
- [BINARY_OP_SUBTRACT_FLOAT] = "BINARY_OP_SUBTRACT_FLOAT",
- [BINARY_OP_SUBTRACT_INT] = "BINARY_OP_SUBTRACT_INT",
[BINARY_SLICE] = "BINARY_SLICE",
[BINARY_SUBSCR] = "BINARY_SUBSCR",
- [BINARY_SUBSCR_DICT] = "BINARY_SUBSCR_DICT",
- [BINARY_SUBSCR_GETITEM] = "BINARY_SUBSCR_GETITEM",
- [BINARY_SUBSCR_LIST_INT] = "BINARY_SUBSCR_LIST_INT",
- [BINARY_SUBSCR_STR_INT] = "BINARY_SUBSCR_STR_INT",
- [BINARY_SUBSCR_TUPLE_INT] = "BINARY_SUBSCR_TUPLE_INT",
[CHECK_EG_MATCH] = "CHECK_EG_MATCH",
[CHECK_EXC_MATCH] = "CHECK_EXC_MATCH",
[CLEANUP_THROW] = "CLEANUP_THROW",
@@ -1719,19 +1722,9 @@ const char *const _PyOpcode_OpName[268] = {
[RETURN_GENERATOR] = "RETURN_GENERATOR",
[RETURN_VALUE] = "RETURN_VALUE",
[SETUP_ANNOTATIONS] = "SETUP_ANNOTATIONS",
- [STORE_ATTR_INSTANCE_VALUE] = "STORE_ATTR_INSTANCE_VALUE",
- [STORE_ATTR_SLOT] = "STORE_ATTR_SLOT",
[STORE_SLICE] = "STORE_SLICE",
[STORE_SUBSCR] = "STORE_SUBSCR",
- [STORE_SUBSCR_DICT] = "STORE_SUBSCR_DICT",
- [STORE_SUBSCR_LIST_INT] = "STORE_SUBSCR_LIST_INT",
[TO_BOOL] = "TO_BOOL",
- [TO_BOOL_ALWAYS_TRUE] = "TO_BOOL_ALWAYS_TRUE",
- [TO_BOOL_BOOL] = "TO_BOOL_BOOL",
- [TO_BOOL_INT] = "TO_BOOL_INT",
- [TO_BOOL_LIST] = "TO_BOOL_LIST",
- [TO_BOOL_NONE] = "TO_BOOL_NONE",
- [TO_BOOL_STR] = "TO_BOOL_STR",
[UNARY_INVERT] = "UNARY_INVERT",
[UNARY_NEGATIVE] = "UNARY_NEGATIVE",
[UNARY_NOT] = "UNARY_NOT",
@@ -1745,31 +1738,11 @@ const char *const _PyOpcode_OpName[268] = {
[BUILD_STRING] = "BUILD_STRING",
[BUILD_TUPLE] = "BUILD_TUPLE",
[CALL] = "CALL",
- [CALL_BOUND_METHOD_EXACT_ARGS] = "CALL_BOUND_METHOD_EXACT_ARGS",
- [CALL_BUILTIN_CLASS] = "CALL_BUILTIN_CLASS",
- [CALL_BUILTIN_FAST_WITH_KEYWORDS] = "CALL_BUILTIN_FAST_WITH_KEYWORDS",
[CALL_FUNCTION_EX] = "CALL_FUNCTION_EX",
[CALL_INTRINSIC_1] = "CALL_INTRINSIC_1",
[CALL_INTRINSIC_2] = "CALL_INTRINSIC_2",
- [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = "CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS",
- [CALL_NO_KW_ALLOC_AND_ENTER_INIT] = "CALL_NO_KW_ALLOC_AND_ENTER_INIT",
- [CALL_NO_KW_BUILTIN_FAST] = "CALL_NO_KW_BUILTIN_FAST",
- [CALL_NO_KW_BUILTIN_O] = "CALL_NO_KW_BUILTIN_O",
- [CALL_NO_KW_ISINSTANCE] = "CALL_NO_KW_ISINSTANCE",
- [CALL_NO_KW_LEN] = "CALL_NO_KW_LEN",
- [CALL_NO_KW_LIST_APPEND] = "CALL_NO_KW_LIST_APPEND",
- [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = "CALL_NO_KW_METHOD_DESCRIPTOR_FAST",
- [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = "CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS",
- [CALL_NO_KW_METHOD_DESCRIPTOR_O] = "CALL_NO_KW_METHOD_DESCRIPTOR_O",
- [CALL_NO_KW_STR_1] = "CALL_NO_KW_STR_1",
- [CALL_NO_KW_TUPLE_1] = "CALL_NO_KW_TUPLE_1",
- [CALL_NO_KW_TYPE_1] = "CALL_NO_KW_TYPE_1",
- [CALL_PY_EXACT_ARGS] = "CALL_PY_EXACT_ARGS",
- [CALL_PY_WITH_DEFAULTS] = "CALL_PY_WITH_DEFAULTS",
+ [CALL_KW] = "CALL_KW",
[COMPARE_OP] = "COMPARE_OP",
- [COMPARE_OP_FLOAT] = "COMPARE_OP_FLOAT",
- [COMPARE_OP_INT] = "COMPARE_OP_INT",
- [COMPARE_OP_STR] = "COMPARE_OP_STR",
[CONTAINS_OP] = "CONTAINS_OP",
[CONVERT_VALUE] = "CONVERT_VALUE",
[COPY] = "COPY",
@@ -1784,10 +1757,6 @@ const char *const _PyOpcode_OpName[268] = {
[ENTER_EXECUTOR] = "ENTER_EXECUTOR",
[EXTENDED_ARG] = "EXTENDED_ARG",
[FOR_ITER] = "FOR_ITER",
- [FOR_ITER_GEN] = "FOR_ITER_GEN",
- [FOR_ITER_LIST] = "FOR_ITER_LIST",
- [FOR_ITER_RANGE] = "FOR_ITER_RANGE",
- [FOR_ITER_TUPLE] = "FOR_ITER_TUPLE",
[GET_AWAITABLE] = "GET_AWAITABLE",
[IMPORT_FROM] = "IMPORT_FROM",
[IMPORT_NAME] = "IMPORT_NAME",
@@ -1795,22 +1764,9 @@ const char *const _PyOpcode_OpName[268] = {
[JUMP_BACKWARD] = "JUMP_BACKWARD",
[JUMP_BACKWARD_NO_INTERRUPT] = "JUMP_BACKWARD_NO_INTERRUPT",
[JUMP_FORWARD] = "JUMP_FORWARD",
- [KW_NAMES] = "KW_NAMES",
[LIST_APPEND] = "LIST_APPEND",
[LIST_EXTEND] = "LIST_EXTEND",
[LOAD_ATTR] = "LOAD_ATTR",
- [LOAD_ATTR_CLASS] = "LOAD_ATTR_CLASS",
- [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = "LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN",
- [LOAD_ATTR_INSTANCE_VALUE] = "LOAD_ATTR_INSTANCE_VALUE",
- [LOAD_ATTR_METHOD_LAZY_DICT] = "LOAD_ATTR_METHOD_LAZY_DICT",
- [LOAD_ATTR_METHOD_NO_DICT] = "LOAD_ATTR_METHOD_NO_DICT",
- [LOAD_ATTR_METHOD_WITH_VALUES] = "LOAD_ATTR_METHOD_WITH_VALUES",
- [LOAD_ATTR_MODULE] = "LOAD_ATTR_MODULE",
- [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = "LOAD_ATTR_NONDESCRIPTOR_NO_DICT",
- [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = "LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES",
- [LOAD_ATTR_PROPERTY] = "LOAD_ATTR_PROPERTY",
- [LOAD_ATTR_SLOT] = "LOAD_ATTR_SLOT",
- [LOAD_ATTR_WITH_HINT] = "LOAD_ATTR_WITH_HINT",
[LOAD_CONST] = "LOAD_CONST",
[LOAD_DEREF] = "LOAD_DEREF",
[LOAD_FAST] = "LOAD_FAST",
@@ -1820,12 +1776,8 @@ const char *const _PyOpcode_OpName[268] = {
[LOAD_FROM_DICT_OR_DEREF] = "LOAD_FROM_DICT_OR_DEREF",
[LOAD_FROM_DICT_OR_GLOBALS] = "LOAD_FROM_DICT_OR_GLOBALS",
[LOAD_GLOBAL] = "LOAD_GLOBAL",
- [LOAD_GLOBAL_BUILTIN] = "LOAD_GLOBAL_BUILTIN",
- [LOAD_GLOBAL_MODULE] = "LOAD_GLOBAL_MODULE",
[LOAD_NAME] = "LOAD_NAME",
[LOAD_SUPER_ATTR] = "LOAD_SUPER_ATTR",
- [LOAD_SUPER_ATTR_ATTR] = "LOAD_SUPER_ATTR_ATTR",
- [LOAD_SUPER_ATTR_METHOD] = "LOAD_SUPER_ATTR_METHOD",
[MAKE_CELL] = "MAKE_CELL",
[MAP_ADD] = "MAP_ADD",
[MATCH_CLASS] = "MATCH_CLASS",
@@ -1837,12 +1789,10 @@ const char *const _PyOpcode_OpName[268] = {
[RERAISE] = "RERAISE",
[RETURN_CONST] = "RETURN_CONST",
[SEND] = "SEND",
- [SEND_GEN] = "SEND_GEN",
[SET_ADD] = "SET_ADD",
[SET_FUNCTION_ATTRIBUTE] = "SET_FUNCTION_ATTRIBUTE",
[SET_UPDATE] = "SET_UPDATE",
[STORE_ATTR] = "STORE_ATTR",
- [STORE_ATTR_WITH_HINT] = "STORE_ATTR_WITH_HINT",
[STORE_DEREF] = "STORE_DEREF",
[STORE_FAST] = "STORE_FAST",
[STORE_FAST_LOAD_FAST] = "STORE_FAST_LOAD_FAST",
@@ -1852,10 +1802,76 @@ const char *const _PyOpcode_OpName[268] = {
[SWAP] = "SWAP",
[UNPACK_EX] = "UNPACK_EX",
[UNPACK_SEQUENCE] = "UNPACK_SEQUENCE",
+ [YIELD_VALUE] = "YIELD_VALUE",
+ [BINARY_OP_ADD_FLOAT] = "BINARY_OP_ADD_FLOAT",
+ [BINARY_OP_ADD_INT] = "BINARY_OP_ADD_INT",
+ [BINARY_OP_ADD_UNICODE] = "BINARY_OP_ADD_UNICODE",
+ [BINARY_OP_MULTIPLY_FLOAT] = "BINARY_OP_MULTIPLY_FLOAT",
+ [BINARY_OP_MULTIPLY_INT] = "BINARY_OP_MULTIPLY_INT",
+ [BINARY_OP_SUBTRACT_FLOAT] = "BINARY_OP_SUBTRACT_FLOAT",
+ [BINARY_OP_SUBTRACT_INT] = "BINARY_OP_SUBTRACT_INT",
+ [BINARY_SUBSCR_DICT] = "BINARY_SUBSCR_DICT",
+ [BINARY_SUBSCR_GETITEM] = "BINARY_SUBSCR_GETITEM",
+ [BINARY_SUBSCR_LIST_INT] = "BINARY_SUBSCR_LIST_INT",
+ [BINARY_SUBSCR_STR_INT] = "BINARY_SUBSCR_STR_INT",
+ [BINARY_SUBSCR_TUPLE_INT] = "BINARY_SUBSCR_TUPLE_INT",
+ [CALL_ALLOC_AND_ENTER_INIT] = "CALL_ALLOC_AND_ENTER_INIT",
+ [CALL_BOUND_METHOD_EXACT_ARGS] = "CALL_BOUND_METHOD_EXACT_ARGS",
+ [CALL_BUILTIN_CLASS] = "CALL_BUILTIN_CLASS",
+ [CALL_BUILTIN_FAST] = "CALL_BUILTIN_FAST",
+ [CALL_BUILTIN_FAST_WITH_KEYWORDS] = "CALL_BUILTIN_FAST_WITH_KEYWORDS",
+ [CALL_BUILTIN_O] = "CALL_BUILTIN_O",
+ [CALL_ISINSTANCE] = "CALL_ISINSTANCE",
+ [CALL_LEN] = "CALL_LEN",
+ [CALL_LIST_APPEND] = "CALL_LIST_APPEND",
+ [CALL_METHOD_DESCRIPTOR_FAST] = "CALL_METHOD_DESCRIPTOR_FAST",
+ [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = "CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS",
+ [CALL_METHOD_DESCRIPTOR_NOARGS] = "CALL_METHOD_DESCRIPTOR_NOARGS",
+ [CALL_METHOD_DESCRIPTOR_O] = "CALL_METHOD_DESCRIPTOR_O",
+ [CALL_PY_EXACT_ARGS] = "CALL_PY_EXACT_ARGS",
+ [CALL_PY_WITH_DEFAULTS] = "CALL_PY_WITH_DEFAULTS",
+ [CALL_STR_1] = "CALL_STR_1",
+ [CALL_TUPLE_1] = "CALL_TUPLE_1",
+ [CALL_TYPE_1] = "CALL_TYPE_1",
+ [COMPARE_OP_FLOAT] = "COMPARE_OP_FLOAT",
+ [COMPARE_OP_INT] = "COMPARE_OP_INT",
+ [COMPARE_OP_STR] = "COMPARE_OP_STR",
+ [FOR_ITER_GEN] = "FOR_ITER_GEN",
+ [FOR_ITER_LIST] = "FOR_ITER_LIST",
+ [FOR_ITER_RANGE] = "FOR_ITER_RANGE",
+ [FOR_ITER_TUPLE] = "FOR_ITER_TUPLE",
+ [LOAD_ATTR_CLASS] = "LOAD_ATTR_CLASS",
+ [LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN] = "LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN",
+ [LOAD_ATTR_INSTANCE_VALUE] = "LOAD_ATTR_INSTANCE_VALUE",
+ [LOAD_ATTR_METHOD_LAZY_DICT] = "LOAD_ATTR_METHOD_LAZY_DICT",
+ [LOAD_ATTR_METHOD_NO_DICT] = "LOAD_ATTR_METHOD_NO_DICT",
+ [LOAD_ATTR_METHOD_WITH_VALUES] = "LOAD_ATTR_METHOD_WITH_VALUES",
+ [LOAD_ATTR_MODULE] = "LOAD_ATTR_MODULE",
+ [LOAD_ATTR_NONDESCRIPTOR_NO_DICT] = "LOAD_ATTR_NONDESCRIPTOR_NO_DICT",
+ [LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES] = "LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES",
+ [LOAD_ATTR_PROPERTY] = "LOAD_ATTR_PROPERTY",
+ [LOAD_ATTR_SLOT] = "LOAD_ATTR_SLOT",
+ [LOAD_ATTR_WITH_HINT] = "LOAD_ATTR_WITH_HINT",
+ [LOAD_GLOBAL_BUILTIN] = "LOAD_GLOBAL_BUILTIN",
+ [LOAD_GLOBAL_MODULE] = "LOAD_GLOBAL_MODULE",
+ [LOAD_SUPER_ATTR_ATTR] = "LOAD_SUPER_ATTR_ATTR",
+ [LOAD_SUPER_ATTR_METHOD] = "LOAD_SUPER_ATTR_METHOD",
+ [RESUME_CHECK] = "RESUME_CHECK",
+ [SEND_GEN] = "SEND_GEN",
+ [STORE_ATTR_INSTANCE_VALUE] = "STORE_ATTR_INSTANCE_VALUE",
+ [STORE_ATTR_SLOT] = "STORE_ATTR_SLOT",
+ [STORE_ATTR_WITH_HINT] = "STORE_ATTR_WITH_HINT",
+ [STORE_SUBSCR_DICT] = "STORE_SUBSCR_DICT",
+ [STORE_SUBSCR_LIST_INT] = "STORE_SUBSCR_LIST_INT",
+ [TO_BOOL_ALWAYS_TRUE] = "TO_BOOL_ALWAYS_TRUE",
+ [TO_BOOL_BOOL] = "TO_BOOL_BOOL",
+ [TO_BOOL_INT] = "TO_BOOL_INT",
+ [TO_BOOL_LIST] = "TO_BOOL_LIST",
+ [TO_BOOL_NONE] = "TO_BOOL_NONE",
+ [TO_BOOL_STR] = "TO_BOOL_STR",
[UNPACK_SEQUENCE_LIST] = "UNPACK_SEQUENCE_LIST",
[UNPACK_SEQUENCE_TUPLE] = "UNPACK_SEQUENCE_TUPLE",
[UNPACK_SEQUENCE_TWO_TUPLE] = "UNPACK_SEQUENCE_TWO_TUPLE",
- [YIELD_VALUE] = "YIELD_VALUE",
[INSTRUMENTED_RESUME] = "INSTRUMENTED_RESUME",
[INSTRUMENTED_END_FOR] = "INSTRUMENTED_END_FOR",
[INSTRUMENTED_END_SEND] = "INSTRUMENTED_END_SEND",
@@ -1865,6 +1881,7 @@ const char *const _PyOpcode_OpName[268] = {
[INSTRUMENTED_LOAD_SUPER_ATTR] = "INSTRUMENTED_LOAD_SUPER_ATTR",
[INSTRUMENTED_FOR_ITER] = "INSTRUMENTED_FOR_ITER",
[INSTRUMENTED_CALL] = "INSTRUMENTED_CALL",
+ [INSTRUMENTED_CALL_KW] = "INSTRUMENTED_CALL_KW",
[INSTRUMENTED_CALL_FUNCTION_EX] = "INSTRUMENTED_CALL_FUNCTION_EX",
[INSTRUMENTED_INSTRUCTION] = "INSTRUMENTED_INSTRUCTION",
[INSTRUMENTED_JUMP_FORWARD] = "INSTRUMENTED_JUMP_FORWARD",
@@ -1893,7 +1910,6 @@ extern const uint8_t _PyOpcode_Caches[256];
#ifdef NEED_OPCODE_METADATA
const uint8_t _PyOpcode_Caches[256] = {
[TO_BOOL] = 3,
- [BINARY_OP] = 1,
[BINARY_SUBSCR] = 1,
[STORE_SUBSCR] = 1,
[SEND] = 1,
@@ -1903,8 +1919,13 @@ const uint8_t _PyOpcode_Caches[256] = {
[LOAD_SUPER_ATTR] = 1,
[LOAD_ATTR] = 9,
[COMPARE_OP] = 1,
+ [POP_JUMP_IF_FALSE] = 1,
+ [POP_JUMP_IF_TRUE] = 1,
+ [POP_JUMP_IF_NONE] = 1,
+ [POP_JUMP_IF_NOT_NONE] = 1,
[FOR_ITER] = 1,
[CALL] = 3,
+ [BINARY_OP] = 1,
[JUMP_BACKWARD] = 1,
};
#endif // NEED_OPCODE_METADATA
@@ -1939,27 +1960,28 @@ const uint8_t _PyOpcode_Deopt[256] = {
[BUILD_TUPLE] = BUILD_TUPLE,
[CACHE] = CACHE,
[CALL] = CALL,
+ [CALL_ALLOC_AND_ENTER_INIT] = CALL,
[CALL_BOUND_METHOD_EXACT_ARGS] = CALL,
[CALL_BUILTIN_CLASS] = CALL,
+ [CALL_BUILTIN_FAST] = CALL,
[CALL_BUILTIN_FAST_WITH_KEYWORDS] = CALL,
+ [CALL_BUILTIN_O] = CALL,
[CALL_FUNCTION_EX] = CALL_FUNCTION_EX,
[CALL_INTRINSIC_1] = CALL_INTRINSIC_1,
[CALL_INTRINSIC_2] = CALL_INTRINSIC_2,
+ [CALL_ISINSTANCE] = CALL,
+ [CALL_KW] = CALL_KW,
+ [CALL_LEN] = CALL,
+ [CALL_LIST_APPEND] = CALL,
+ [CALL_METHOD_DESCRIPTOR_FAST] = CALL,
[CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = CALL,
- [CALL_NO_KW_ALLOC_AND_ENTER_INIT] = CALL,
- [CALL_NO_KW_BUILTIN_FAST] = CALL,
- [CALL_NO_KW_BUILTIN_O] = CALL,
- [CALL_NO_KW_ISINSTANCE] = CALL,
- [CALL_NO_KW_LEN] = CALL,
- [CALL_NO_KW_LIST_APPEND] = CALL,
- [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = CALL,
- [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = CALL,
- [CALL_NO_KW_METHOD_DESCRIPTOR_O] = CALL,
- [CALL_NO_KW_STR_1] = CALL,
- [CALL_NO_KW_TUPLE_1] = CALL,
- [CALL_NO_KW_TYPE_1] = CALL,
+ [CALL_METHOD_DESCRIPTOR_NOARGS] = CALL,
+ [CALL_METHOD_DESCRIPTOR_O] = CALL,
[CALL_PY_EXACT_ARGS] = CALL,
[CALL_PY_WITH_DEFAULTS] = CALL,
+ [CALL_STR_1] = CALL,
+ [CALL_TUPLE_1] = CALL,
+ [CALL_TYPE_1] = CALL,
[CHECK_EG_MATCH] = CHECK_EG_MATCH,
[CHECK_EXC_MATCH] = CHECK_EXC_MATCH,
[CLEANUP_THROW] = CLEANUP_THROW,
@@ -2002,6 +2024,7 @@ const uint8_t _PyOpcode_Deopt[256] = {
[IMPORT_NAME] = IMPORT_NAME,
[INSTRUMENTED_CALL] = INSTRUMENTED_CALL,
[INSTRUMENTED_CALL_FUNCTION_EX] = INSTRUMENTED_CALL_FUNCTION_EX,
+ [INSTRUMENTED_CALL_KW] = INSTRUMENTED_CALL_KW,
[INSTRUMENTED_END_FOR] = INSTRUMENTED_END_FOR,
[INSTRUMENTED_END_SEND] = INSTRUMENTED_END_SEND,
[INSTRUMENTED_FOR_ITER] = INSTRUMENTED_FOR_ITER,
@@ -2023,7 +2046,6 @@ const uint8_t _PyOpcode_Deopt[256] = {
[JUMP_BACKWARD] = JUMP_BACKWARD,
[JUMP_BACKWARD_NO_INTERRUPT] = JUMP_BACKWARD_NO_INTERRUPT,
[JUMP_FORWARD] = JUMP_FORWARD,
- [KW_NAMES] = KW_NAMES,
[LIST_APPEND] = LIST_APPEND,
[LIST_EXTEND] = LIST_EXTEND,
[LOAD_ASSERTION_ERROR] = LOAD_ASSERTION_ERROR,
@@ -2077,6 +2099,7 @@ const uint8_t _PyOpcode_Deopt[256] = {
[RERAISE] = RERAISE,
[RESERVED] = RESERVED,
[RESUME] = RESUME,
+ [RESUME_CHECK] = RESUME,
[RETURN_CONST] = RETURN_CONST,
[RETURN_GENERATOR] = RETURN_GENERATOR,
[RETURN_VALUE] = RETURN_VALUE,
@@ -2122,37 +2145,36 @@ const uint8_t _PyOpcode_Deopt[256] = {
#endif // NEED_OPCODE_METADATA
#define EXTRA_CASES \
- case 188: \
- case 189: \
- case 190: \
- case 191: \
- case 192: \
- case 193: \
- case 194: \
- case 195: \
- case 196: \
- case 197: \
- case 198: \
- case 199: \
- case 200: \
- case 201: \
- case 202: \
- case 203: \
- case 204: \
- case 205: \
- case 206: \
- case 207: \
- case 208: \
- case 209: \
- case 210: \
- case 211: \
- case 212: \
- case 213: \
- case 214: \
- case 215: \
- case 216: \
- case 217: \
- case 218: \
+ case 119: \
+ case 120: \
+ case 121: \
+ case 122: \
+ case 123: \
+ case 124: \
+ case 125: \
+ case 126: \
+ case 127: \
+ case 128: \
+ case 129: \
+ case 130: \
+ case 131: \
+ case 132: \
+ case 133: \
+ case 134: \
+ case 135: \
+ case 136: \
+ case 137: \
+ case 138: \
+ case 139: \
+ case 140: \
+ case 141: \
+ case 142: \
+ case 143: \
+ case 144: \
+ case 145: \
+ case 146: \
+ case 147: \
+ case 148: \
case 219: \
case 220: \
case 221: \
@@ -2170,7 +2192,6 @@ const uint8_t _PyOpcode_Deopt[256] = {
case 233: \
case 234: \
case 235: \
- case 236: \
case 255: \
;
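To make the regenerated stack-effect tables above a little more concrete, here is a rough sketch of how they can be queried (not part of the patch; it assumes a Py_BUILD_CORE translation unit with NEED_OPCODE_METADATA defined in exactly one compilation unit, as the header requires):

    // Net stack effect of an instruction according to the tables above.
    // For CALL_KW with oparg == 2, num_popped reports oparg + 3 == 5
    // (callable, self/NULL, two arguments, kwnames tuple) and num_pushed
    // reports 1 (the result), so the net effect is -4.
    static int
    net_stack_effect(int opcode, int oparg)
    {
        int popped = _PyOpcode_num_popped(opcode, oparg, /*jump=*/false);
        int pushed = _PyOpcode_num_pushed(opcode, oparg, /*jump=*/false);
        if (popped < 0 || pushed < 0) {
            return 0;   // opcode unknown to these tables
        }
        return pushed - popped;
    }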
diff --git a/Include/internal/pycore_parking_lot.h b/Include/internal/pycore_parking_lot.h
new file mode 100644
index 00000000000000..f444da730055e8
--- /dev/null
+++ b/Include/internal/pycore_parking_lot.h
@@ -0,0 +1,99 @@
+// ParkingLot is an internal API for building efficient synchronization
+// primitives like mutexes and events.
+//
+// The API and name are inspired by WebKit's WTF::ParkingLot, which in turn
+// is inspired by Linux's futex API.
+// See https://webkit.org/blog/6161/locking-in-webkit/.
+//
+// The core functionality is an atomic "compare-and-sleep" operation along with
+// an atomic "wake-up" operation.
+
+#ifndef Py_INTERNAL_PARKING_LOT_H
+#define Py_INTERNAL_PARKING_LOT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_time.h" // _PyTime_t
+
+
+enum {
+ // The thread was unparked by another thread.
+ Py_PARK_OK = 0,
+
+ // The value of `address` did not match `expected`.
+ Py_PARK_AGAIN = -1,
+
+ // The thread was unparked due to a timeout.
+ Py_PARK_TIMEOUT = -2,
+
+ // The thread was interrupted by a signal.
+ Py_PARK_INTR = -3,
+};
+
+// Checks that `*address == *expected` and puts the thread to sleep until an
+// unpark operation is called on the same `address`. Otherwise, the function
+// returns `Py_PARK_AGAIN`. The comparison behaves like memcmp, but is
+// performed atomically with respect to unpark operations.
+//
+// The `address_size` argument is the size of the data pointed to by the
+// `address` and `expected` pointers (i.e., sizeof(*address)). It must be
+// 1, 2, 4, or 8.
+//
+// The `timeout_ns` argument specifies the maximum amount of time to wait, with
+// -1 indicating an infinite wait.
+//
+// `park_arg`, which can be NULL, is passed to the unpark operation.
+//
+// If `detach` is true, then the thread will detach/release the GIL while
+// waiting.
+//
+// Example usage:
+//
+// if (_Py_atomic_compare_exchange_uint8(address, &expected, new_value)) {
+// int res = _PyParkingLot_Park(address, &new_value, sizeof(*address),
+// timeout_ns, NULL, 1);
+// ...
+// }
+PyAPI_FUNC(int)
+_PyParkingLot_Park(const void *address, const void *expected,
+ size_t address_size, _PyTime_t timeout_ns,
+ void *park_arg, int detach);
+
+// Callback for _PyParkingLot_Unpark:
+//
+// `arg` is the data of the same name provided to the _PyParkingLot_Unpark()
+// call.
+// `park_arg` is the data provided to the _PyParkingLot_Park() call, or NULL
+// if no waiting thread was found.
+// `has_more_waiters` is true if there are more threads waiting on the same
+// address. May be true in cases where threads are waiting on a different
+// address that maps to the same internal bucket.
+typedef void _Py_unpark_fn_t(void *arg, void *park_arg, int has_more_waiters);
+
+// Unparks a single thread waiting on `address`.
+//
+// Note that fn() is called regardless of whether a thread was unparked. If
+// no threads are waiting on `address` then the `park_arg` argument to fn()
+// will be NULL.
+//
+// Example usage:
+// void callback(void *arg, void *park_arg, int has_more_waiters);
+// _PyParkingLot_Unpark(address, &callback, arg);
+PyAPI_FUNC(void)
+_PyParkingLot_Unpark(const void *address, _Py_unpark_fn_t *fn, void *arg);
+
+// Unparks all threads waiting on `address`.
+PyAPI_FUNC(void) _PyParkingLot_UnparkAll(const void *address);
+
+// Resets the parking lot state after a fork. Forgets all parked threads.
+PyAPI_FUNC(void) _PyParkingLot_AfterFork(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_PARKING_LOT_H */
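To illustrate the compare-and-sleep pattern described above, here is a hedged sketch (not part of the patch; it assumes a Py_BUILD_CORE translation unit and reuses the _Py_atomic_* helpers already used elsewhere in this patch):

    // Wait until another thread publishes 1 in *flag. The park call re-checks
    // the value atomically, so a wake-up between the load and the park is not
    // lost; at worst it returns Py_PARK_AGAIN and the loop re-reads the flag.
    static void
    wait_for_flag(uint8_t *flag)
    {
        uint8_t expected = 0;
        while (_Py_atomic_load_uint8(flag) == 0) {
            // detach=1 releases the GIL while parked; -1 waits indefinitely.
            (void) _PyParkingLot_Park(flag, &expected, sizeof(*flag),
                                      -1, NULL, 1);
        }
    }

    // Publisher side: set the flag, then wake every thread parked on it.
    static void
    set_flag(uint8_t *flag)
    {
        uint8_t expected = 0;
        (void) _Py_atomic_compare_exchange_uint8(flag, &expected, 1);
        _PyParkingLot_UnparkAll(flag);
    }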
diff --git a/Include/internal/pycore_pyerrors.h b/Include/internal/pycore_pyerrors.h
index 0f16fb894d17e1..184eb35e52b47b 100644
--- a/Include/internal/pycore_pyerrors.h
+++ b/Include/internal/pycore_pyerrors.h
@@ -170,6 +170,11 @@ Py_DEPRECATED(3.12) extern void _PyErr_ChainExceptions(PyObject *, PyObject *, P
// Export for '_zoneinfo' shared extension
PyAPI_FUNC(void) _PyErr_ChainExceptions1(PyObject *);
+// Export for '_lsprof' shared extension
+PyAPI_FUNC(void) _PyErr_WriteUnraisableMsg(
+ const char *err_msg,
+ PyObject *obj);
+
#ifdef __cplusplus
}
#endif
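A brief hedged sketch of how an extension such as _lsprof might use the newly exported helper (not part of the patch; the message and the callback object are placeholders):

    /* Report the currently set exception without propagating it, attributing
     * it to `obj` (for example, the profiler object whose callback failed). */
    static void
    report_callback_error(PyObject *obj)
    {
        _PyErr_WriteUnraisableMsg("in profiler callback", obj);
    }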
diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h
index a30036aeb57e05..9fc8ae903b2ac0 100644
--- a/Include/internal/pycore_pystate.h
+++ b/Include/internal/pycore_pystate.h
@@ -67,6 +67,12 @@ _Py_ThreadCanHandleSignals(PyInterpreterState *interp)
extern _Py_thread_local PyThreadState *_Py_tss_tstate;
#endif
+#ifndef NDEBUG
+extern int _PyThreadState_CheckConsistency(PyThreadState *tstate);
+#endif
+
+int _PyThreadState_MustExit(PyThreadState *tstate);
+
// Export for most shared extensions, used via _PyThreadState_GET() static
// inline function.
PyAPI_FUNC(PyThreadState *) _PyThreadState_GetCurrent(void);
diff --git a/Include/internal/pycore_pystats.h b/Include/internal/pycore_pystats.h
new file mode 100644
index 00000000000000..f8af398a560586
--- /dev/null
+++ b/Include/internal/pycore_pystats.h
@@ -0,0 +1,21 @@
+#ifndef Py_INTERNAL_PYSTATS_H
+#define Py_INTERNAL_PYSTATS_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#ifdef Py_STATS
+extern void _Py_StatsOn(void);
+extern void _Py_StatsOff(void);
+extern void _Py_StatsClear(void);
+extern int _Py_PrintSpecializationStats(int to_file);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif // !Py_INTERNAL_PYSTATS_H
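A hedged sketch of how these hooks might be exercised in a --enable-pystats build (not part of the patch; the workload is a placeholder, and printing to stderr when the argument is 0 is an assumption about the internal helper's behavior):

    #ifdef Py_STATS
    static void
    dump_fresh_stats(void)
    {
        _Py_StatsClear();                        // reset all counters
        // ... run the workload of interest ...
        (void) _Py_PrintSpecializationStats(0);  // 0: assumed to mean stderr
    }
    #endif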
diff --git a/Include/internal/pycore_runtime.h b/Include/internal/pycore_runtime.h
index 2ce46f3201d8af..0ddc405f221a1c 100644
--- a/Include/internal/pycore_runtime.h
+++ b/Include/internal/pycore_runtime.h
@@ -267,6 +267,13 @@ typedef struct pyruntimestate {
/* PyInterpreterState.interpreters.main */
PyInterpreterState _main_interpreter;
+
+#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
+ // Used in "Python/emscripten_trampoline.c" to choose between type
+ // reflection trampoline and EM_JS trampoline.
+ bool wasm_type_reflection_available;
+#endif
+
} _PyRuntimeState;
diff --git a/Include/internal/pycore_semaphore.h b/Include/internal/pycore_semaphore.h
new file mode 100644
index 00000000000000..2a4ecb7147acee
--- /dev/null
+++ b/Include/internal/pycore_semaphore.h
@@ -0,0 +1,64 @@
+// The _PySemaphore API is a simplified cross-platform semaphore used to
+// implement wakeup/sleep.
+#ifndef Py_INTERNAL_SEMAPHORE_H
+#define Py_INTERNAL_SEMAPHORE_H
+
+#ifndef Py_BUILD_CORE
+# error "this header requires Py_BUILD_CORE define"
+#endif
+
+#include "pycore_time.h" // _PyTime_t
+
+#ifdef MS_WINDOWS
+# define WIN32_LEAN_AND_MEAN
+# include <windows.h>
+#elif defined(HAVE_PTHREAD_H)
+# include <pthread.h>
+#elif defined(HAVE_PTHREAD_STUBS)
+# include "cpython/pthread_stubs.h"
+#else
+# error "Require native threads. See https://bugs.python.org/issue31370"
+#endif
+
+#if (defined(_POSIX_SEMAPHORES) && (_POSIX_SEMAPHORES+0) != -1 && \
+ defined(HAVE_SEM_TIMEDWAIT))
+# define _Py_USE_SEMAPHORES
+# include <semaphore.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _PySemaphore {
+#if defined(MS_WINDOWS)
+ HANDLE platform_sem;
+#elif defined(_Py_USE_SEMAPHORES)
+ sem_t platform_sem;
+#else
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int counter;
+#endif
+} _PySemaphore;
+
+// Puts the current thread to sleep until _PySemaphore_Wakeup() is called.
+// If `detach` is true, then the thread will detach/release the GIL while
+// sleeping.
+PyAPI_FUNC(int)
+_PySemaphore_Wait(_PySemaphore *sema, _PyTime_t timeout_ns, int detach);
+
+// Wakes up a single thread waiting on sema. Note that _PySemaphore_Wakeup()
+// can be called before _PySemaphore_Wait().
+PyAPI_FUNC(void)
+_PySemaphore_Wakeup(_PySemaphore *sema);
+
+// Initializes/destroys a semaphore
+PyAPI_FUNC(void) _PySemaphore_Init(_PySemaphore *sema);
+PyAPI_FUNC(void) _PySemaphore_Destroy(_PySemaphore *sema);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_INTERNAL_SEMAPHORE_H */
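
The mutex/cond/counter fallback branch above has straightforward semantics; here is a minimal Python sketch of that behaviour (an illustration only, assuming nothing beyond the standard threading module — not the actual C implementation).

import threading

class MiniSemaphore:
    def __init__(self):
        self._cond = threading.Condition()
        self._counter = 0

    def wait(self, timeout=None):
        # Returns True if a wakeup was consumed (possibly one that was
        # already pending before wait() was called).
        with self._cond:
            if self._counter == 0:
                self._cond.wait(timeout)
            if self._counter > 0:
                self._counter -= 1
                return True
            return False

    def wakeup(self):
        # May be called before wait(): the wakeup is not lost.
        with self._cond:
            self._counter += 1
            self._cond.notify()

sema = MiniSemaphore()
sema.wakeup()                   # wakeup before wait() is remembered
assert sema.wait(0.1) is True
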
diff --git a/Include/internal/pycore_uops.h b/Include/internal/pycore_uops.h
index 254eeca2361bea..249f5c010e0092 100644
--- a/Include/internal/pycore_uops.h
+++ b/Include/internal/pycore_uops.h
@@ -8,6 +8,8 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif
+#include "pycore_frame.h" // _PyInterpreterFrame
+
#define _Py_UOP_MAX_TRACE_LENGTH 64
typedef struct {
diff --git a/Include/object.h b/Include/object.h
index b94b2907e4f163..9058558e3cd4d9 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -394,6 +394,10 @@ PyAPI_FUNC(int) PyObject_GetOptionalAttrString(PyObject *, const char *, PyObjec
PyAPI_FUNC(int) PyObject_SetAttr(PyObject *, PyObject *, PyObject *);
PyAPI_FUNC(int) PyObject_DelAttr(PyObject *v, PyObject *name);
PyAPI_FUNC(int) PyObject_HasAttr(PyObject *, PyObject *);
+#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000
+PyAPI_FUNC(int) PyObject_HasAttrWithError(PyObject *, PyObject *);
+PyAPI_FUNC(int) PyObject_HasAttrStringWithError(PyObject *, const char *);
+#endif
PyAPI_FUNC(PyObject *) PyObject_SelfIter(PyObject *);
PyAPI_FUNC(PyObject *) PyObject_GenericGetAttr(PyObject *, PyObject *);
PyAPI_FUNC(int) PyObject_GenericSetAttr(PyObject *, PyObject *, PyObject *);
@@ -656,17 +660,15 @@ static inline void Py_DECREF(PyObject *op) {
#elif defined(Py_REF_DEBUG)
static inline void Py_DECREF(const char *filename, int lineno, PyObject *op)
{
+ if (op->ob_refcnt <= 0) {
+ _Py_NegativeRefcount(filename, lineno, op);
+ }
if (_Py_IsImmortal(op)) {
return;
}
_Py_DECREF_STAT_INC();
_Py_DECREF_DecRefTotal();
- if (--op->ob_refcnt != 0) {
- if (op->ob_refcnt < 0) {
- _Py_NegativeRefcount(filename, lineno, op);
- }
- }
- else {
+ if (--op->ob_refcnt == 0) {
_Py_Dealloc(op);
}
}
diff --git a/Include/opcode_ids.h b/Include/opcode_ids.h
index cd43716415d1db..ba25bd459c1bcd 100644
--- a/Include/opcode_ids.h
+++ b/Include/opcode_ids.h
@@ -13,202 +13,204 @@ extern "C" {
#define CACHE 0
#define BEFORE_ASYNC_WITH 1
#define BEFORE_WITH 2
-#define BINARY_OP_ADD_FLOAT 3
-#define BINARY_OP_ADD_INT 4
-#define BINARY_OP_ADD_UNICODE 5
-#define BINARY_OP_INPLACE_ADD_UNICODE 6
-#define BINARY_OP_MULTIPLY_FLOAT 7
-#define BINARY_OP_MULTIPLY_INT 8
-#define BINARY_OP_SUBTRACT_FLOAT 9
-#define BINARY_OP_SUBTRACT_INT 10
-#define BINARY_SLICE 11
-#define BINARY_SUBSCR 12
-#define BINARY_SUBSCR_DICT 13
-#define BINARY_SUBSCR_GETITEM 14
-#define BINARY_SUBSCR_LIST_INT 15
-#define BINARY_SUBSCR_STR_INT 16
+#define BINARY_OP_INPLACE_ADD_UNICODE 3
+#define BINARY_SLICE 4
+#define BINARY_SUBSCR 5
+#define CHECK_EG_MATCH 6
+#define CHECK_EXC_MATCH 7
+#define CLEANUP_THROW 8
+#define DELETE_SUBSCR 9
+#define END_ASYNC_FOR 10
+#define END_FOR 11
+#define END_SEND 12
+#define EXIT_INIT_CHECK 13
+#define FORMAT_SIMPLE 14
+#define FORMAT_WITH_SPEC 15
+#define GET_AITER 16
#define RESERVED 17
-#define BINARY_SUBSCR_TUPLE_INT 18
-#define CHECK_EG_MATCH 19
-#define CHECK_EXC_MATCH 20
-#define CLEANUP_THROW 21
-#define DELETE_SUBSCR 22
-#define END_ASYNC_FOR 23
-#define END_FOR 24
-#define END_SEND 25
-#define EXIT_INIT_CHECK 26
-#define FORMAT_SIMPLE 27
-#define FORMAT_WITH_SPEC 28
-#define GET_AITER 29
-#define GET_ANEXT 30
-#define GET_ITER 31
-#define GET_LEN 32
-#define GET_YIELD_FROM_ITER 33
-#define INTERPRETER_EXIT 34
-#define LOAD_ASSERTION_ERROR 35
-#define LOAD_BUILD_CLASS 36
-#define LOAD_LOCALS 37
-#define MAKE_FUNCTION 38
-#define MATCH_KEYS 39
-#define MATCH_MAPPING 40
-#define MATCH_SEQUENCE 41
-#define NOP 42
-#define POP_EXCEPT 43
-#define POP_TOP 44
-#define PUSH_EXC_INFO 45
-#define PUSH_NULL 46
-#define RETURN_GENERATOR 47
-#define RETURN_VALUE 48
-#define SETUP_ANNOTATIONS 49
-#define STORE_ATTR_INSTANCE_VALUE 50
-#define STORE_ATTR_SLOT 51
-#define STORE_SLICE 52
-#define STORE_SUBSCR 53
-#define STORE_SUBSCR_DICT 54
-#define STORE_SUBSCR_LIST_INT 55
-#define TO_BOOL 56
-#define TO_BOOL_ALWAYS_TRUE 57
-#define TO_BOOL_BOOL 58
-#define TO_BOOL_INT 59
-#define TO_BOOL_LIST 60
-#define TO_BOOL_NONE 61
-#define TO_BOOL_STR 62
-#define UNARY_INVERT 63
-#define UNARY_NEGATIVE 64
-#define UNARY_NOT 65
-#define WITH_EXCEPT_START 66
-#define HAVE_ARGUMENT 67
-#define BINARY_OP 67
-#define BUILD_CONST_KEY_MAP 68
-#define BUILD_LIST 69
-#define BUILD_MAP 70
-#define BUILD_SET 71
-#define BUILD_SLICE 72
-#define BUILD_STRING 73
-#define BUILD_TUPLE 74
-#define CALL 75
-#define CALL_BOUND_METHOD_EXACT_ARGS 76
-#define CALL_BUILTIN_CLASS 77
-#define CALL_BUILTIN_FAST_WITH_KEYWORDS 78
-#define CALL_FUNCTION_EX 79
-#define CALL_INTRINSIC_1 80
-#define CALL_INTRINSIC_2 81
-#define CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS 82
-#define CALL_NO_KW_ALLOC_AND_ENTER_INIT 83
-#define CALL_NO_KW_BUILTIN_FAST 84
-#define CALL_NO_KW_BUILTIN_O 85
-#define CALL_NO_KW_ISINSTANCE 86
-#define CALL_NO_KW_LEN 87
-#define CALL_NO_KW_LIST_APPEND 88
-#define CALL_NO_KW_METHOD_DESCRIPTOR_FAST 89
-#define CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS 90
-#define CALL_NO_KW_METHOD_DESCRIPTOR_O 91
-#define CALL_NO_KW_STR_1 92
-#define CALL_NO_KW_TUPLE_1 93
-#define CALL_NO_KW_TYPE_1 94
-#define CALL_PY_EXACT_ARGS 95
-#define CALL_PY_WITH_DEFAULTS 96
-#define COMPARE_OP 97
-#define COMPARE_OP_FLOAT 98
-#define COMPARE_OP_INT 99
-#define COMPARE_OP_STR 100
-#define CONTAINS_OP 101
-#define CONVERT_VALUE 102
-#define COPY 103
-#define COPY_FREE_VARS 104
-#define DELETE_ATTR 105
-#define DELETE_DEREF 106
-#define DELETE_FAST 107
-#define DELETE_GLOBAL 108
-#define DELETE_NAME 109
-#define DICT_MERGE 110
-#define DICT_UPDATE 111
-#define ENTER_EXECUTOR 112
-#define EXTENDED_ARG 113
-#define FOR_ITER 114
-#define FOR_ITER_GEN 115
-#define FOR_ITER_LIST 116
-#define FOR_ITER_RANGE 117
-#define FOR_ITER_TUPLE 118
-#define GET_AWAITABLE 119
-#define IMPORT_FROM 120
-#define IMPORT_NAME 121
-#define IS_OP 122
-#define JUMP_BACKWARD 123
-#define JUMP_BACKWARD_NO_INTERRUPT 124
-#define JUMP_FORWARD 125
-#define KW_NAMES 126
-#define LIST_APPEND 127
-#define LIST_EXTEND 128
-#define LOAD_ATTR 129
-#define LOAD_ATTR_CLASS 130
-#define LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN 131
-#define LOAD_ATTR_INSTANCE_VALUE 132
-#define LOAD_ATTR_METHOD_LAZY_DICT 133
-#define LOAD_ATTR_METHOD_NO_DICT 134
-#define LOAD_ATTR_METHOD_WITH_VALUES 135
-#define LOAD_ATTR_MODULE 136
-#define LOAD_ATTR_NONDESCRIPTOR_NO_DICT 137
-#define LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES 138
-#define LOAD_ATTR_PROPERTY 139
-#define LOAD_ATTR_SLOT 140
-#define LOAD_ATTR_WITH_HINT 141
-#define LOAD_CONST 142
-#define LOAD_DEREF 143
-#define LOAD_FAST 144
-#define LOAD_FAST_AND_CLEAR 145
-#define LOAD_FAST_CHECK 146
-#define LOAD_FAST_LOAD_FAST 147
-#define LOAD_FROM_DICT_OR_DEREF 148
-#define LOAD_FROM_DICT_OR_GLOBALS 149
-#define LOAD_GLOBAL 150
-#define LOAD_GLOBAL_BUILTIN 151
-#define LOAD_GLOBAL_MODULE 152
-#define LOAD_NAME 153
-#define LOAD_SUPER_ATTR 154
-#define LOAD_SUPER_ATTR_ATTR 155
-#define LOAD_SUPER_ATTR_METHOD 156
-#define MAKE_CELL 157
-#define MAP_ADD 158
-#define MATCH_CLASS 159
-#define POP_JUMP_IF_FALSE 160
-#define POP_JUMP_IF_NONE 161
-#define POP_JUMP_IF_NOT_NONE 162
-#define POP_JUMP_IF_TRUE 163
-#define RAISE_VARARGS 164
-#define RERAISE 165
-#define RESUME 166
-#define RETURN_CONST 167
-#define SEND 168
-#define SEND_GEN 169
-#define SET_ADD 170
-#define SET_FUNCTION_ATTRIBUTE 171
-#define SET_UPDATE 172
-#define STORE_ATTR 173
-#define STORE_ATTR_WITH_HINT 174
-#define STORE_DEREF 175
-#define STORE_FAST 176
-#define STORE_FAST_LOAD_FAST 177
-#define STORE_FAST_STORE_FAST 178
-#define STORE_GLOBAL 179
-#define STORE_NAME 180
-#define SWAP 181
-#define UNPACK_EX 182
-#define UNPACK_SEQUENCE 183
-#define UNPACK_SEQUENCE_LIST 184
-#define UNPACK_SEQUENCE_TUPLE 185
-#define UNPACK_SEQUENCE_TWO_TUPLE 186
-#define YIELD_VALUE 187
-#define MIN_INSTRUMENTED_OPCODE 237
-#define INSTRUMENTED_RESUME 237
-#define INSTRUMENTED_END_FOR 238
-#define INSTRUMENTED_END_SEND 239
-#define INSTRUMENTED_RETURN_VALUE 240
-#define INSTRUMENTED_RETURN_CONST 241
-#define INSTRUMENTED_YIELD_VALUE 242
-#define INSTRUMENTED_LOAD_SUPER_ATTR 243
-#define INSTRUMENTED_FOR_ITER 244
-#define INSTRUMENTED_CALL 245
+#define GET_ANEXT 18
+#define GET_ITER 19
+#define GET_LEN 20
+#define GET_YIELD_FROM_ITER 21
+#define INTERPRETER_EXIT 22
+#define LOAD_ASSERTION_ERROR 23
+#define LOAD_BUILD_CLASS 24
+#define LOAD_LOCALS 25
+#define MAKE_FUNCTION 26
+#define MATCH_KEYS 27
+#define MATCH_MAPPING 28
+#define MATCH_SEQUENCE 29
+#define NOP 30
+#define POP_EXCEPT 31
+#define POP_TOP 32
+#define PUSH_EXC_INFO 33
+#define PUSH_NULL 34
+#define RETURN_GENERATOR 35
+#define RETURN_VALUE 36
+#define SETUP_ANNOTATIONS 37
+#define STORE_SLICE 38
+#define STORE_SUBSCR 39
+#define TO_BOOL 40
+#define UNARY_INVERT 41
+#define UNARY_NEGATIVE 42
+#define UNARY_NOT 43
+#define WITH_EXCEPT_START 44
+#define HAVE_ARGUMENT 45
+#define BINARY_OP 45
+#define BUILD_CONST_KEY_MAP 46
+#define BUILD_LIST 47
+#define BUILD_MAP 48
+#define BUILD_SET 49
+#define BUILD_SLICE 50
+#define BUILD_STRING 51
+#define BUILD_TUPLE 52
+#define CALL 53
+#define CALL_FUNCTION_EX 54
+#define CALL_INTRINSIC_1 55
+#define CALL_INTRINSIC_2 56
+#define CALL_KW 57
+#define COMPARE_OP 58
+#define CONTAINS_OP 59
+#define CONVERT_VALUE 60
+#define COPY 61
+#define COPY_FREE_VARS 62
+#define DELETE_ATTR 63
+#define DELETE_DEREF 64
+#define DELETE_FAST 65
+#define DELETE_GLOBAL 66
+#define DELETE_NAME 67
+#define DICT_MERGE 68
+#define DICT_UPDATE 69
+#define ENTER_EXECUTOR 70
+#define EXTENDED_ARG 71
+#define FOR_ITER 72
+#define GET_AWAITABLE 73
+#define IMPORT_FROM 74
+#define IMPORT_NAME 75
+#define IS_OP 76
+#define JUMP_BACKWARD 77
+#define JUMP_BACKWARD_NO_INTERRUPT 78
+#define JUMP_FORWARD 79
+#define LIST_APPEND 80
+#define LIST_EXTEND 81
+#define LOAD_ATTR 82
+#define LOAD_CONST 83
+#define LOAD_DEREF 84
+#define LOAD_FAST 85
+#define LOAD_FAST_AND_CLEAR 86
+#define LOAD_FAST_CHECK 87
+#define LOAD_FAST_LOAD_FAST 88
+#define LOAD_FROM_DICT_OR_DEREF 89
+#define LOAD_FROM_DICT_OR_GLOBALS 90
+#define LOAD_GLOBAL 91
+#define LOAD_NAME 92
+#define LOAD_SUPER_ATTR 93
+#define MAKE_CELL 94
+#define MAP_ADD 95
+#define MATCH_CLASS 96
+#define POP_JUMP_IF_FALSE 97
+#define POP_JUMP_IF_NONE 98
+#define POP_JUMP_IF_NOT_NONE 99
+#define POP_JUMP_IF_TRUE 100
+#define RAISE_VARARGS 101
+#define RERAISE 102
+#define RETURN_CONST 103
+#define SEND 104
+#define SET_ADD 105
+#define SET_FUNCTION_ATTRIBUTE 106
+#define SET_UPDATE 107
+#define STORE_ATTR 108
+#define STORE_DEREF 109
+#define STORE_FAST 110
+#define STORE_FAST_LOAD_FAST 111
+#define STORE_FAST_STORE_FAST 112
+#define STORE_GLOBAL 113
+#define STORE_NAME 114
+#define SWAP 115
+#define UNPACK_EX 116
+#define UNPACK_SEQUENCE 117
+#define YIELD_VALUE 118
+#define RESUME 149
+#define BINARY_OP_ADD_FLOAT 150
+#define BINARY_OP_ADD_INT 151
+#define BINARY_OP_ADD_UNICODE 152
+#define BINARY_OP_MULTIPLY_FLOAT 153
+#define BINARY_OP_MULTIPLY_INT 154
+#define BINARY_OP_SUBTRACT_FLOAT 155
+#define BINARY_OP_SUBTRACT_INT 156
+#define BINARY_SUBSCR_DICT 157
+#define BINARY_SUBSCR_GETITEM 158
+#define BINARY_SUBSCR_LIST_INT 159
+#define BINARY_SUBSCR_STR_INT 160
+#define BINARY_SUBSCR_TUPLE_INT 161
+#define CALL_ALLOC_AND_ENTER_INIT 162
+#define CALL_BOUND_METHOD_EXACT_ARGS 163
+#define CALL_BUILTIN_CLASS 164
+#define CALL_BUILTIN_FAST 165
+#define CALL_BUILTIN_FAST_WITH_KEYWORDS 166
+#define CALL_BUILTIN_O 167
+#define CALL_ISINSTANCE 168
+#define CALL_LEN 169
+#define CALL_LIST_APPEND 170
+#define CALL_METHOD_DESCRIPTOR_FAST 171
+#define CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS 172
+#define CALL_METHOD_DESCRIPTOR_NOARGS 173
+#define CALL_METHOD_DESCRIPTOR_O 174
+#define CALL_PY_EXACT_ARGS 175
+#define CALL_PY_WITH_DEFAULTS 176
+#define CALL_STR_1 177
+#define CALL_TUPLE_1 178
+#define CALL_TYPE_1 179
+#define COMPARE_OP_FLOAT 180
+#define COMPARE_OP_INT 181
+#define COMPARE_OP_STR 182
+#define FOR_ITER_GEN 183
+#define FOR_ITER_LIST 184
+#define FOR_ITER_RANGE 185
+#define FOR_ITER_TUPLE 186
+#define LOAD_ATTR_CLASS 187
+#define LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN 188
+#define LOAD_ATTR_INSTANCE_VALUE 189
+#define LOAD_ATTR_METHOD_LAZY_DICT 190
+#define LOAD_ATTR_METHOD_NO_DICT 191
+#define LOAD_ATTR_METHOD_WITH_VALUES 192
+#define LOAD_ATTR_MODULE 193
+#define LOAD_ATTR_NONDESCRIPTOR_NO_DICT 194
+#define LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES 195
+#define LOAD_ATTR_PROPERTY 196
+#define LOAD_ATTR_SLOT 197
+#define LOAD_ATTR_WITH_HINT 198
+#define LOAD_GLOBAL_BUILTIN 199
+#define LOAD_GLOBAL_MODULE 200
+#define LOAD_SUPER_ATTR_ATTR 201
+#define LOAD_SUPER_ATTR_METHOD 202
+#define RESUME_CHECK 203
+#define SEND_GEN 204
+#define STORE_ATTR_INSTANCE_VALUE 205
+#define STORE_ATTR_SLOT 206
+#define STORE_ATTR_WITH_HINT 207
+#define STORE_SUBSCR_DICT 208
+#define STORE_SUBSCR_LIST_INT 209
+#define TO_BOOL_ALWAYS_TRUE 210
+#define TO_BOOL_BOOL 211
+#define TO_BOOL_INT 212
+#define TO_BOOL_LIST 213
+#define TO_BOOL_NONE 214
+#define TO_BOOL_STR 215
+#define UNPACK_SEQUENCE_LIST 216
+#define UNPACK_SEQUENCE_TUPLE 217
+#define UNPACK_SEQUENCE_TWO_TUPLE 218
+#define MIN_INSTRUMENTED_OPCODE 236
+#define INSTRUMENTED_RESUME 236
+#define INSTRUMENTED_END_FOR 237
+#define INSTRUMENTED_END_SEND 238
+#define INSTRUMENTED_RETURN_VALUE 239
+#define INSTRUMENTED_RETURN_CONST 240
+#define INSTRUMENTED_YIELD_VALUE 241
+#define INSTRUMENTED_LOAD_SUPER_ATTR 242
+#define INSTRUMENTED_FOR_ITER 243
+#define INSTRUMENTED_CALL 244
+#define INSTRUMENTED_CALL_KW 245
#define INSTRUMENTED_CALL_FUNCTION_EX 246
#define INSTRUMENTED_INSTRUCTION 247
#define INSTRUMENTED_JUMP_FORWARD 248
diff --git a/Include/pyatomic.h b/Include/pyatomic.h
new file mode 100644
index 00000000000000..2ce2c81cf5251a
--- /dev/null
+++ b/Include/pyatomic.h
@@ -0,0 +1,16 @@
+#ifndef Py_ATOMIC_H
+#define Py_ATOMIC_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef Py_LIMITED_API
+# define Py_CPYTHON_ATOMIC_H
+# include "cpython/pyatomic.h"
+# undef Py_CPYTHON_ATOMIC_H
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_ATOMIC_H */
diff --git a/Include/pyport.h b/Include/pyport.h
index f2046de2bbcc5a..40d580a870fc75 100644
--- a/Include/pyport.h
+++ b/Include/pyport.h
@@ -48,6 +48,10 @@
# define Py_BUILD_CORE
#endif
+#if defined(Py_LIMITED_API) && defined(Py_BUILD_CORE)
+# error "Py_LIMITED_API is not compatible with Py_BUILD_CORE"
+#endif
+
/**************************************************************************
Symbols and macros to supply platform-independent interfaces to basic
@@ -184,62 +188,6 @@ typedef Py_ssize_t Py_ssize_clean_t;
# define Py_MEMCPY memcpy
#endif
-/********************************************
- * WRAPPER FOR <time.h> and/or <sys/time.h> *
- ********************************************/
-
-#ifdef HAVE_SYS_TIME_H
-#include <sys/time.h>
-#endif
-#include <time.h>
-
-/******************************
- * WRAPPER FOR <sys/select.h> *
- ******************************/
-
-/* NB caller must include <sys/types.h> */
-
-#ifdef HAVE_SYS_SELECT_H
-#include <sys/select.h>
-#endif /* !HAVE_SYS_SELECT_H */
-
-/*******************************
- * stat() and fstat() fiddling *
- *******************************/
-
-#ifdef HAVE_SYS_STAT_H
-#include <sys/stat.h>
-#elif defined(HAVE_STAT_H)
-#include <stat.h>
-#endif
-
-#ifndef S_IFMT
-/* VisualAge C/C++ Failed to Define MountType Field in sys/stat.h */
-#define S_IFMT 0170000
-#endif
-
-#ifndef S_IFLNK
-/* Windows doesn't define S_IFLNK but posixmodule.c maps
- * IO_REPARSE_TAG_SYMLINK to S_IFLNK */
-# define S_IFLNK 0120000
-#endif
-
-#ifndef S_ISREG
-#define S_ISREG(x) (((x) & S_IFMT) == S_IFREG)
-#endif
-
-#ifndef S_ISDIR
-#define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR)
-#endif
-
-#ifndef S_ISCHR
-#define S_ISCHR(x) (((x) & S_IFMT) == S_IFCHR)
-#endif
-
-#ifndef S_ISLNK
-#define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK)
-#endif
-
#ifdef __cplusplus
/* Move this down here since some C++ #include's don't like to be included
inside an extern "C" */
@@ -411,117 +359,8 @@ extern "C" {
# define Py_NO_INLINE
#endif
-/* On 4.4BSD-descendants, ctype functions serves the whole range of
- * wchar_t character set rather than single byte code points only.
- * This characteristic can break some operations of string object
- * including str.upper() and str.split() on UTF-8 locales. This
- * workaround was provided by Tim Robbins of FreeBSD project.
- */
-
-#if defined(__APPLE__)
-# define _PY_PORT_CTYPE_UTF8_ISSUE
-#endif
-
-#ifdef _PY_PORT_CTYPE_UTF8_ISSUE
-#ifndef __cplusplus
- /* The workaround below is unsafe in C++ because
- * the <ctype.h> defines these symbols as real functions,
- * with a slightly different signature.
- * See issue #10910
- */
-#include <ctype.h>
-#include <wctype.h>
-#undef isalnum
-#define isalnum(c) iswalnum(btowc(c))
-#undef isalpha
-#define isalpha(c) iswalpha(btowc(c))
-#undef islower
-#define islower(c) iswlower(btowc(c))
-#undef isspace
-#define isspace(c) iswspace(btowc(c))
-#undef isupper
-#define isupper(c) iswupper(btowc(c))
-#undef tolower
-#define tolower(c) towlower(btowc(c))
-#undef toupper
-#define toupper(c) towupper(btowc(c))
-#endif
-#endif
-
-
-/* Declarations for symbol visibility.
-
- PyAPI_FUNC(type): Declares a public Python API function and return type
- PyAPI_DATA(type): Declares public Python data and its type
- PyMODINIT_FUNC: A Python module init function. If these functions are
- inside the Python core, they are private to the core.
- If in an extension module, it may be declared with
- external linkage depending on the platform.
-
- As a number of platforms support/require "__declspec(dllimport/dllexport)",
- we support a HAVE_DECLSPEC_DLL macro to save duplication.
-*/
-
-/*
- All windows ports, except cygwin, are handled in PC/pyconfig.h.
-
- Cygwin is the only other autoconf platform requiring special
- linkage handling and it uses __declspec().
-*/
-#if defined(__CYGWIN__)
-# define HAVE_DECLSPEC_DLL
-#endif
-
#include "exports.h"
-/* only get special linkage if built as shared or platform is Cygwin */
-#if defined(Py_ENABLE_SHARED) || defined(__CYGWIN__)
-# if defined(HAVE_DECLSPEC_DLL)
-# if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
-# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
-# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
- /* module init functions inside the core need no external linkage */
- /* except for Cygwin to handle embedding */
-# if defined(__CYGWIN__)
-# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
-# else /* __CYGWIN__ */
-# define PyMODINIT_FUNC PyObject*
-# endif /* __CYGWIN__ */
-# else /* Py_BUILD_CORE */
- /* Building an extension module, or an embedded situation */
- /* public Python functions and data are imported */
- /* Under Cygwin, auto-import functions to prevent compilation */
- /* failures similar to those described at the bottom of 4.1: */
- /* http://docs.python.org/extending/windows.html#a-cookbook-approach */
-# if !defined(__CYGWIN__)
-# define PyAPI_FUNC(RTYPE) Py_IMPORTED_SYMBOL RTYPE
-# endif /* !__CYGWIN__ */
-# define PyAPI_DATA(RTYPE) extern Py_IMPORTED_SYMBOL RTYPE
- /* module init functions outside the core must be exported */
-# if defined(__cplusplus)
-# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
-# else /* __cplusplus */
-# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
-# endif /* __cplusplus */
-# endif /* Py_BUILD_CORE */
-# endif /* HAVE_DECLSPEC_DLL */
-#endif /* Py_ENABLE_SHARED */
-
-/* If no external linkage macros defined by now, create defaults */
-#ifndef PyAPI_FUNC
-# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
-#endif
-#ifndef PyAPI_DATA
-# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
-#endif
-#ifndef PyMODINIT_FUNC
-# if defined(__cplusplus)
-# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
-# else /* __cplusplus */
-# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
-# endif /* __cplusplus */
-#endif
-
/* limits.h constants that may be missing */
#ifndef INT_MAX
diff --git a/Include/pystats.h b/Include/pystats.h
index b1957596745f00..acfa32201711e0 100644
--- a/Include/pystats.h
+++ b/Include/pystats.h
@@ -1,4 +1,9 @@
-
+// Statistics on Python performance (public API).
+//
+// Define _Py_INCREF_STAT_INC() and _Py_DECREF_STAT_INC() used by Py_INCREF()
+// and Py_DECREF().
+//
+// See Include/cpython/pystats.h for the full API.
#ifndef Py_PYSTATS_H
#define Py_PYSTATS_H
@@ -6,119 +11,16 @@
extern "C" {
#endif
-#ifdef Py_STATS
-
-#define SPECIALIZATION_FAILURE_KINDS 36
-
-/* Stats for determining who is calling PyEval_EvalFrame */
-#define EVAL_CALL_TOTAL 0
-#define EVAL_CALL_VECTOR 1
-#define EVAL_CALL_GENERATOR 2
-#define EVAL_CALL_LEGACY 3
-#define EVAL_CALL_FUNCTION_VECTORCALL 4
-#define EVAL_CALL_BUILD_CLASS 5
-#define EVAL_CALL_SLOT 6
-#define EVAL_CALL_FUNCTION_EX 7
-#define EVAL_CALL_API 8
-#define EVAL_CALL_METHOD 9
-
-#define EVAL_CALL_KINDS 10
-
-typedef struct _specialization_stats {
- uint64_t success;
- uint64_t failure;
- uint64_t hit;
- uint64_t deferred;
- uint64_t miss;
- uint64_t deopt;
- uint64_t failure_kinds[SPECIALIZATION_FAILURE_KINDS];
-} SpecializationStats;
-
-typedef struct _opcode_stats {
- SpecializationStats specialization;
- uint64_t execution_count;
- uint64_t pair_count[256];
-} OpcodeStats;
-
-typedef struct _call_stats {
- uint64_t inlined_py_calls;
- uint64_t pyeval_calls;
- uint64_t frames_pushed;
- uint64_t frame_objects_created;
- uint64_t eval_calls[EVAL_CALL_KINDS];
-} CallStats;
-
-typedef struct _object_stats {
- uint64_t increfs;
- uint64_t decrefs;
- uint64_t interpreter_increfs;
- uint64_t interpreter_decrefs;
- uint64_t allocations;
- uint64_t allocations512;
- uint64_t allocations4k;
- uint64_t allocations_big;
- uint64_t frees;
- uint64_t to_freelist;
- uint64_t from_freelist;
- uint64_t new_values;
- uint64_t dict_materialized_on_request;
- uint64_t dict_materialized_new_key;
- uint64_t dict_materialized_too_big;
- uint64_t dict_materialized_str_subclass;
- uint64_t dict_dematerialized;
- uint64_t type_cache_hits;
- uint64_t type_cache_misses;
- uint64_t type_cache_dunder_hits;
- uint64_t type_cache_dunder_misses;
- uint64_t type_cache_collisions;
- uint64_t optimization_attempts;
- uint64_t optimization_traces_created;
- uint64_t optimization_traces_executed;
- uint64_t optimization_uops_executed;
- /* Temporary value used during GC */
- uint64_t object_visits;
-} ObjectStats;
-
-typedef struct _gc_stats {
- uint64_t collections;
- uint64_t object_visits;
- uint64_t objects_collected;
-} GCStats;
-
-typedef struct _stats {
- OpcodeStats opcode_stats[256];
- CallStats call_stats;
- ObjectStats object_stats;
- GCStats *gc_stats;
-} PyStats;
-
-
-PyAPI_DATA(PyStats) _py_stats_struct;
-PyAPI_DATA(PyStats *) _py_stats;
-
-extern void _Py_StatsClear(void);
-extern void _Py_PrintSpecializationStats(int to_file);
-
-#ifdef _PY_INTERPRETER
-
-#define _Py_INCREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.interpreter_increfs++; } while (0)
-#define _Py_DECREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.interpreter_decrefs++; } while (0)
-
+#if defined(Py_STATS) && !defined(Py_LIMITED_API)
+# define Py_CPYTHON_PYSTATS_H
+# include "cpython/pystats.h"
+# undef Py_CPYTHON_PYSTATS_H
#else
-
-#define _Py_INCREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.increfs++; } while (0)
-#define _Py_DECREF_STAT_INC() do { if (_py_stats) _py_stats->object_stats.decrefs++; } while (0)
-
-#endif
-
-#else
-
-#define _Py_INCREF_STAT_INC() ((void)0)
-#define _Py_DECREF_STAT_INC() ((void)0)
-
+# define _Py_INCREF_STAT_INC() ((void)0)
+# define _Py_DECREF_STAT_INC() ((void)0)
#endif // !Py_STATS
#ifdef __cplusplus
}
#endif
-#endif /* !Py_PYSTATs_H */
+#endif // !Py_PYSTATS_H
diff --git a/Lib/_opcode_metadata.py b/Lib/_opcode_metadata.py
index b02aa771c347e7..5dd06ae487dfcf 100644
--- a/Lib/_opcode_metadata.py
+++ b/Lib/_opcode_metadata.py
@@ -4,6 +4,9 @@
# Do not edit!
_specializations = {
+ "RESUME": [
+ "RESUME_CHECK",
+ ],
"TO_BOOL": [
"TO_BOOL_ALWAYS_TRUE",
"TO_BOOL_BOOL",
@@ -82,21 +85,21 @@
"CALL_BOUND_METHOD_EXACT_ARGS",
"CALL_PY_EXACT_ARGS",
"CALL_PY_WITH_DEFAULTS",
- "CALL_NO_KW_TYPE_1",
- "CALL_NO_KW_STR_1",
- "CALL_NO_KW_TUPLE_1",
+ "CALL_TYPE_1",
+ "CALL_STR_1",
+ "CALL_TUPLE_1",
"CALL_BUILTIN_CLASS",
- "CALL_NO_KW_BUILTIN_O",
- "CALL_NO_KW_BUILTIN_FAST",
+ "CALL_BUILTIN_O",
+ "CALL_BUILTIN_FAST",
"CALL_BUILTIN_FAST_WITH_KEYWORDS",
- "CALL_NO_KW_LEN",
- "CALL_NO_KW_ISINSTANCE",
- "CALL_NO_KW_LIST_APPEND",
- "CALL_NO_KW_METHOD_DESCRIPTOR_O",
+ "CALL_LEN",
+ "CALL_ISINSTANCE",
+ "CALL_LIST_APPEND",
+ "CALL_METHOD_DESCRIPTOR_O",
"CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS",
- "CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS",
- "CALL_NO_KW_METHOD_DESCRIPTOR_FAST",
- "CALL_NO_KW_ALLOC_AND_ENTER_INIT",
+ "CALL_METHOD_DESCRIPTOR_NOARGS",
+ "CALL_METHOD_DESCRIPTOR_FAST",
+ "CALL_ALLOC_AND_ENTER_INIT",
],
}
@@ -104,206 +107,208 @@
_specializations["BINARY_OP"].append("BINARY_OP_INPLACE_ADD_UNICODE")
_specialized_opmap = {
- 'BINARY_OP_ADD_FLOAT': 3,
- 'BINARY_OP_ADD_INT': 4,
- 'BINARY_OP_ADD_UNICODE': 5,
- 'BINARY_OP_INPLACE_ADD_UNICODE': 6,
- 'BINARY_OP_MULTIPLY_FLOAT': 7,
- 'BINARY_OP_MULTIPLY_INT': 8,
- 'BINARY_OP_SUBTRACT_FLOAT': 9,
- 'BINARY_OP_SUBTRACT_INT': 10,
- 'BINARY_SUBSCR_DICT': 13,
- 'BINARY_SUBSCR_GETITEM': 14,
- 'BINARY_SUBSCR_LIST_INT': 15,
- 'BINARY_SUBSCR_STR_INT': 16,
- 'BINARY_SUBSCR_TUPLE_INT': 18,
- 'STORE_ATTR_INSTANCE_VALUE': 50,
- 'STORE_ATTR_SLOT': 51,
- 'STORE_SUBSCR_DICT': 54,
- 'STORE_SUBSCR_LIST_INT': 55,
- 'TO_BOOL_ALWAYS_TRUE': 57,
- 'TO_BOOL_BOOL': 58,
- 'TO_BOOL_INT': 59,
- 'TO_BOOL_LIST': 60,
- 'TO_BOOL_NONE': 61,
- 'TO_BOOL_STR': 62,
- 'CALL_BOUND_METHOD_EXACT_ARGS': 76,
- 'CALL_BUILTIN_CLASS': 77,
- 'CALL_BUILTIN_FAST_WITH_KEYWORDS': 78,
- 'CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS': 82,
- 'CALL_NO_KW_ALLOC_AND_ENTER_INIT': 83,
- 'CALL_NO_KW_BUILTIN_FAST': 84,
- 'CALL_NO_KW_BUILTIN_O': 85,
- 'CALL_NO_KW_ISINSTANCE': 86,
- 'CALL_NO_KW_LEN': 87,
- 'CALL_NO_KW_LIST_APPEND': 88,
- 'CALL_NO_KW_METHOD_DESCRIPTOR_FAST': 89,
- 'CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS': 90,
- 'CALL_NO_KW_METHOD_DESCRIPTOR_O': 91,
- 'CALL_NO_KW_STR_1': 92,
- 'CALL_NO_KW_TUPLE_1': 93,
- 'CALL_NO_KW_TYPE_1': 94,
- 'CALL_PY_EXACT_ARGS': 95,
- 'CALL_PY_WITH_DEFAULTS': 96,
- 'COMPARE_OP_FLOAT': 98,
- 'COMPARE_OP_INT': 99,
- 'COMPARE_OP_STR': 100,
- 'FOR_ITER_GEN': 115,
- 'FOR_ITER_LIST': 116,
- 'FOR_ITER_RANGE': 117,
- 'FOR_ITER_TUPLE': 118,
- 'LOAD_ATTR_CLASS': 130,
- 'LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN': 131,
- 'LOAD_ATTR_INSTANCE_VALUE': 132,
- 'LOAD_ATTR_METHOD_LAZY_DICT': 133,
- 'LOAD_ATTR_METHOD_NO_DICT': 134,
- 'LOAD_ATTR_METHOD_WITH_VALUES': 135,
- 'LOAD_ATTR_MODULE': 136,
- 'LOAD_ATTR_NONDESCRIPTOR_NO_DICT': 137,
- 'LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES': 138,
- 'LOAD_ATTR_PROPERTY': 139,
- 'LOAD_ATTR_SLOT': 140,
- 'LOAD_ATTR_WITH_HINT': 141,
- 'LOAD_GLOBAL_BUILTIN': 151,
- 'LOAD_GLOBAL_MODULE': 152,
- 'LOAD_SUPER_ATTR_ATTR': 155,
- 'LOAD_SUPER_ATTR_METHOD': 156,
- 'SEND_GEN': 169,
- 'STORE_ATTR_WITH_HINT': 174,
- 'UNPACK_SEQUENCE_LIST': 184,
- 'UNPACK_SEQUENCE_TUPLE': 185,
- 'UNPACK_SEQUENCE_TWO_TUPLE': 186,
+ 'BINARY_OP_INPLACE_ADD_UNICODE': 3,
+ 'BINARY_OP_ADD_FLOAT': 150,
+ 'BINARY_OP_ADD_INT': 151,
+ 'BINARY_OP_ADD_UNICODE': 152,
+ 'BINARY_OP_MULTIPLY_FLOAT': 153,
+ 'BINARY_OP_MULTIPLY_INT': 154,
+ 'BINARY_OP_SUBTRACT_FLOAT': 155,
+ 'BINARY_OP_SUBTRACT_INT': 156,
+ 'BINARY_SUBSCR_DICT': 157,
+ 'BINARY_SUBSCR_GETITEM': 158,
+ 'BINARY_SUBSCR_LIST_INT': 159,
+ 'BINARY_SUBSCR_STR_INT': 160,
+ 'BINARY_SUBSCR_TUPLE_INT': 161,
+ 'CALL_ALLOC_AND_ENTER_INIT': 162,
+ 'CALL_BOUND_METHOD_EXACT_ARGS': 163,
+ 'CALL_BUILTIN_CLASS': 164,
+ 'CALL_BUILTIN_FAST': 165,
+ 'CALL_BUILTIN_FAST_WITH_KEYWORDS': 166,
+ 'CALL_BUILTIN_O': 167,
+ 'CALL_ISINSTANCE': 168,
+ 'CALL_LEN': 169,
+ 'CALL_LIST_APPEND': 170,
+ 'CALL_METHOD_DESCRIPTOR_FAST': 171,
+ 'CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS': 172,
+ 'CALL_METHOD_DESCRIPTOR_NOARGS': 173,
+ 'CALL_METHOD_DESCRIPTOR_O': 174,
+ 'CALL_PY_EXACT_ARGS': 175,
+ 'CALL_PY_WITH_DEFAULTS': 176,
+ 'CALL_STR_1': 177,
+ 'CALL_TUPLE_1': 178,
+ 'CALL_TYPE_1': 179,
+ 'COMPARE_OP_FLOAT': 180,
+ 'COMPARE_OP_INT': 181,
+ 'COMPARE_OP_STR': 182,
+ 'FOR_ITER_GEN': 183,
+ 'FOR_ITER_LIST': 184,
+ 'FOR_ITER_RANGE': 185,
+ 'FOR_ITER_TUPLE': 186,
+ 'LOAD_ATTR_CLASS': 187,
+ 'LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN': 188,
+ 'LOAD_ATTR_INSTANCE_VALUE': 189,
+ 'LOAD_ATTR_METHOD_LAZY_DICT': 190,
+ 'LOAD_ATTR_METHOD_NO_DICT': 191,
+ 'LOAD_ATTR_METHOD_WITH_VALUES': 192,
+ 'LOAD_ATTR_MODULE': 193,
+ 'LOAD_ATTR_NONDESCRIPTOR_NO_DICT': 194,
+ 'LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES': 195,
+ 'LOAD_ATTR_PROPERTY': 196,
+ 'LOAD_ATTR_SLOT': 197,
+ 'LOAD_ATTR_WITH_HINT': 198,
+ 'LOAD_GLOBAL_BUILTIN': 199,
+ 'LOAD_GLOBAL_MODULE': 200,
+ 'LOAD_SUPER_ATTR_ATTR': 201,
+ 'LOAD_SUPER_ATTR_METHOD': 202,
+ 'RESUME_CHECK': 203,
+ 'SEND_GEN': 204,
+ 'STORE_ATTR_INSTANCE_VALUE': 205,
+ 'STORE_ATTR_SLOT': 206,
+ 'STORE_ATTR_WITH_HINT': 207,
+ 'STORE_SUBSCR_DICT': 208,
+ 'STORE_SUBSCR_LIST_INT': 209,
+ 'TO_BOOL_ALWAYS_TRUE': 210,
+ 'TO_BOOL_BOOL': 211,
+ 'TO_BOOL_INT': 212,
+ 'TO_BOOL_LIST': 213,
+ 'TO_BOOL_NONE': 214,
+ 'TO_BOOL_STR': 215,
+ 'UNPACK_SEQUENCE_LIST': 216,
+ 'UNPACK_SEQUENCE_TUPLE': 217,
+ 'UNPACK_SEQUENCE_TWO_TUPLE': 218,
}
opmap = {
'CACHE': 0,
'BEFORE_ASYNC_WITH': 1,
'BEFORE_WITH': 2,
- 'BINARY_SLICE': 11,
- 'BINARY_SUBSCR': 12,
+ 'BINARY_SLICE': 4,
+ 'BINARY_SUBSCR': 5,
+ 'CHECK_EG_MATCH': 6,
+ 'CHECK_EXC_MATCH': 7,
+ 'CLEANUP_THROW': 8,
+ 'DELETE_SUBSCR': 9,
+ 'END_ASYNC_FOR': 10,
+ 'END_FOR': 11,
+ 'END_SEND': 12,
+ 'EXIT_INIT_CHECK': 13,
+ 'FORMAT_SIMPLE': 14,
+ 'FORMAT_WITH_SPEC': 15,
+ 'GET_AITER': 16,
'RESERVED': 17,
- 'CHECK_EG_MATCH': 19,
- 'CHECK_EXC_MATCH': 20,
- 'CLEANUP_THROW': 21,
- 'DELETE_SUBSCR': 22,
- 'END_ASYNC_FOR': 23,
- 'END_FOR': 24,
- 'END_SEND': 25,
- 'EXIT_INIT_CHECK': 26,
- 'FORMAT_SIMPLE': 27,
- 'FORMAT_WITH_SPEC': 28,
- 'GET_AITER': 29,
- 'GET_ANEXT': 30,
- 'GET_ITER': 31,
- 'GET_LEN': 32,
- 'GET_YIELD_FROM_ITER': 33,
- 'INTERPRETER_EXIT': 34,
- 'LOAD_ASSERTION_ERROR': 35,
- 'LOAD_BUILD_CLASS': 36,
- 'LOAD_LOCALS': 37,
- 'MAKE_FUNCTION': 38,
- 'MATCH_KEYS': 39,
- 'MATCH_MAPPING': 40,
- 'MATCH_SEQUENCE': 41,
- 'NOP': 42,
- 'POP_EXCEPT': 43,
- 'POP_TOP': 44,
- 'PUSH_EXC_INFO': 45,
- 'PUSH_NULL': 46,
- 'RETURN_GENERATOR': 47,
- 'RETURN_VALUE': 48,
- 'SETUP_ANNOTATIONS': 49,
- 'STORE_SLICE': 52,
- 'STORE_SUBSCR': 53,
- 'TO_BOOL': 56,
- 'UNARY_INVERT': 63,
- 'UNARY_NEGATIVE': 64,
- 'UNARY_NOT': 65,
- 'WITH_EXCEPT_START': 66,
- 'BINARY_OP': 67,
- 'BUILD_CONST_KEY_MAP': 68,
- 'BUILD_LIST': 69,
- 'BUILD_MAP': 70,
- 'BUILD_SET': 71,
- 'BUILD_SLICE': 72,
- 'BUILD_STRING': 73,
- 'BUILD_TUPLE': 74,
- 'CALL': 75,
- 'CALL_FUNCTION_EX': 79,
- 'CALL_INTRINSIC_1': 80,
- 'CALL_INTRINSIC_2': 81,
- 'COMPARE_OP': 97,
- 'CONTAINS_OP': 101,
- 'CONVERT_VALUE': 102,
- 'COPY': 103,
- 'COPY_FREE_VARS': 104,
- 'DELETE_ATTR': 105,
- 'DELETE_DEREF': 106,
- 'DELETE_FAST': 107,
- 'DELETE_GLOBAL': 108,
- 'DELETE_NAME': 109,
- 'DICT_MERGE': 110,
- 'DICT_UPDATE': 111,
- 'ENTER_EXECUTOR': 112,
- 'EXTENDED_ARG': 113,
- 'FOR_ITER': 114,
- 'GET_AWAITABLE': 119,
- 'IMPORT_FROM': 120,
- 'IMPORT_NAME': 121,
- 'IS_OP': 122,
- 'JUMP_BACKWARD': 123,
- 'JUMP_BACKWARD_NO_INTERRUPT': 124,
- 'JUMP_FORWARD': 125,
- 'KW_NAMES': 126,
- 'LIST_APPEND': 127,
- 'LIST_EXTEND': 128,
- 'LOAD_ATTR': 129,
- 'LOAD_CONST': 142,
- 'LOAD_DEREF': 143,
- 'LOAD_FAST': 144,
- 'LOAD_FAST_AND_CLEAR': 145,
- 'LOAD_FAST_CHECK': 146,
- 'LOAD_FAST_LOAD_FAST': 147,
- 'LOAD_FROM_DICT_OR_DEREF': 148,
- 'LOAD_FROM_DICT_OR_GLOBALS': 149,
- 'LOAD_GLOBAL': 150,
- 'LOAD_NAME': 153,
- 'LOAD_SUPER_ATTR': 154,
- 'MAKE_CELL': 157,
- 'MAP_ADD': 158,
- 'MATCH_CLASS': 159,
- 'POP_JUMP_IF_FALSE': 160,
- 'POP_JUMP_IF_NONE': 161,
- 'POP_JUMP_IF_NOT_NONE': 162,
- 'POP_JUMP_IF_TRUE': 163,
- 'RAISE_VARARGS': 164,
- 'RERAISE': 165,
- 'RESUME': 166,
- 'RETURN_CONST': 167,
- 'SEND': 168,
- 'SET_ADD': 170,
- 'SET_FUNCTION_ATTRIBUTE': 171,
- 'SET_UPDATE': 172,
- 'STORE_ATTR': 173,
- 'STORE_DEREF': 175,
- 'STORE_FAST': 176,
- 'STORE_FAST_LOAD_FAST': 177,
- 'STORE_FAST_STORE_FAST': 178,
- 'STORE_GLOBAL': 179,
- 'STORE_NAME': 180,
- 'SWAP': 181,
- 'UNPACK_EX': 182,
- 'UNPACK_SEQUENCE': 183,
- 'YIELD_VALUE': 187,
- 'INSTRUMENTED_RESUME': 237,
- 'INSTRUMENTED_END_FOR': 238,
- 'INSTRUMENTED_END_SEND': 239,
- 'INSTRUMENTED_RETURN_VALUE': 240,
- 'INSTRUMENTED_RETURN_CONST': 241,
- 'INSTRUMENTED_YIELD_VALUE': 242,
- 'INSTRUMENTED_LOAD_SUPER_ATTR': 243,
- 'INSTRUMENTED_FOR_ITER': 244,
- 'INSTRUMENTED_CALL': 245,
+ 'GET_ANEXT': 18,
+ 'GET_ITER': 19,
+ 'GET_LEN': 20,
+ 'GET_YIELD_FROM_ITER': 21,
+ 'INTERPRETER_EXIT': 22,
+ 'LOAD_ASSERTION_ERROR': 23,
+ 'LOAD_BUILD_CLASS': 24,
+ 'LOAD_LOCALS': 25,
+ 'MAKE_FUNCTION': 26,
+ 'MATCH_KEYS': 27,
+ 'MATCH_MAPPING': 28,
+ 'MATCH_SEQUENCE': 29,
+ 'NOP': 30,
+ 'POP_EXCEPT': 31,
+ 'POP_TOP': 32,
+ 'PUSH_EXC_INFO': 33,
+ 'PUSH_NULL': 34,
+ 'RETURN_GENERATOR': 35,
+ 'RETURN_VALUE': 36,
+ 'SETUP_ANNOTATIONS': 37,
+ 'STORE_SLICE': 38,
+ 'STORE_SUBSCR': 39,
+ 'TO_BOOL': 40,
+ 'UNARY_INVERT': 41,
+ 'UNARY_NEGATIVE': 42,
+ 'UNARY_NOT': 43,
+ 'WITH_EXCEPT_START': 44,
+ 'BINARY_OP': 45,
+ 'BUILD_CONST_KEY_MAP': 46,
+ 'BUILD_LIST': 47,
+ 'BUILD_MAP': 48,
+ 'BUILD_SET': 49,
+ 'BUILD_SLICE': 50,
+ 'BUILD_STRING': 51,
+ 'BUILD_TUPLE': 52,
+ 'CALL': 53,
+ 'CALL_FUNCTION_EX': 54,
+ 'CALL_INTRINSIC_1': 55,
+ 'CALL_INTRINSIC_2': 56,
+ 'CALL_KW': 57,
+ 'COMPARE_OP': 58,
+ 'CONTAINS_OP': 59,
+ 'CONVERT_VALUE': 60,
+ 'COPY': 61,
+ 'COPY_FREE_VARS': 62,
+ 'DELETE_ATTR': 63,
+ 'DELETE_DEREF': 64,
+ 'DELETE_FAST': 65,
+ 'DELETE_GLOBAL': 66,
+ 'DELETE_NAME': 67,
+ 'DICT_MERGE': 68,
+ 'DICT_UPDATE': 69,
+ 'ENTER_EXECUTOR': 70,
+ 'EXTENDED_ARG': 71,
+ 'FOR_ITER': 72,
+ 'GET_AWAITABLE': 73,
+ 'IMPORT_FROM': 74,
+ 'IMPORT_NAME': 75,
+ 'IS_OP': 76,
+ 'JUMP_BACKWARD': 77,
+ 'JUMP_BACKWARD_NO_INTERRUPT': 78,
+ 'JUMP_FORWARD': 79,
+ 'LIST_APPEND': 80,
+ 'LIST_EXTEND': 81,
+ 'LOAD_ATTR': 82,
+ 'LOAD_CONST': 83,
+ 'LOAD_DEREF': 84,
+ 'LOAD_FAST': 85,
+ 'LOAD_FAST_AND_CLEAR': 86,
+ 'LOAD_FAST_CHECK': 87,
+ 'LOAD_FAST_LOAD_FAST': 88,
+ 'LOAD_FROM_DICT_OR_DEREF': 89,
+ 'LOAD_FROM_DICT_OR_GLOBALS': 90,
+ 'LOAD_GLOBAL': 91,
+ 'LOAD_NAME': 92,
+ 'LOAD_SUPER_ATTR': 93,
+ 'MAKE_CELL': 94,
+ 'MAP_ADD': 95,
+ 'MATCH_CLASS': 96,
+ 'POP_JUMP_IF_FALSE': 97,
+ 'POP_JUMP_IF_NONE': 98,
+ 'POP_JUMP_IF_NOT_NONE': 99,
+ 'POP_JUMP_IF_TRUE': 100,
+ 'RAISE_VARARGS': 101,
+ 'RERAISE': 102,
+ 'RETURN_CONST': 103,
+ 'SEND': 104,
+ 'SET_ADD': 105,
+ 'SET_FUNCTION_ATTRIBUTE': 106,
+ 'SET_UPDATE': 107,
+ 'STORE_ATTR': 108,
+ 'STORE_DEREF': 109,
+ 'STORE_FAST': 110,
+ 'STORE_FAST_LOAD_FAST': 111,
+ 'STORE_FAST_STORE_FAST': 112,
+ 'STORE_GLOBAL': 113,
+ 'STORE_NAME': 114,
+ 'SWAP': 115,
+ 'UNPACK_EX': 116,
+ 'UNPACK_SEQUENCE': 117,
+ 'YIELD_VALUE': 118,
+ 'RESUME': 149,
+ 'INSTRUMENTED_RESUME': 236,
+ 'INSTRUMENTED_END_FOR': 237,
+ 'INSTRUMENTED_END_SEND': 238,
+ 'INSTRUMENTED_RETURN_VALUE': 239,
+ 'INSTRUMENTED_RETURN_CONST': 240,
+ 'INSTRUMENTED_YIELD_VALUE': 241,
+ 'INSTRUMENTED_LOAD_SUPER_ATTR': 242,
+ 'INSTRUMENTED_FOR_ITER': 243,
+ 'INSTRUMENTED_CALL': 244,
+ 'INSTRUMENTED_CALL_KW': 245,
'INSTRUMENTED_CALL_FUNCTION_EX': 246,
'INSTRUMENTED_INSTRUCTION': 247,
'INSTRUMENTED_JUMP_FORWARD': 248,
@@ -326,5 +331,5 @@
'SETUP_WITH': 266,
'STORE_FAST_MAYBE_NULL': 267,
}
-MIN_INSTRUMENTED_OPCODE = 237
-HAVE_ARGUMENT = 67
+MIN_INSTRUMENTED_OPCODE = 236
+HAVE_ARGUMENT = 45
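
Opcode numbers like the ones above are an implementation detail that shifts between builds, so code should go through the dis module rather than hard-coding IDs. A small check (the presence of CALL_KW is an assumption tied to this 3.13 snapshot):

import dis

print(dis.opmap["LOAD_FAST"])             # numeric value varies by version
print(dis.opname[dis.opmap["RESUME"]])    # 'RESUME'
print("CALL_KW" in dis.opmap)             # True on builds with this change
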
diff --git a/Lib/_pydatetime.py b/Lib/_pydatetime.py
index 549fcda19dccf2..88275481e7002b 100644
--- a/Lib/_pydatetime.py
+++ b/Lib/_pydatetime.py
@@ -1015,13 +1015,9 @@ def fromisocalendar(cls, year, week, day):
def __repr__(self):
"""Convert to formal string, for repr().
- >>> dt = datetime(2010, 1, 1)
- >>> repr(dt)
- 'datetime.datetime(2010, 1, 1, 0, 0)'
-
- >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
- >>> repr(dt)
- 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
+ >>> d = date(2010, 1, 1)
+ >>> repr(d)
+ 'datetime.date(2010, 1, 1)'
"""
return "%s.%s(%d, %d, %d)" % (_get_class_module(self),
self.__class__.__qualname__,
@@ -1112,6 +1108,8 @@ def replace(self, year=None, month=None, day=None):
day = self._day
return type(self)(year, month, day)
+ __replace__ = replace
+
# Comparisons of date objects with other.
def __eq__(self, other):
@@ -1236,7 +1234,7 @@ def __reduce__(self):
class tzinfo:
"""Abstract base class for time zone info classes.
- Subclasses must override the name(), utcoffset() and dst() methods.
+ Subclasses must override the tzname(), utcoffset() and dst() methods.
"""
__slots__ = ()
@@ -1637,6 +1635,8 @@ def replace(self, hour=None, minute=None, second=None, microsecond=None,
fold = self._fold
return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold)
+ __replace__ = replace
+
# Pickle support.
def _getstate(self, protocol=3):
@@ -1983,6 +1983,8 @@ def replace(self, year=None, month=None, day=None, hour=None,
return type(self)(year, month, day, hour, minute, second,
microsecond, tzinfo, fold=fold)
+ __replace__ = replace
+
def _local_timezone(self):
if self.tzinfo is None:
ts = self._mktime()
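
The `__replace__ = replace` aliases above plug date, time and datetime into the generic copy.replace() protocol added elsewhere in this patch. A minimal sketch, assuming a 3.13 interpreter with these changes applied:

import copy
from datetime import date

d = date(2010, 1, 1)
print(d.replace(year=2024))          # type-specific method, works on older versions too
print(copy.replace(d, year=2024))    # generic spelling, dispatches to __replace__
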
diff --git a/Lib/ast.py b/Lib/ast.py
index 45b95963f81885..1f54309c8450d8 100644
--- a/Lib/ast.py
+++ b/Lib/ast.py
@@ -1225,17 +1225,7 @@ def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES):
def visit_JoinedStr(self, node):
self.write("f")
- if self._avoid_backslashes:
- with self.buffered() as buffer:
- self._write_fstring_inner(node)
- return self._write_str_avoiding_backslashes("".join(buffer))
-
- # If we don't need to avoid backslashes globally (i.e., we only need
- # to avoid them inside FormattedValues), it's cosmetically preferred
- # to use escaped whitespace. That is, it's preferred to use backslashes
- # for cases like: f"{x}\n". To accomplish this, we keep track of what
- # in our buffer corresponds to FormattedValues and what corresponds to
- # Constant parts of the f-string, and allow escapes accordingly.
+
fstring_parts = []
for value in node.values:
with self.buffered() as buffer:
@@ -1246,14 +1236,36 @@ def visit_JoinedStr(self, node):
new_fstring_parts = []
quote_types = list(_ALL_QUOTES)
+ fallback_to_repr = False
for value, is_constant in fstring_parts:
- value, quote_types = self._str_literal_helper(
- value,
- quote_types=quote_types,
- escape_special_whitespace=is_constant,
- )
+ if is_constant:
+ value, new_quote_types = self._str_literal_helper(
+ value,
+ quote_types=quote_types,
+ escape_special_whitespace=True,
+ )
+ if set(new_quote_types).isdisjoint(quote_types):
+ fallback_to_repr = True
+ break
+ quote_types = new_quote_types
+ elif "\n" in value:
+ quote_types = [q for q in quote_types if q in _MULTI_QUOTES]
+ assert quote_types
new_fstring_parts.append(value)
+ if fallback_to_repr:
+ # If we weren't able to find a quote type that works for all parts
+ # of the JoinedStr, fallback to using repr and triple single quotes.
+ quote_types = ["'''"]
+ new_fstring_parts.clear()
+ for value, is_constant in fstring_parts:
+ if is_constant:
+ value = repr('"' + value) # force repr to use single quotes
+ expected_prefix = "'\""
+ assert value.startswith(expected_prefix), repr(value)
+ value = value[len(expected_prefix):-1]
+ new_fstring_parts.append(value)
+
value = "".join(new_fstring_parts)
quote_type = quote_types[0]
self.write(f"{quote_type}{value}{quote_type}")
@@ -1273,16 +1285,12 @@ def _write_fstring_inner(self, node):
def visit_FormattedValue(self, node):
def unparse_inner(inner):
- unparser = type(self)(_avoid_backslashes=True)
+ unparser = type(self)()
unparser.set_precedence(_Precedence.TEST.next(), inner)
return unparser.visit(inner)
with self.delimit("{", "}"):
expr = unparse_inner(node.value)
- if "\\" in expr:
- raise ValueError(
- "Unable to avoid backslash in f-string expression part"
- )
if expr.startswith("{"):
# Separate pair of opening brackets as "{ {"
self.write(" ")
diff --git a/Lib/asyncio/sslproto.py b/Lib/asyncio/sslproto.py
index 488e17d8bccd5b..3eb65a8a08b5a0 100644
--- a/Lib/asyncio/sslproto.py
+++ b/Lib/asyncio/sslproto.py
@@ -539,7 +539,7 @@ def _start_handshake(self):
# start handshake timeout count down
self._handshake_timeout_handle = \
self._loop.call_later(self._ssl_handshake_timeout,
- lambda: self._check_handshake_timeout())
+ self._check_handshake_timeout)
self._do_handshake()
@@ -619,7 +619,7 @@ def _start_shutdown(self):
self._set_state(SSLProtocolState.FLUSHING)
self._shutdown_timeout_handle = self._loop.call_later(
self._ssl_shutdown_timeout,
- lambda: self._check_shutdown_timeout()
+ self._check_shutdown_timeout
)
self._do_flush()
@@ -758,7 +758,7 @@ def _do_read__buffered(self):
else:
break
else:
- self._loop.call_soon(lambda: self._do_read())
+ self._loop.call_soon(self._do_read)
except SSLAgainErrors:
pass
if offset > 0:
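
The sslproto changes replace `lambda: self._method()` wrappers with the bound methods themselves, which avoids an extra frame per callback and an extra closure object. The same pattern in user code (a toy example, not part of the patch):

import asyncio

class Timer:
    def _on_timeout(self):
        print("timeout fired")

async def main():
    loop = asyncio.get_running_loop()
    t = Timer()
    # Pass the bound method directly instead of `lambda: t._on_timeout()`.
    loop.call_later(0.01, t._on_timeout)
    await asyncio.sleep(0.05)

asyncio.run(main())
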
diff --git a/Lib/asyncio/subprocess.py b/Lib/asyncio/subprocess.py
index c4e5ba2061cffc..043359bbd03f8a 100644
--- a/Lib/asyncio/subprocess.py
+++ b/Lib/asyncio/subprocess.py
@@ -147,15 +147,17 @@ def kill(self):
async def _feed_stdin(self, input):
debug = self._loop.get_debug()
- if input is not None:
- self.stdin.write(input)
- if debug:
- logger.debug(
- '%r communicate: feed stdin (%s bytes)', self, len(input))
try:
+ if input is not None:
+ self.stdin.write(input)
+ if debug:
+ logger.debug(
+ '%r communicate: feed stdin (%s bytes)', self, len(input))
+
await self.stdin.drain()
except (BrokenPipeError, ConnectionResetError) as exc:
- # communicate() ignores BrokenPipeError and ConnectionResetError
+ # communicate() ignores BrokenPipeError and ConnectionResetError.
+ # write() and drain() can raise these exceptions.
if debug:
logger.debug('%r communicate: stdin got %r', self, exc)
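
Moving `stdin.write()` inside the `try` matters because the write itself can raise BrokenPipeError or ConnectionResetError when the child has already exited. The user-level analogue looks like this (a sketch, assuming a platform whose event loop supports subprocesses):

import asyncio
import sys

async def main():
    proc = await asyncio.create_subprocess_exec(
        sys.executable, "-c", "pass",          # child exits immediately
        stdin=asyncio.subprocess.PIPE)
    try:
        proc.stdin.write(b"ignored\n")
        await proc.stdin.drain()
    except (BrokenPipeError, ConnectionResetError):
        pass                                   # peer already gone; not an error here
    await proc.wait()

asyncio.run(main())
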
diff --git a/Lib/asyncio/tasks.py b/Lib/asyncio/tasks.py
index edc64fda2a6ad6..21a1b24194bcd8 100644
--- a/Lib/asyncio/tasks.py
+++ b/Lib/asyncio/tasks.py
@@ -17,7 +17,6 @@
import itertools
import math
import types
-import warnings
import weakref
from types import GenericAlias
diff --git a/Lib/asyncio/unix_events.py b/Lib/asyncio/unix_events.py
index a2680865ed968f..28cef964debd36 100644
--- a/Lib/asyncio/unix_events.py
+++ b/Lib/asyncio/unix_events.py
@@ -226,8 +226,7 @@ async def _make_subprocess_transport(self, protocol, args, shell,
return transp
def _child_watcher_callback(self, pid, returncode, transp):
- # Skip one iteration for callbacks to be executed
- self.call_soon_threadsafe(self.call_soon, transp._process_exited, returncode)
+ self.call_soon_threadsafe(transp._process_exited, returncode)
async def create_unix_connection(
self, protocol_factory, path=None, *,
diff --git a/Lib/calendar.py b/Lib/calendar.py
index e43ba4a078bcac..2a4deb70a0111f 100644
--- a/Lib/calendar.py
+++ b/Lib/calendar.py
@@ -721,7 +721,7 @@ def main(args=None):
parser.add_argument(
"-L", "--locale",
default=None,
- help="locale to be used from month and weekday names"
+ help="locale to use for month and weekday names"
)
parser.add_argument(
"-e", "--encoding",
diff --git a/Lib/codecs.py b/Lib/codecs.py
index c1c55d8afef389..82f23983e719c2 100644
--- a/Lib/codecs.py
+++ b/Lib/codecs.py
@@ -414,6 +414,9 @@ def __enter__(self):
def __exit__(self, type, value, tb):
self.stream.close()
+ def __reduce_ex__(self, proto):
+ raise TypeError("can't serialize %s" % self.__class__.__name__)
+
###
class StreamReader(Codec):
@@ -663,6 +666,9 @@ def __enter__(self):
def __exit__(self, type, value, tb):
self.stream.close()
+ def __reduce_ex__(self, proto):
+ raise TypeError("can't serialize %s" % self.__class__.__name__)
+
###
class StreamReaderWriter:
@@ -750,6 +756,9 @@ def __enter__(self):
def __exit__(self, type, value, tb):
self.stream.close()
+ def __reduce_ex__(self, proto):
+ raise TypeError("can't serialize %s" % self.__class__.__name__)
+
###
class StreamRecoder:
@@ -866,6 +875,9 @@ def __enter__(self):
def __exit__(self, type, value, tb):
self.stream.close()
+ def __reduce_ex__(self, proto):
+ raise TypeError("can't serialize %s" % self.__class__.__name__)
+
### Shortcuts
def open(filename, mode='r', encoding=None, errors='strict', buffering=-1):
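
With the new `__reduce_ex__`, pickling any of the codec stream wrappers fails loudly instead of producing an object detached from its underlying stream. Expected behaviour with this change applied (a sketch):

import codecs
import io
import pickle

writer = codecs.getwriter("utf-8")(io.BytesIO())
try:
    pickle.dumps(writer)
except TypeError as exc:
    print(exc)     # can't serialize StreamWriter
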
diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py
index 8652dc8a4ec450..a461550ea40da7 100644
--- a/Lib/collections/__init__.py
+++ b/Lib/collections/__init__.py
@@ -495,6 +495,7 @@ def __getnewargs__(self):
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
+ '__replace__': _replace,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py
index 301207f59de37a..48d8db3ed423a5 100644
--- a/Lib/concurrent/futures/process.py
+++ b/Lib/concurrent/futures/process.py
@@ -71,6 +71,11 @@ def __init__(self):
self._reader, self._writer = mp.Pipe(duplex=False)
def close(self):
+ # Please note that we do not take the shutdown lock when
+ # calling clear() (to avoid deadlocking) so this method can
+ # only be called safely from the same thread as all calls to
+ # clear() even if you hold the shutdown lock. Otherwise we
+ # might try to read from the closed pipe.
if not self._closed:
self._closed = True
self._writer.close()
@@ -426,8 +431,12 @@ def wait_result_broken_or_wakeup(self):
elif wakeup_reader in ready:
is_broken = False
- with self.shutdown_lock:
- self.thread_wakeup.clear()
+ # No need to hold the _shutdown_lock here because:
+ # 1. we're the only thread to use the wakeup reader
+ # 2. we're also the only thread to call thread_wakeup.close()
+ # 3. we want to avoid a possible deadlock when both reader and writer
+ # would block (gh-105829)
+ self.thread_wakeup.clear()
return result_item, is_broken, cause
@@ -489,7 +498,14 @@ def terminate_broken(self, cause):
# Mark pending tasks as failed.
for work_id, work_item in self.pending_work_items.items():
- work_item.future.set_exception(bpe)
+ try:
+ work_item.future.set_exception(bpe)
+ except _base.InvalidStateError as exc:
+ # set_exception() fails if the future is cancelled: ignore it.
+ # Trying to check if the future is cancelled before calling
+ # set_exception() would leave a race condition if the future is
+ # cancelled between the check and set_exception().
+ pass
# Delete references to object. See issue16284
del work_item
self.pending_work_items.clear()
@@ -503,6 +519,10 @@ def terminate_broken(self, cause):
# https://github.com/python/cpython/issues/94777
self.call_queue._reader.close()
+ # gh-107219: Close the connection writer which can unblock
+ # Queue._feed() if it was stuck in send_bytes().
+ self.call_queue._writer.close()
+
# clean up resources
self.join_executor_internals()
@@ -706,7 +726,10 @@ def __init__(self, max_workers=None, mp_context=None,
# as it could result in a deadlock if a worker process dies with the
# _result_queue write lock still acquired.
#
- # _shutdown_lock must be locked to access _ThreadWakeup.
+ # _shutdown_lock must be locked to access _ThreadWakeup.close() and
+ # .wakeup(). Care must also be taken to not call clear or close from
+ # more than one thread since _ThreadWakeup.clear() is not protected by
+ # the _shutdown_lock
self._executor_manager_thread_wakeup = _ThreadWakeup()
# Create communication channels for the executor
diff --git a/Lib/copy.py b/Lib/copy.py
index da2908ef623d8c..6d7bb9a111b5b4 100644
--- a/Lib/copy.py
+++ b/Lib/copy.py
@@ -290,3 +290,16 @@ def _reconstruct(x, memo, func, args,
return y
del types, weakref
+
+
+def replace(obj, /, **changes):
+ """Return a new object replacing specified fields with new values.
+
+ This is especially useful for immutable objects, like named tuples or
+ frozen dataclasses.
+ """
+ cls = obj.__class__
+ func = getattr(cls, '__replace__', None)
+ if func is None:
+ raise TypeError(f"replace() does not support {cls.__name__} objects")
+ return func(obj, **changes)
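
copy.replace() is just a dispatcher to `__replace__`, so any class can opt in. A minimal sketch combining the named-tuple and dataclass hooks added elsewhere in this patch with a hand-written one (assumes a 3.13 interpreter with these changes):

import copy
from collections import namedtuple
from dataclasses import dataclass

Point = namedtuple("Point", "x y")
print(copy.replace(Point(1, 2), y=3))        # Point(x=1, y=3)

@dataclass(frozen=True)
class Color:
    r: int
    g: int
    b: int

print(copy.replace(Color(0, 0, 0), g=255))   # Color(r=0, g=255, b=0)

class Box:
    def __init__(self, value):
        self.value = value

    def __replace__(self, /, **changes):
        return Box(changes.get("value", self.value))

print(copy.replace(Box(1), value=2).value)   # 2
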
diff --git a/Lib/dataclasses.py b/Lib/dataclasses.py
index 21f3fa5c213f1f..84f8d68ce092a4 100644
--- a/Lib/dataclasses.py
+++ b/Lib/dataclasses.py
@@ -1073,6 +1073,7 @@ def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen,
globals,
slots,
))
+ _set_new_attribute(cls, '__replace__', _replace)
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
@@ -1546,13 +1547,15 @@ class C:
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
+ if not _is_dataclass_instance(obj):
+ raise TypeError("replace() should be called on dataclass instances")
+ return _replace(obj, **changes)
+
+def _replace(obj, /, **changes):
# We're going to mutate 'changes', but that's okay because it's a
# new dict, even if called with 'replace(obj, **my_changes)'.
- if not _is_dataclass_instance(obj):
- raise TypeError("replace() should be called on dataclass instances")
-
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
diff --git a/Lib/enum.py b/Lib/enum.py
index 4b99e7bda2cca5..f5448a1788e4d2 100644
--- a/Lib/enum.py
+++ b/Lib/enum.py
@@ -1,8 +1,6 @@
import sys
import builtins as bltns
from types import MappingProxyType, DynamicClassAttribute
-from operator import or_ as _or_
-from functools import reduce
__all__ = [
@@ -730,6 +728,11 @@ def __call__(cls, value, names=None, *values, module=None, qualname=None, type=N
value = (value, names) + values
return cls.__new__(cls, value)
# otherwise, functional API: we're creating a new Enum type
+ if names is None and type is None:
+ # no body? no data-type? possibly wrong usage
+ raise TypeError(
+ f"{cls} has no members; specify `names=()` if you meant to create a new, empty, enum"
+ )
return cls._create_(
class_name=value,
names=names,
@@ -1879,7 +1882,8 @@ def __call__(self, enumeration):
missed = [v for v in values if v not in member_values]
if missed:
missing_names.append(name)
- missing_value |= reduce(_or_, missed)
+ for val in missed:
+ missing_value |= val
if missing_names:
if len(missing_names) == 1:
alias = 'alias %s is missing' % missing_names[0]
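
The new guard in the functional API turns a call with no members and no mix-in type into an explicit error instead of quietly creating an empty enum. Expected behaviour with this change (a sketch):

from enum import Enum

Color = Enum("Color", names=("RED", "GREEN", "BLUE"))
print(list(Color))                 # three members, values 1..3

Empty = Enum("Empty", names=())    # an intentionally empty enum is still allowed
try:
    Enum("Oops")                   # no members, no data type: now a TypeError
except TypeError as exc:
    print(exc)
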
diff --git a/Lib/functools.py b/Lib/functools.py
index a2fc28779dbddc..55990e742bf23f 100644
--- a/Lib/functools.py
+++ b/Lib/functools.py
@@ -19,8 +19,9 @@
# import types, weakref # Deferred to single_dispatch()
from reprlib import recursive_repr
from _thread import RLock
-from types import GenericAlias
+# Avoid importing types, so we can speedup import time
+GenericAlias = type(list[int])
################################################################################
### update_wrapper() and wraps() decorator
@@ -236,7 +237,7 @@ def __ge__(self, other):
def reduce(function, sequence, initial=_initial_missing):
"""
- reduce(function, iterable[, initial]) -> value
+ reduce(function, iterable[, initial], /) -> value
Apply a function of two arguments cumulatively to the items of a sequence
or iterable, from left to right, so as to reduce the iterable to a single
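
`type(list[int])` is simply another spelling of `types.GenericAlias`, picked here so that importing functools no longer pulls in the types module at startup. A quick check:

import types

GenericAlias = type(list[int])
print(GenericAlias is types.GenericAlias)   # True
print(GenericAlias(dict, (str, int)))       # dict[str, int]
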
diff --git a/Lib/http/server.py b/Lib/http/server.py
index ca6240d9a921e6..ee7a9b6aa55b88 100644
--- a/Lib/http/server.py
+++ b/Lib/http/server.py
@@ -2,18 +2,18 @@
Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see
SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST,
-and CGIHTTPRequestHandler for CGI scripts.
+and (deprecated) CGIHTTPRequestHandler for CGI scripts.
-It does, however, optionally implement HTTP/1.1 persistent connections,
-as of version 0.3.
+It does, however, optionally implement HTTP/1.1 persistent connections.
Notes on CGIHTTPRequestHandler
------------------------------
-This class implements GET and POST requests to cgi-bin scripts.
+This class is deprecated. It implements GET and POST requests to cgi-bin scripts.
-If the os.fork() function is not present (e.g. on Windows),
-subprocess.Popen() is used as a fallback, with slightly altered semantics.
+If the os.fork() function is not present (Windows), subprocess.Popen() is used,
+with slightly altered but never documented semantics. Use from a threaded
+process is likely to trigger a warning at os.fork() time.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
@@ -986,6 +986,12 @@ class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
"""
+ def __init__(self, *args, **kwargs):
+ import warnings
+ warnings._deprecated("http.server.CGIHTTPRequestHandler",
+ remove=(3, 15))
+ super().__init__(*args, **kwargs)
+
# Determine platform specifics
have_fork = hasattr(os, 'fork')
diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py
index 0717e202ecbcde..0019897c943e14 100644
--- a/Lib/importlib/_bootstrap_external.py
+++ b/Lib/importlib/_bootstrap_external.py
@@ -455,6 +455,10 @@ def _write_atomic(path, data, mode=0o666):
# Python 3.13a1 3557 (Make the conversion to boolean in jumps explicit)
# Python 3.13a1 3558 (Reorder the stack items for CALL)
# Python 3.13a1 3559 (Generate opcode IDs from bytecodes.c)
+# Python 3.13a1 3560 (Add RESUME_CHECK instruction)
+# Python 3.13a1 3561 (Add cache entry to branch instructions)
+# Python 3.13a1 3562 (Assign opcode IDs for internal ops in separate range)
+# Python 3.13a1 3563 (Add CALL_KW and remove KW_NAMES)
# Python 3.14 will start with 3600
@@ -471,7 +475,7 @@ def _write_atomic(path, data, mode=0o666):
# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array
# in PC/launcher.c must also be updated.
-MAGIC_NUMBER = (3559).to_bytes(2, 'little') + b'\r\n'
+MAGIC_NUMBER = (3563).to_bytes(2, 'little') + b'\r\n'
_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c
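
The bytecode magic number baked into .pyc files is exposed through importlib.util, so the bump to 3563 can be observed directly; the exact value printed depends on the interpreter build:

from importlib.util import MAGIC_NUMBER

print(MAGIC_NUMBER)                                  # e.g. b'\xeb\r\r\n'
print(int.from_bytes(MAGIC_NUMBER[:2], "little"))    # 3563 on this snapshot
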
diff --git a/Lib/inspect.py b/Lib/inspect.py
index c8211833dd0831..aaa22bef896602 100644
--- a/Lib/inspect.py
+++ b/Lib/inspect.py
@@ -2870,6 +2870,8 @@ def __str__(self):
return formatted
+ __replace__ = replace
+
def __repr__(self):
return '<{} "{}">'.format(self.__class__.__name__, self)
@@ -3130,6 +3132,8 @@ def replace(self, *, parameters=_void, return_annotation=_void):
return type(self)(parameters,
return_annotation=return_annotation)
+ __replace__ = replace
+
def _hash_basis(self):
params = tuple(param for param in self.parameters.values()
if param.kind != _KEYWORD_ONLY)
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index 04eaea811cfbbe..7c425a2d8e7034 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -9,6 +9,7 @@
__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
+import errno
import io
import os
import sys
@@ -41,6 +42,7 @@
BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.
+WSA_OPERATION_ABORTED = 995
_mmap_counter = itertools.count()
@@ -271,12 +273,22 @@ class PipeConnection(_ConnectionBase):
with FILE_FLAG_OVERLAPPED.
"""
_got_empty_message = False
+ _send_ov = None
def _close(self, _CloseHandle=_winapi.CloseHandle):
+ ov = self._send_ov
+ if ov is not None:
+ # Interrupt WaitForMultipleObjects() in _send_bytes()
+ ov.cancel()
_CloseHandle(self._handle)
def _send_bytes(self, buf):
+ if self._send_ov is not None:
+ # A connection should only be used by a single thread
+ raise ValueError("concurrent send_bytes() calls "
+ "are not supported")
ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
+ self._send_ov = ov
try:
if err == _winapi.ERROR_IO_PENDING:
waitres = _winapi.WaitForMultipleObjects(
@@ -286,7 +298,13 @@ def _send_bytes(self, buf):
ov.cancel()
raise
finally:
+ self._send_ov = None
nwritten, err = ov.GetOverlappedResult(True)
+ if err == WSA_OPERATION_ABORTED:
+ # close() was called by another thread while
+ # WaitForMultipleObjects() was waiting for the overlapped
+ # operation.
+ raise OSError(errno.EPIPE, "handle is closed")
assert err == 0
assert nwritten == len(buf)
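
The new check makes the long-standing rule explicit: one end of a connection
should be driven by a single thread at a time. A sketch of serializing senders
with a lock (portable, although the check itself applies to the Windows
``PipeConnection``)::

    import threading
    from multiprocessing import Pipe

    sender, receiver = Pipe()
    send_lock = threading.Lock()

    def worker(payload: bytes) -> None:
        # One sender at a time, which is what the concurrent-send guard enforces.
        with send_lock:
            sender.send_bytes(payload)

    threads = [threading.Thread(target=worker, args=(bytes([i]) * 4,))
               for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    for _ in range(3):
        print(receiver.recv_bytes())
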
diff --git a/Lib/opcode.py b/Lib/opcode.py
index 386a2fba396a6a..88f4df7c0e8c38 100644
--- a/Lib/opcode.py
+++ b/Lib/opcode.py
@@ -93,6 +93,18 @@
"counter": 1,
"version": 2,
},
+ "POP_JUMP_IF_TRUE": {
+ "counter": 1,
+ },
+ "POP_JUMP_IF_FALSE": {
+ "counter": 1,
+ },
+ "POP_JUMP_IF_NONE": {
+ "counter": 1,
+ },
+ "POP_JUMP_IF_NOT_NONE": {
+ "counter": 1,
+ },
}
_inline_cache_entries = {
diff --git a/Lib/pdb.py b/Lib/pdb.py
index 90f26a2eb99848..fd62d246f124ab 100755
--- a/Lib/pdb.py
+++ b/Lib/pdb.py
@@ -237,6 +237,9 @@ def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None,
pass
self.allow_kbdint = False
self.nosigint = nosigint
+ # Consider these characters part of the command, so that when the user
+ # types c.a or c['a'], it is not recognized as a c(ontinue) command
+ self.identchars = cmd.Cmd.identchars + '=.[](),"\'+-*/%@&|<>~^'
# Read ~/.pdbrc and ./.pdbrc
self.rcLines = []
@@ -491,6 +494,8 @@ def interaction(self, frame, tb_or_exc):
Pdb._previous_sigint_handler = None
_chained_exceptions, tb = self._get_tb_and_exceptions(tb_or_exc)
+ if isinstance(tb_or_exc, BaseException):
+ assert tb is not None, "main exception must have a traceback"
with self._hold_exceptions(_chained_exceptions):
if self.setup(frame, tb):
# no interaction desired at this time (happens if .pdbrc contains
@@ -509,6 +514,22 @@ def displayhook(self, obj):
if obj is not None:
self.message(repr(obj))
+ @contextmanager
+ def _disable_tab_completion(self):
+ if self.use_rawinput and self.completekey == 'tab':
+ try:
+ import readline
+ except ImportError:
+ yield
+ return
+ try:
+ readline.parse_and_bind('tab: self-insert')
+ yield
+ finally:
+ readline.parse_and_bind('tab: complete')
+ else:
+ yield
+
def default(self, line):
if line[:1] == '!': line = line[1:].strip()
locals = self.curframe_locals
@@ -516,28 +537,29 @@ def default(self, line):
try:
if (code := codeop.compile_command(line + '\n', '', 'single')) is None:
# Multi-line mode
- buffer = line
- continue_prompt = "... "
- while (code := codeop.compile_command(buffer, '', 'single')) is None:
- if self.use_rawinput:
- try:
- line = input(continue_prompt)
- except (EOFError, KeyboardInterrupt):
- self.lastcmd = ""
- print('\n')
- return
- else:
- self.stdout.write(continue_prompt)
- self.stdout.flush()
- line = self.stdin.readline()
- if not len(line):
- self.lastcmd = ""
- self.stdout.write('\n')
- self.stdout.flush()
- return
+ with self._disable_tab_completion():
+ buffer = line
+ continue_prompt = "... "
+ while (code := codeop.compile_command(buffer, '', 'single')) is None:
+ if self.use_rawinput:
+ try:
+ line = input(continue_prompt)
+ except (EOFError, KeyboardInterrupt):
+ self.lastcmd = ""
+ print('\n')
+ return
else:
- line = line.rstrip('\r\n')
- buffer += '\n' + line
+ self.stdout.write(continue_prompt)
+ self.stdout.flush()
+ line = self.stdin.readline()
+ if not len(line):
+ self.lastcmd = ""
+ self.stdout.write('\n')
+ self.stdout.flush()
+ return
+ else:
+ line = line.rstrip('\r\n')
+ buffer += '\n' + line
save_stdout = sys.stdout
save_stdin = sys.stdin
save_displayhook = sys.displayhook
@@ -1166,7 +1188,12 @@ def do_exceptions(self, arg):
rep = repr(exc)
if len(rep) > 80:
rep = rep[:77] + "..."
- self.message(f"{prompt} {ix:>3} {rep}")
+ indicator = (
+ " -"
+ if self._chained_exceptions[ix].__traceback__ is None
+ else f"{ix:>3}"
+ )
+ self.message(f"{prompt} {indicator} {rep}")
else:
try:
number = int(arg)
@@ -1174,6 +1201,10 @@ def do_exceptions(self, arg):
self.error("Argument must be an integer")
return
if 0 <= number < len(self._chained_exceptions):
+ if self._chained_exceptions[number].__traceback__ is None:
+ self.error("This exception does not have a traceback, cannot jump to it")
+ return
+
self._chained_exception_index = number
self.setup(None, self._chained_exceptions[number].__traceback__)
self.print_stack_entry(self.stack[self.curindex])
@@ -1722,8 +1753,11 @@ def do_alias(self, arg):
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
- if args[0] in self.aliases and len(args) == 1:
- self.message("%s = %s" % (args[0], self.aliases[args[0]]))
+ if len(args) == 1:
+ if args[0] in self.aliases:
+ self.message("%s = %s" % (args[0], self.aliases[args[0]]))
+ else:
+ self.error(f"Unknown alias '{args[0]}'")
else:
self.aliases[args[0]] = ' '.join(args[1:])
@@ -2007,19 +2041,27 @@ def post_mortem(t=None):
If `t` is an exception object, the `exceptions` command makes it possible to
list and inspect its chained exceptions (if any).
"""
+ return _post_mortem(t, Pdb())
+
+
+def _post_mortem(t, pdb_instance):
+ """
+ Private version of post_mortem(), which allows passing a pdb instance
+ for testing purposes.
+ """
# handling the default
if t is None:
exc = sys.exception()
if exc is not None:
t = exc.__traceback__
- if t is None:
+ if t is None or (isinstance(t, BaseException) and t.__traceback__ is None):
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
- p = Pdb()
- p.reset()
- p.interaction(None, t)
+ pdb_instance.reset()
+ pdb_instance.interaction(None, t)
+
def pm():
"""Enter post-mortem debugging of the traceback found in sys.last_exc."""
@@ -2039,8 +2081,6 @@ def help():
pydoc.pager(__doc__)
_usage = """\
-usage: pdb.py [-c command] ... [-m module | pyfile] [arg] ...
-
Debug the Python program given by pyfile. Alternatively,
an executable module or package to debug can be specified using
the -m switch.
@@ -2055,34 +2095,44 @@ def help():
def main():
- import getopt
-
- opts, args = getopt.getopt(sys.argv[1:], 'mhc:', ['help', 'command='])
-
- if not args:
- print(_usage)
+ import argparse
+
+ parser = argparse.ArgumentParser(prog="pdb",
+ description=_usage,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ allow_abbrev=False)
+
+ parser.add_argument('-c', '--command', action='append', default=[], metavar='command')
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('-m', metavar='module')
+ group.add_argument('pyfile', nargs='?')
+ parser.add_argument('args', nargs="*")
+
+ if len(sys.argv) == 1:
+ # If no arguments were given (python -m pdb), print the whole help message.
+ # Without this check, argparse would only complain about missing required arguments.
+ parser.print_help()
sys.exit(2)
- if any(opt in ['-h', '--help'] for opt, optarg in opts):
- print(_usage)
- sys.exit()
-
- commands = [optarg for opt, optarg in opts if opt in ['-c', '--command']]
+ opts = parser.parse_args()
- module_indicated = any(opt in ['-m'] for opt, optarg in opts)
- cls = _ModuleTarget if module_indicated else _ScriptTarget
- target = cls(args[0])
+ if opts.m:
+ file = opts.m
+ target = _ModuleTarget(file)
+ else:
+ file = opts.pyfile
+ target = _ScriptTarget(file)
target.check()
- sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list
+ sys.argv[:] = [file] + opts.args # Hide "pdb.py" and pdb options from argument list
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
- pdb.rcLines.extend(commands)
+ pdb.rcLines.extend(opts.command)
while True:
try:
pdb._run(target)
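
The split into ``post_mortem()`` and ``_post_mortem()`` does not change the
public entry point: with no argument it still falls back to ``sys.exception()``
and raises ``ValueError`` when there is nothing to debug. A minimal usage sketch::

    import pdb

    def buggy():
        return 1 / 0

    try:
        buggy()
    except ZeroDivisionError:
        # No argument: the traceback is taken from sys.exception().
        pdb.post_mortem()
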
diff --git a/Lib/pickle.py b/Lib/pickle.py
index fe86f80f51d3b9..4f5ad5b71e8899 100644
--- a/Lib/pickle.py
+++ b/Lib/pickle.py
@@ -396,6 +396,8 @@ def decode_long(data):
return int.from_bytes(data, byteorder='little', signed=True)
+_NoValue = object()
+
# Pickling machinery
class _Pickler:
@@ -542,8 +544,8 @@ def save(self, obj, save_persistent_id=True):
return
rv = NotImplemented
- reduce = getattr(self, "reducer_override", None)
- if reduce is not None:
+ reduce = getattr(self, "reducer_override", _NoValue)
+ if reduce is not _NoValue:
rv = reduce(obj)
if rv is NotImplemented:
@@ -556,8 +558,8 @@ def save(self, obj, save_persistent_id=True):
# Check private dispatch table if any, or else
# copyreg.dispatch_table
- reduce = getattr(self, 'dispatch_table', dispatch_table).get(t)
- if reduce is not None:
+ reduce = getattr(self, 'dispatch_table', dispatch_table).get(t, _NoValue)
+ if reduce is not _NoValue:
rv = reduce(obj)
else:
# Check for a class with a custom metaclass; treat as regular
@@ -567,12 +569,12 @@ def save(self, obj, save_persistent_id=True):
return
# Check for a __reduce_ex__ method, fall back to __reduce__
- reduce = getattr(obj, "__reduce_ex__", None)
- if reduce is not None:
+ reduce = getattr(obj, "__reduce_ex__", _NoValue)
+ if reduce is not _NoValue:
rv = reduce(self.proto)
else:
- reduce = getattr(obj, "__reduce__", None)
- if reduce is not None:
+ reduce = getattr(obj, "__reduce__", _NoValue)
+ if reduce is not _NoValue:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
@@ -1705,8 +1707,8 @@ def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
- setstate = getattr(inst, "__setstate__", None)
- if setstate is not None:
+ setstate = getattr(inst, "__setstate__", _NoValue)
+ if setstate is not _NoValue:
setstate(state)
return
slotstate = None
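
The ``_NoValue`` sentinel distinguishes a genuinely missing attribute from one
that is explicitly set to ``None`` (for example ``__reduce_ex__ = None`` on a
class that opts out of pickling). A standalone sketch of the pattern::

    _MISSING = object()   # illustrative sentinel, analogous to _NoValue above

    class Example:
        hook = None       # present, but deliberately None

    value = getattr(Example(), "hook", _MISSING)
    if value is not _MISSING:
        print("attribute exists, value:", value)   # attribute exists, value: None
    else:
        print("attribute is missing")
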
diff --git a/Lib/random.py b/Lib/random.py
index 586c3f7f9da938..84bbfc5df1bf23 100644
--- a/Lib/random.py
+++ b/Lib/random.py
@@ -827,7 +827,7 @@ def binomialvariate(self, n=1, p=0.5):
return k
# Acceptance-rejection test.
- # Note, the original paper errorneously omits the call to log(v)
+ # Note, the original paper erroneously omits the call to log(v)
# when comparing to the log of the rescaled binomial distribution.
if not setup_complete:
alpha = (2.83 + 5.1 / b) * spq
diff --git a/Lib/ssl.py b/Lib/ssl.py
index c4c5a4ca894ee5..62e55857141dfc 100644
--- a/Lib/ssl.py
+++ b/Lib/ssl.py
@@ -876,6 +876,31 @@ def getpeercert(self, binary_form=False):
"""
return self._sslobj.getpeercert(binary_form)
+ def get_verified_chain(self):
+ """Returns verified certificate chain provided by the other
+ end of the SSL channel as a list of DER-encoded bytes.
+
+ If certificate verification was disabled, this method acts the same as
+ ``SSLSocket.get_unverified_chain``.
+ """
+ chain = self._sslobj.get_verified_chain()
+
+ if chain is None:
+ return []
+
+ return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain]
+
+ def get_unverified_chain(self):
+ """Returns raw certificate chain provided by the other
+ end of the SSL channel as a list of DER-encoded bytes.
+ """
+ chain = self._sslobj.get_unverified_chain()
+
+ if chain is None:
+ return []
+
+ return [cert.public_bytes(_ssl.ENCODING_DER) for cert in chain]
+
def selected_npn_protocol(self):
"""Return the currently selected NPN protocol as a string, or ``None``
if a next protocol was not negotiated or if NPN is not supported by one
@@ -1129,6 +1154,14 @@ def getpeercert(self, binary_form=False):
self._check_connected()
return self._sslobj.getpeercert(binary_form)
+ @_sslcopydoc
+ def get_verified_chain(self):
+ return self._sslobj.get_verified_chain()
+
+ @_sslcopydoc
+ def get_unverified_chain(self):
+ return self._sslobj.get_unverified_chain()
+
@_sslcopydoc
def selected_npn_protocol(self):
self._checkClosed()
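
A hedged usage sketch of the new chain accessors (the host name is arbitrary;
verification is enabled by ``create_default_context()``, so a successful
handshake yields a non-empty verified chain)::

    import socket
    import ssl

    ctx = ssl.create_default_context()
    with socket.create_connection(("example.org", 443)) as sock:
        with ctx.wrap_socket(sock, server_hostname="example.org") as tls:
            chain = tls.get_verified_chain()
            print(len(chain), "certificate(s) in the verified chain")
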
diff --git a/Lib/test/.ruff.toml b/Lib/test/.ruff.toml
new file mode 100644
index 00000000000000..e202766b147e6d
--- /dev/null
+++ b/Lib/test/.ruff.toml
@@ -0,0 +1,35 @@
+fix = true
+select = [
+ "F811", # Redefinition of unused variable (useful for finding test methods with the same name)
+]
+extend-exclude = [
+ # Excluded (these aren't actually executed, they're just "data files")
+ "tokenizedata/*.py",
+ # Failed to lint
+ "encoded_modules/module_iso_8859_1.py",
+ "encoded_modules/module_koi8_r.py",
+ # Failed to parse
+ "support/socket_helper.py",
+ "test_fstring.py",
+ # TODO Fix: F811 Redefinition of unused name
+ "test__opcode.py",
+ "test_buffer.py",
+ "test_ctypes/test_arrays.py",
+ "test_ctypes/test_functions.py",
+ "test_dataclasses/__init__.py",
+ "test_descr.py",
+ "test_enum.py",
+ "test_functools.py",
+ "test_genericclass.py",
+ "test_grammar.py",
+ "test_import/__init__.py",
+ "test_keywordonlyarg.py",
+ "test_pkg.py",
+ "test_subclassinit.py",
+ "test_typing.py",
+ "test_unittest/testmock/testpatch.py",
+ "test_yield_from.py",
+ "time_hashlib.py",
+ # Pending https://github.com/python/cpython/pull/109139
+ "test_monitoring.py",
+]
diff --git a/Lib/test/__main__.py b/Lib/test/__main__.py
index 19a6b2b8904526..e5780b784b4b05 100644
--- a/Lib/test/__main__.py
+++ b/Lib/test/__main__.py
@@ -1,2 +1,2 @@
-from test.libregrtest import main
+from test.libregrtest.main import main
main()
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index bcbb4c2929d69e..730b887dd4bcac 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -675,6 +675,7 @@ def test_close(self):
close_queue(q)
+ @support.requires_resource('walltime')
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
@@ -4991,6 +4992,7 @@ def test_wait_slow(self):
def test_wait_socket_slow(self):
self.test_wait_socket(True)
+ @support.requires_resource('walltime')
def test_wait_timeout(self):
from multiprocessing.connection import wait
@@ -5019,6 +5021,7 @@ def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
+ @support.requires_resource('walltime')
def test_wait_integer(self):
from multiprocessing.connection import wait
@@ -5469,7 +5472,9 @@ def test_nested_startmethod(self):
while not queue.empty():
results.append(queue.get())
- self.assertEqual(results, [2, 1])
+ # gh-109706: queue.put(1) can write into the queue before queue.put(2);
+ # there is no synchronization in the test.
+ self.assertSetEqual(set(results), set([2, 1]))
@unittest.skipIf(sys.platform == "win32",
diff --git a/Lib/test/audit-tests.py b/Lib/test/audit-tests.py
index ad8f72f556331d..f0cedde308d53b 100644
--- a/Lib/test/audit-tests.py
+++ b/Lib/test/audit-tests.py
@@ -186,7 +186,7 @@ class C(A):
)
-def test_open():
+def test_open(testfn):
# SSLContext.load_dh_params uses _Py_fopen_obj rather than normal open()
try:
import ssl
@@ -199,11 +199,11 @@ def test_open():
# All of them should fail
with TestHook(raise_on_events={"open"}) as hook:
for fn, *args in [
- (open, sys.argv[2], "r"),
+ (open, testfn, "r"),
(open, sys.executable, "rb"),
(open, 3, "wb"),
- (open, sys.argv[2], "w", -1, None, None, None, False, lambda *a: 1),
- (load_dh_params, sys.argv[2]),
+ (open, testfn, "w", -1, None, None, None, False, lambda *a: 1),
+ (load_dh_params, testfn),
]:
if not fn:
continue
@@ -216,11 +216,11 @@ def test_open():
[
i
for i in [
- (sys.argv[2], "r"),
+ (testfn, "r"),
(sys.executable, "r"),
(3, "w"),
- (sys.argv[2], "w"),
- (sys.argv[2], "rb") if load_dh_params else None,
+ (testfn, "w"),
+ (testfn, "rb") if load_dh_params else None,
]
if i is not None
],
@@ -289,7 +289,7 @@ def hook(event, args):
def test_unraisablehook():
- from _testcapi import write_unraisable_exc
+ from _testinternalcapi import write_unraisable_exc
def unraisablehook(hookargs):
pass
@@ -517,12 +517,15 @@ def test_not_in_gc():
assert hook not in o
-def test_time():
+def test_time(mode):
import time
def hook(event, args):
if event.startswith("time."):
- print(event, *args)
+ if mode == 'print':
+ print(event, *args)
+ elif mode == 'fail':
+ raise AssertionError('hook failed')
sys.addaudithook(hook)
time.sleep(0)
@@ -549,4 +552,4 @@ def hook(event, args):
suppress_msvcrt_asserts()
test = sys.argv[1]
- globals()[test]()
+ globals()[test](*sys.argv[2:])
diff --git a/Lib/test/autotest.py b/Lib/test/autotest.py
index fa85cc153a133a..b5a1fab404c72d 100644
--- a/Lib/test/autotest.py
+++ b/Lib/test/autotest.py
@@ -1,5 +1,5 @@
# This should be equivalent to running regrtest.py from the cmdline.
# It can be especially handy if you're in an interactive shell, e.g.,
# from test import autotest.
-from test.libregrtest import main
+from test.libregrtest.main import main
main()
diff --git a/Lib/test/badsyntax_future3.py b/Lib/test/badsyntax_future3.py
deleted file mode 100644
index f1c8417edaa297..00000000000000
--- a/Lib/test/badsyntax_future3.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This is a test"""
-from __future__ import nested_scopes
-from __future__ import rested_snopes
-
-def f(x):
- def g(y):
- return x + y
- return g
-
-result = f(2)(4)
diff --git a/Lib/test/badsyntax_future4.py b/Lib/test/badsyntax_future4.py
deleted file mode 100644
index b5f4c98e922ac2..00000000000000
--- a/Lib/test/badsyntax_future4.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This is a test"""
-import __future__
-from __future__ import nested_scopes
-
-def f(x):
- def g(y):
- return x + y
- return g
-
-result = f(2)(4)
diff --git a/Lib/test/badsyntax_future5.py b/Lib/test/badsyntax_future5.py
deleted file mode 100644
index 8a7e5fcb70ff2e..00000000000000
--- a/Lib/test/badsyntax_future5.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""This is a test"""
-from __future__ import nested_scopes
-import foo
-from __future__ import nested_scopes
-
-
-def f(x):
- def g(y):
- return x + y
- return g
-
-result = f(2)(4)
diff --git a/Lib/test/badsyntax_future6.py b/Lib/test/badsyntax_future6.py
deleted file mode 100644
index 5a8b55a02c41bf..00000000000000
--- a/Lib/test/badsyntax_future6.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This is a test"""
-"this isn't a doc string"
-from __future__ import nested_scopes
-
-def f(x):
- def g(y):
- return x + y
- return g
-
-result = f(2)(4)
diff --git a/Lib/test/badsyntax_future7.py b/Lib/test/badsyntax_future7.py
deleted file mode 100644
index 131db2c2164cf2..00000000000000
--- a/Lib/test/badsyntax_future7.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""This is a test"""
-
-from __future__ import nested_scopes; import string; from __future__ import \
- nested_scopes
-
-def f(x):
- def g(y):
- return x + y
- return g
-
-result = f(2)(4)
diff --git a/Lib/test/badsyntax_future8.py b/Lib/test/badsyntax_future8.py
deleted file mode 100644
index ca45289e2e5a4f..00000000000000
--- a/Lib/test/badsyntax_future8.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This is a test"""
-
-from __future__ import *
-
-def f(x):
- def g(y):
- return x + y
- return g
-
-print(f(2)(4))
diff --git a/Lib/test/badsyntax_future9.py b/Lib/test/badsyntax_future9.py
deleted file mode 100644
index 916de06ab71e97..00000000000000
--- a/Lib/test/badsyntax_future9.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""This is a test"""
-
-from __future__ import nested_scopes, braces
-
-def f(x):
- def g(y):
- return x + y
- return g
-
-print(f(2)(4))
diff --git a/Lib/test/bisect_cmd.py b/Lib/test/bisect_cmd.py
index 0bdd7a43c03f7b..5cb804bd469dc3 100755
--- a/Lib/test/bisect_cmd.py
+++ b/Lib/test/bisect_cmd.py
@@ -109,9 +109,10 @@ def parse_args():
def main():
args = parse_args()
- if '-w' in args.test_args or '--verbose2' in args.test_args:
- print("WARNING: -w/--verbose2 option should not be used to bisect!")
- print()
+ for opt in ('-w', '--rerun', '--verbose2'):
+ if opt in args.test_args:
+ print(f"WARNING: {opt} option should not be used to bisect!")
+ print()
if args.input:
with open(args.input) as fp:
diff --git a/Lib/test/allsans.pem b/Lib/test/certdata/allsans.pem
similarity index 100%
rename from Lib/test/allsans.pem
rename to Lib/test/certdata/allsans.pem
diff --git a/Lib/test/badcert.pem b/Lib/test/certdata/badcert.pem
similarity index 100%
rename from Lib/test/badcert.pem
rename to Lib/test/certdata/badcert.pem
diff --git a/Lib/test/badkey.pem b/Lib/test/certdata/badkey.pem
similarity index 100%
rename from Lib/test/badkey.pem
rename to Lib/test/certdata/badkey.pem
diff --git a/Lib/test/capath/4e1295a3.0 b/Lib/test/certdata/capath/4e1295a3.0
similarity index 100%
rename from Lib/test/capath/4e1295a3.0
rename to Lib/test/certdata/capath/4e1295a3.0
diff --git a/Lib/test/capath/5ed36f99.0 b/Lib/test/certdata/capath/5ed36f99.0
similarity index 100%
rename from Lib/test/capath/5ed36f99.0
rename to Lib/test/certdata/capath/5ed36f99.0
diff --git a/Lib/test/capath/6e88d7b8.0 b/Lib/test/certdata/capath/6e88d7b8.0
similarity index 100%
rename from Lib/test/capath/6e88d7b8.0
rename to Lib/test/certdata/capath/6e88d7b8.0
diff --git a/Lib/test/capath/99d0fa06.0 b/Lib/test/certdata/capath/99d0fa06.0
similarity index 100%
rename from Lib/test/capath/99d0fa06.0
rename to Lib/test/certdata/capath/99d0fa06.0
diff --git a/Lib/test/capath/b1930218.0 b/Lib/test/certdata/capath/b1930218.0
similarity index 100%
rename from Lib/test/capath/b1930218.0
rename to Lib/test/certdata/capath/b1930218.0
diff --git a/Lib/test/capath/ceff1710.0 b/Lib/test/certdata/capath/ceff1710.0
similarity index 100%
rename from Lib/test/capath/ceff1710.0
rename to Lib/test/certdata/capath/ceff1710.0
diff --git a/Lib/test/ffdh3072.pem b/Lib/test/certdata/ffdh3072.pem
similarity index 100%
rename from Lib/test/ffdh3072.pem
rename to Lib/test/certdata/ffdh3072.pem
diff --git a/Lib/test/idnsans.pem b/Lib/test/certdata/idnsans.pem
similarity index 100%
rename from Lib/test/idnsans.pem
rename to Lib/test/certdata/idnsans.pem
diff --git a/Lib/test/keycert.passwd.pem b/Lib/test/certdata/keycert.passwd.pem
similarity index 100%
rename from Lib/test/keycert.passwd.pem
rename to Lib/test/certdata/keycert.passwd.pem
diff --git a/Lib/test/keycert.pem b/Lib/test/certdata/keycert.pem
similarity index 100%
rename from Lib/test/keycert.pem
rename to Lib/test/certdata/keycert.pem
diff --git a/Lib/test/keycert2.pem b/Lib/test/certdata/keycert2.pem
similarity index 100%
rename from Lib/test/keycert2.pem
rename to Lib/test/certdata/keycert2.pem
diff --git a/Lib/test/keycert3.pem b/Lib/test/certdata/keycert3.pem
similarity index 100%
rename from Lib/test/keycert3.pem
rename to Lib/test/certdata/keycert3.pem
diff --git a/Lib/test/keycert4.pem b/Lib/test/certdata/keycert4.pem
similarity index 100%
rename from Lib/test/keycert4.pem
rename to Lib/test/certdata/keycert4.pem
diff --git a/Lib/test/keycertecc.pem b/Lib/test/certdata/keycertecc.pem
similarity index 100%
rename from Lib/test/keycertecc.pem
rename to Lib/test/certdata/keycertecc.pem
diff --git a/Lib/test/make_ssl_certs.py b/Lib/test/certdata/make_ssl_certs.py
similarity index 100%
rename from Lib/test/make_ssl_certs.py
rename to Lib/test/certdata/make_ssl_certs.py
diff --git a/Lib/test/nokia.pem b/Lib/test/certdata/nokia.pem
similarity index 100%
rename from Lib/test/nokia.pem
rename to Lib/test/certdata/nokia.pem
diff --git a/Lib/test/nosan.pem b/Lib/test/certdata/nosan.pem
similarity index 100%
rename from Lib/test/nosan.pem
rename to Lib/test/certdata/nosan.pem
diff --git a/Lib/test/nullbytecert.pem b/Lib/test/certdata/nullbytecert.pem
similarity index 100%
rename from Lib/test/nullbytecert.pem
rename to Lib/test/certdata/nullbytecert.pem
diff --git a/Lib/test/nullcert.pem b/Lib/test/certdata/nullcert.pem
similarity index 100%
rename from Lib/test/nullcert.pem
rename to Lib/test/certdata/nullcert.pem
diff --git a/Lib/test/pycacert.pem b/Lib/test/certdata/pycacert.pem
similarity index 100%
rename from Lib/test/pycacert.pem
rename to Lib/test/certdata/pycacert.pem
diff --git a/Lib/test/pycakey.pem b/Lib/test/certdata/pycakey.pem
similarity index 100%
rename from Lib/test/pycakey.pem
rename to Lib/test/certdata/pycakey.pem
diff --git a/Lib/test/revocation.crl b/Lib/test/certdata/revocation.crl
similarity index 100%
rename from Lib/test/revocation.crl
rename to Lib/test/certdata/revocation.crl
diff --git a/Lib/test/secp384r1.pem b/Lib/test/certdata/secp384r1.pem
similarity index 100%
rename from Lib/test/secp384r1.pem
rename to Lib/test/certdata/secp384r1.pem
diff --git a/Lib/test/selfsigned_pythontestdotnet.pem b/Lib/test/certdata/selfsigned_pythontestdotnet.pem
similarity index 100%
rename from Lib/test/selfsigned_pythontestdotnet.pem
rename to Lib/test/certdata/selfsigned_pythontestdotnet.pem
diff --git a/Lib/test/ssl_cert.pem b/Lib/test/certdata/ssl_cert.pem
similarity index 100%
rename from Lib/test/ssl_cert.pem
rename to Lib/test/certdata/ssl_cert.pem
diff --git a/Lib/test/ssl_key.passwd.pem b/Lib/test/certdata/ssl_key.passwd.pem
similarity index 100%
rename from Lib/test/ssl_key.passwd.pem
rename to Lib/test/certdata/ssl_key.passwd.pem
diff --git a/Lib/test/ssl_key.pem b/Lib/test/certdata/ssl_key.pem
similarity index 100%
rename from Lib/test/ssl_key.pem
rename to Lib/test/certdata/ssl_key.pem
diff --git a/Lib/test/talos-2019-0758.pem b/Lib/test/certdata/talos-2019-0758.pem
similarity index 100%
rename from Lib/test/talos-2019-0758.pem
rename to Lib/test/certdata/talos-2019-0758.pem
diff --git a/Lib/test/datetimetester.py b/Lib/test/datetimetester.py
index 55e061950ff280..8bda17358db87f 100644
--- a/Lib/test/datetimetester.py
+++ b/Lib/test/datetimetester.py
@@ -1699,22 +1699,23 @@ def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
- self.assertEqual(base, base.replace())
+ self.assertEqual(base.replace(), base)
+ self.assertEqual(copy.replace(base), base)
- i = 0
- for name, newval in (("year", 2),
- ("month", 3),
- ("day", 4)):
+ changes = (("year", 2),
+ ("month", 3),
+ ("day", 4))
+ for i, (name, newval) in enumerate(changes):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
- got = base.replace(**{name: newval})
- self.assertEqual(expected, got)
- i += 1
+ self.assertEqual(base.replace(**{name: newval}), expected)
+ self.assertEqual(copy.replace(base, **{name: newval}), expected)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
+ self.assertRaises(ValueError, copy.replace, base, year=2001)
def test_subclass_replace(self):
class DateSubclass(self.theclass):
@@ -1722,6 +1723,7 @@ class DateSubclass(self.theclass):
dt = DateSubclass(2012, 1, 1)
self.assertIs(type(dt.replace(year=2013)), DateSubclass)
+ self.assertIs(type(copy.replace(dt, year=2013)), DateSubclass)
def test_subclass_date(self):
@@ -2856,26 +2858,27 @@ def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
- self.assertEqual(base, base.replace())
-
- i = 0
- for name, newval in (("year", 2),
- ("month", 3),
- ("day", 4),
- ("hour", 5),
- ("minute", 6),
- ("second", 7),
- ("microsecond", 8)):
+ self.assertEqual(base.replace(), base)
+ self.assertEqual(copy.replace(base), base)
+
+ changes = (("year", 2),
+ ("month", 3),
+ ("day", 4),
+ ("hour", 5),
+ ("minute", 6),
+ ("second", 7),
+ ("microsecond", 8))
+ for i, (name, newval) in enumerate(changes):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
- got = base.replace(**{name: newval})
- self.assertEqual(expected, got)
- i += 1
+ self.assertEqual(base.replace(**{name: newval}), expected)
+ self.assertEqual(copy.replace(base, **{name: newval}), expected)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
+ self.assertRaises(ValueError, copy.replace, base, year=2001)
@support.run_with_tz('EDT4')
def test_astimezone(self):
@@ -3671,19 +3674,19 @@ def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4]
base = cls(*args)
- self.assertEqual(base, base.replace())
-
- i = 0
- for name, newval in (("hour", 5),
- ("minute", 6),
- ("second", 7),
- ("microsecond", 8)):
+ self.assertEqual(base.replace(), base)
+ self.assertEqual(copy.replace(base), base)
+
+ changes = (("hour", 5),
+ ("minute", 6),
+ ("second", 7),
+ ("microsecond", 8))
+ for i, (name, newval) in enumerate(changes):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
- got = base.replace(**{name: newval})
- self.assertEqual(expected, got)
- i += 1
+ self.assertEqual(base.replace(**{name: newval}), expected)
+ self.assertEqual(copy.replace(base, **{name: newval}), expected)
# Out of bounds.
base = cls(1)
@@ -3691,6 +3694,10 @@ def test_replace(self):
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
+ self.assertRaises(ValueError, copy.replace, base, hour=24)
+ self.assertRaises(ValueError, copy.replace, base, minute=-1)
+ self.assertRaises(ValueError, copy.replace, base, second=100)
+ self.assertRaises(ValueError, copy.replace, base, microsecond=1000000)
def test_subclass_replace(self):
class TimeSubclass(self.theclass):
@@ -3698,6 +3705,7 @@ class TimeSubclass(self.theclass):
ctime = TimeSubclass(12, 30)
self.assertIs(type(ctime.replace(hour=10)), TimeSubclass)
+ self.assertIs(type(copy.replace(ctime, hour=10)), TimeSubclass)
def test_subclass_time(self):
@@ -4085,31 +4093,37 @@ def test_replace(self):
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, z100]
base = cls(*args)
- self.assertEqual(base, base.replace())
-
- i = 0
- for name, newval in (("hour", 5),
- ("minute", 6),
- ("second", 7),
- ("microsecond", 8),
- ("tzinfo", zm200)):
+ self.assertEqual(base.replace(), base)
+ self.assertEqual(copy.replace(base), base)
+
+ changes = (("hour", 5),
+ ("minute", 6),
+ ("second", 7),
+ ("microsecond", 8),
+ ("tzinfo", zm200))
+ for i, (name, newval) in enumerate(changes):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
- got = base.replace(**{name: newval})
- self.assertEqual(expected, got)
- i += 1
+ self.assertEqual(base.replace(**{name: newval}), expected)
+ self.assertEqual(copy.replace(base, **{name: newval}), expected)
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertIsNone(base2.tzinfo)
self.assertIsNone(base2.tzname())
+ base22 = copy.replace(base, tzinfo=None)
+ self.assertIsNone(base22.tzinfo)
+ self.assertIsNone(base22.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertIs(base.tzinfo, base3.tzinfo)
+ base32 = copy.replace(base22, tzinfo=z100)
+ self.assertEqual(base, base32)
+ self.assertIs(base.tzinfo, base32.tzinfo)
# Out of bounds.
base = cls(1)
@@ -4117,6 +4131,10 @@ def test_replace(self):
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
+ self.assertRaises(ValueError, copy.replace, base, hour=24)
+ self.assertRaises(ValueError, copy.replace, base, minute=-1)
+ self.assertRaises(ValueError, copy.replace, base, second=100)
+ self.assertRaises(ValueError, copy.replace, base, microsecond=1000000)
def test_mixed_compare(self):
t1 = self.theclass(1, 2, 3)
@@ -4885,38 +4903,45 @@ def test_replace(self):
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
- self.assertEqual(base, base.replace())
-
- i = 0
- for name, newval in (("year", 2),
- ("month", 3),
- ("day", 4),
- ("hour", 5),
- ("minute", 6),
- ("second", 7),
- ("microsecond", 8),
- ("tzinfo", zm200)):
+ self.assertEqual(base.replace(), base)
+ self.assertEqual(copy.replace(base), base)
+
+ changes = (("year", 2),
+ ("month", 3),
+ ("day", 4),
+ ("hour", 5),
+ ("minute", 6),
+ ("second", 7),
+ ("microsecond", 8),
+ ("tzinfo", zm200))
+ for i, (name, newval) in enumerate(changes):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
- got = base.replace(**{name: newval})
- self.assertEqual(expected, got)
- i += 1
+ self.assertEqual(base.replace(**{name: newval}), expected)
+ self.assertEqual(copy.replace(base, **{name: newval}), expected)
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertIsNone(base2.tzinfo)
self.assertIsNone(base2.tzname())
+ base22 = copy.replace(base, tzinfo=None)
+ self.assertIsNone(base22.tzinfo)
+ self.assertIsNone(base22.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertIs(base.tzinfo, base3.tzinfo)
+ base32 = copy.replace(base22, tzinfo=z100)
+ self.assertEqual(base, base32)
+ self.assertIs(base.tzinfo, base32.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
+ self.assertRaises(ValueError, copy.replace, base, year=2001)
def test_more_astimezone(self):
# The inherited test_astimezone covered some trivial and error cases.
diff --git a/Lib/test/libregrtest/__init__.py b/Lib/test/libregrtest/__init__.py
index 5e8dba5dbde71a..e69de29bb2d1d6 100644
--- a/Lib/test/libregrtest/__init__.py
+++ b/Lib/test/libregrtest/__init__.py
@@ -1,2 +0,0 @@
-from test.libregrtest.cmdline import _parse_args, RESOURCE_NAMES, ALL_RESOURCES
-from test.libregrtest.main import main
diff --git a/Lib/test/libregrtest/cmdline.py b/Lib/test/libregrtest/cmdline.py
index ebe57920d9185c..99f28152f1a1c7 100644
--- a/Lib/test/libregrtest/cmdline.py
+++ b/Lib/test/libregrtest/cmdline.py
@@ -1,5 +1,5 @@
import argparse
-import os
+import os.path
import shlex
import sys
from test.support import os_helper
@@ -107,6 +107,8 @@
cpu - Used for certain CPU-heavy tests.
+ walltime - Long-running but not CPU-bound tests.
+
subprocess Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
@@ -129,7 +131,7 @@
ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network',
- 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
+ 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui', 'walltime')
# Other resources excluded from --use=all:
#
@@ -147,6 +149,10 @@ def __init__(self, **kwargs) -> None:
self.verbose = 0
self.quiet = False
self.exclude = False
+ self.cleanup = False
+ self.wait = False
+ self.list_cases = False
+ self.list_tests = False
self.single = False
self.randomize = False
self.fromfile = None
@@ -155,8 +161,8 @@ def __init__(self, **kwargs) -> None:
self.trace = False
self.coverdir = 'coverage'
self.runleaks = False
- self.huntrleaks = False
- self.verbose2 = False
+ self.huntrleaks: tuple[int, int, str] | None = None
+ self.rerun = False
self.verbose3 = False
self.print_slow = False
self.random_seed = None
@@ -168,6 +174,13 @@ def __init__(self, **kwargs) -> None:
self.ignore_tests = None
self.pgo = False
self.pgo_extended = False
+ self.worker_json = None
+ self.start = None
+ self.timeout = None
+ self.memlimit = None
+ self.threshold = None
+ self.fail_rerun = False
+ self.tempdir = None
super().__init__(**kwargs)
@@ -203,7 +216,6 @@ def _create_parser():
group.add_argument('--wait', action='store_true',
help='wait for user input, e.g., allow a debugger '
'to be attached')
- group.add_argument('--worker-args', metavar='ARGS')
group.add_argument('-S', '--start', metavar='START',
help='the name of the test at which to start.' +
more_details)
@@ -213,8 +225,10 @@ def _create_parser():
group = parser.add_argument_group('Verbosity')
group.add_argument('-v', '--verbose', action='count',
help='run tests in verbose mode with output to stdout')
- group.add_argument('-w', '--verbose2', action='store_true',
+ group.add_argument('-w', '--rerun', action='store_true',
help='re-run failed tests in verbose mode')
+ group.add_argument('--verbose2', action='store_true', dest='rerun',
+ help='deprecated alias to --rerun')
group.add_argument('-W', '--verbose3', action='store_true',
help='display test output on failure')
group.add_argument('-q', '--quiet', action='store_true',
@@ -309,6 +323,9 @@ def _create_parser():
group.add_argument('--fail-env-changed', action='store_true',
help='if a test file alters the environment, mark '
'the test as failed')
+ group.add_argument('--fail-rerun', action='store_true',
+ help='if a test failed and then passed when re-run, '
+ 'mark the tests as failed')
group.add_argument('--junit-xml', dest='xmlpath', metavar='FILENAME',
help='writes JUnit-style XML results to the specified '
@@ -380,7 +397,7 @@ def _parse_args(args, **kwargs):
ns.python = shlex.split(ns.python)
if ns.failfast and not (ns.verbose or ns.verbose3):
parser.error("-G/--failfast needs either -v or -W")
- if ns.pgo and (ns.verbose or ns.verbose2 or ns.verbose3):
+ if ns.pgo and (ns.verbose or ns.rerun or ns.verbose3):
parser.error("--pgo/-v don't go together!")
if ns.pgo_extended:
ns.pgo = True # pgo_extended implies pgo
@@ -394,10 +411,6 @@ def _parse_args(args, **kwargs):
if ns.timeout is not None:
if ns.timeout <= 0:
ns.timeout = None
- if ns.use_mp is not None:
- if ns.use_mp <= 0:
- # Use all cores + extras for tests that like to sleep
- ns.use_mp = 2 + (os.cpu_count() or 1)
if ns.use:
for a in ns.use:
for r in a:
@@ -441,4 +454,13 @@ def _parse_args(args, **kwargs):
# --forever implies --failfast
ns.failfast = True
+ if ns.huntrleaks:
+ warmup, repetitions, _ = ns.huntrleaks
+ if warmup < 1 or repetitions < 1:
+ msg = ("Invalid values for the --huntrleaks/-R parameters. The "
+ "number of warmups and repetitions must be at least 1 "
+ "each (1:1).")
+ print(msg, file=sys.stderr, flush=True)
+ sys.exit(2)
+
return ns
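
The new resource is enabled like any other regrtest resource via ``-u``/``--use``;
illustrative invocations (test names are arbitrary)::

    ./python -m test -u walltime test_multiprocessing_spawn
    ./python -m test -u all,-walltime    # every resource except the slow walltime tests
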
diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py
new file mode 100644
index 00000000000000..60f21980c10dd0
--- /dev/null
+++ b/Lib/test/libregrtest/findtests.py
@@ -0,0 +1,104 @@
+import os
+import sys
+import unittest
+
+from test import support
+
+from .utils import (
+ StrPath, TestName, TestTuple, TestList, FilterTuple,
+ abs_module_name, count, printlist)
+
+
+# If these test directories are encountered, recurse into them and treat each
+# "test_*.py" file or each sub-directory as a separate test module. This can
+# increase parallelism.
+#
+# Beware that this can't generally be done for any directory with sub-tests,
+# as the __init__.py may do things that alter which tests are to be run.
+SPLITTESTDIRS: set[TestName] = {
+ "test_asyncio",
+ "test_concurrent_futures",
+ "test_future_stmt",
+ "test_multiprocessing_fork",
+ "test_multiprocessing_forkserver",
+ "test_multiprocessing_spawn",
+}
+
+
+def findtestdir(path: StrPath | None = None) -> StrPath:
+ return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
+
+
+def findtests(*, testdir: StrPath | None = None, exclude=(),
+ split_test_dirs: set[TestName] = SPLITTESTDIRS,
+ base_mod: str = "") -> TestList:
+ """Return a list of all applicable test modules."""
+ testdir = findtestdir(testdir)
+ tests = []
+ for name in os.listdir(testdir):
+ mod, ext = os.path.splitext(name)
+ if (not mod.startswith("test_")) or (mod in exclude):
+ continue
+ if base_mod:
+ fullname = f"{base_mod}.{mod}"
+ else:
+ fullname = mod
+ if fullname in split_test_dirs:
+ subdir = os.path.join(testdir, mod)
+ if not base_mod:
+ fullname = f"test.{mod}"
+ tests.extend(findtests(testdir=subdir, exclude=exclude,
+ split_test_dirs=split_test_dirs,
+ base_mod=fullname))
+ elif ext in (".py", ""):
+ tests.append(fullname)
+ return sorted(tests)
+
+
+def split_test_packages(tests, *, testdir: StrPath | None = None, exclude=(),
+ split_test_dirs=SPLITTESTDIRS):
+ testdir = findtestdir(testdir)
+ splitted = []
+ for name in tests:
+ if name in split_test_dirs:
+ subdir = os.path.join(testdir, name)
+ splitted.extend(findtests(testdir=subdir, exclude=exclude,
+ split_test_dirs=split_test_dirs,
+ base_mod=name))
+ else:
+ splitted.append(name)
+ return splitted
+
+
+def _list_cases(suite):
+ for test in suite:
+ if isinstance(test, unittest.loader._FailedTest):
+ continue
+ if isinstance(test, unittest.TestSuite):
+ _list_cases(test)
+ elif isinstance(test, unittest.TestCase):
+ if support.match_test(test):
+ print(test.id())
+
+def list_cases(tests: TestTuple, *,
+ match_tests: FilterTuple | None = None,
+ ignore_tests: FilterTuple | None = None,
+ test_dir: StrPath | None = None):
+ support.verbose = False
+ support.set_match_tests(match_tests, ignore_tests)
+
+ skipped = []
+ for test_name in tests:
+ module_name = abs_module_name(test_name, test_dir)
+ try:
+ suite = unittest.defaultTestLoader.loadTestsFromName(module_name)
+ _list_cases(suite)
+ except unittest.SkipTest:
+ skipped.append(test_name)
+
+ if skipped:
+ sys.stdout.flush()
+ stderr = sys.stderr
+ print(file=stderr)
+ print(count(len(skipped), "test"), "skipped:", file=stderr)
+ printlist(skipped, file=stderr)
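
A hedged sketch of how the splitting helpers behave (internal test-runner API,
runnable only from a CPython checkout)::

    from test.libregrtest.findtests import split_test_packages

    selected = split_test_packages(["test_asyncio", "test_os"])
    # "test_asyncio" is listed in SPLITTESTDIRS, so it expands into its
    # test_asyncio.* submodules; "test_os" passes through unchanged.
    print(any(name.startswith("test_asyncio.") for name in selected))
    print("test_os" in selected)
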
diff --git a/Lib/test/libregrtest/logger.py b/Lib/test/libregrtest/logger.py
new file mode 100644
index 00000000000000..2f0c4bf1c84b5c
--- /dev/null
+++ b/Lib/test/libregrtest/logger.py
@@ -0,0 +1,85 @@
+import os
+import time
+
+from .results import TestResults
+from .runtests import RunTests
+from .utils import print_warning, MS_WINDOWS
+
+if MS_WINDOWS:
+ from .win_utils import WindowsLoadTracker
+
+
+class Logger:
+ def __init__(self, results: TestResults, quiet: bool, pgo: bool):
+ self.start_time = time.perf_counter()
+ self.test_count_text = ''
+ self.test_count_width = 3
+ self.win_load_tracker: WindowsLoadTracker | None = None
+ self._results: TestResults = results
+ self._quiet: bool = quiet
+ self._pgo: bool = pgo
+
+ def log(self, line: str = '') -> None:
+ empty = not line
+
+ # add the system load prefix: "load avg: 1.80 "
+ load_avg = self.get_load_avg()
+ if load_avg is not None:
+ line = f"load avg: {load_avg:.2f} {line}"
+
+ # add the timestamp prefix: "0:01:05 "
+ log_time = time.perf_counter() - self.start_time
+
+ mins, secs = divmod(int(log_time), 60)
+ hours, mins = divmod(mins, 60)
+ formatted_log_time = "%d:%02d:%02d" % (hours, mins, secs)
+
+ line = f"{formatted_log_time} {line}"
+ if empty:
+ line = line[:-1]
+
+ print(line, flush=True)
+
+ def get_load_avg(self) -> float | None:
+ if hasattr(os, 'getloadavg'):
+ return os.getloadavg()[0]
+ if self.win_load_tracker is not None:
+ return self.win_load_tracker.getloadavg()
+ return None
+
+ def display_progress(self, test_index: int, text: str) -> None:
+ if self._quiet:
+ return
+ results = self._results
+
+ # "[ 51/405/1] test_tcl passed"
+ line = f"{test_index:{self.test_count_width}}{self.test_count_text}"
+ fails = len(results.bad) + len(results.env_changed)
+ if fails and not self._pgo:
+ line = f"{line}/{fails}"
+ self.log(f"[{line}] {text}")
+
+ def set_tests(self, runtests: RunTests) -> None:
+ if runtests.forever:
+ self.test_count_text = ''
+ self.test_count_width = 3
+ else:
+ self.test_count_text = '/{}'.format(len(runtests.tests))
+ self.test_count_width = len(self.test_count_text) - 1
+
+ def start_load_tracker(self) -> None:
+ if not MS_WINDOWS:
+ return
+
+ try:
+ self.win_load_tracker = WindowsLoadTracker()
+ except PermissionError as error:
+ # Standard accounts may not have access to the performance
+ # counters.
+ print_warning(f'Failed to create WindowsLoadTracker: {error}')
+
+ def stop_load_tracker(self) -> None:
+ if self.win_load_tracker is None:
+ return
+ self.win_load_tracker.close()
+ self.win_load_tracker = None
diff --git a/Lib/test/libregrtest/main.py b/Lib/test/libregrtest/main.py
index 3d290c849b43ed..a9dd08702deb59 100644
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -1,46 +1,26 @@
-import faulthandler
-import locale
import os
-import platform
import random
import re
import sys
-import sysconfig
-import tempfile
import time
-import unittest
-from test.libregrtest.cmdline import _parse_args
-from test.libregrtest.runtest import (
- findtests, split_test_packages, runtest, get_abs_module, is_failed,
- PROGRESS_MIN_TIME,
- Passed, Failed, EnvChanged, Skipped, ResourceDenied, Interrupted,
- ChildError, DidNotRun)
-from test.libregrtest.setup import setup_tests
-from test.libregrtest.pgo import setup_pgo_tests
-from test.libregrtest.utils import (removepy, count, format_duration,
- printlist, get_build_info)
+
from test import support
from test.support import os_helper
-from test.support import threading_helper
-
-
-# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()).
-# Used to protect against threading._shutdown() hang.
-# Must be smaller than buildbot "1200 seconds without output" limit.
-EXIT_TIMEOUT = 120.0
-# gh-90681: When rerunning tests, we might need to rerun the whole
-# class or module suite if some its life-cycle hooks fail.
-# Test level hooks are not affected.
-_TEST_LIFECYCLE_HOOKS = frozenset((
- 'setUpClass', 'tearDownClass',
- 'setUpModule', 'tearDownModule',
-))
-
-EXITCODE_BAD_TEST = 2
-EXITCODE_INTERRUPTED = 130
-EXITCODE_ENV_CHANGED = 3
-EXITCODE_NO_TESTS_RAN = 4
+from .cmdline import _parse_args, Namespace
+from .findtests import findtests, split_test_packages, list_cases
+from .logger import Logger
+from .result import State
+from .runtests import RunTests, HuntRefleak
+from .setup import setup_process, setup_test_dir
+from .single import run_single_test, PROGRESS_MIN_TIME
+from .pgo import setup_pgo_tests
+from .results import TestResults
+from .utils import (
+ StrPath, StrJSON, TestName, TestList, TestTuple, FilterTuple,
+ strip_py_suffix, count, format_duration,
+ printlist, get_temp_dir, get_work_dir, exit_timeout,
+ display_header, cleanup_temp_dir)
class Regrtest:
@@ -66,260 +46,201 @@ class Regrtest:
directly to set the values that would normally be set by flags
on the command line.
"""
- def __init__(self):
- # Namespace of command line options
- self.ns = None
+ def __init__(self, ns: Namespace):
+ # Log verbosity
+ self.verbose: int = int(ns.verbose)
+ self.quiet: bool = ns.quiet
+ self.pgo: bool = ns.pgo
+ self.pgo_extended: bool = ns.pgo_extended
+
+ # Test results
+ self.results: TestResults = TestResults()
+ self.first_state: str | None = None
+
+ # Logger
+ self.logger = Logger(self.results, self.quiet, self.pgo)
+
+ # Actions
+ self.want_header: bool = ns.header
+ self.want_list_tests: bool = ns.list_tests
+ self.want_list_cases: bool = ns.list_cases
+ self.want_wait: bool = ns.wait
+ self.want_cleanup: bool = ns.cleanup
+ self.want_rerun: bool = ns.rerun
+ self.want_run_leaks: bool = ns.runleaks
+
+ # Select tests
+ if ns.match_tests:
+ self.match_tests: FilterTuple | None = tuple(ns.match_tests)
+ else:
+ self.match_tests = None
+ if ns.ignore_tests:
+ self.ignore_tests: FilterTuple | None = tuple(ns.ignore_tests)
+ else:
+ self.ignore_tests = None
+ self.exclude: bool = ns.exclude
+ self.fromfile: StrPath | None = ns.fromfile
+ self.starting_test: TestName | None = ns.start
+ self.cmdline_args: TestList = ns.args
+
+ # Workers
+ if ns.use_mp is None:
+ num_workers = 0 # run sequentially
+ elif ns.use_mp <= 0:
+ num_workers = -1 # use the number of CPUs
+ else:
+ num_workers = ns.use_mp
+ self.num_workers: int = num_workers
+ self.worker_json: StrJSON | None = ns.worker_json
+
+ # Options to run tests
+ self.fail_fast: bool = ns.failfast
+ self.fail_env_changed: bool = ns.fail_env_changed
+ self.fail_rerun: bool = ns.fail_rerun
+ self.forever: bool = ns.forever
+ self.randomize: bool = ns.randomize
+ self.random_seed: int | None = ns.random_seed
+ self.output_on_failure: bool = ns.verbose3
+ self.timeout: float | None = ns.timeout
+ if ns.huntrleaks:
+ warmups, runs, filename = ns.huntrleaks
+ filename = os.path.abspath(filename)
+ self.hunt_refleak: HuntRefleak | None = HuntRefleak(warmups, runs, filename)
+ else:
+ self.hunt_refleak = None
+ self.test_dir: StrPath | None = ns.testdir
+ self.junit_filename: StrPath | None = ns.xmlpath
+ self.memory_limit: str | None = ns.memlimit
+ self.gc_threshold: int | None = ns.threshold
+ self.use_resources: tuple[str, ...] = tuple(ns.use_resources)
+ if ns.python:
+ self.python_cmd: tuple[str, ...] | None = tuple(ns.python)
+ else:
+ self.python_cmd = None
+ self.coverage: bool = ns.trace
+ self.coverage_dir: StrPath | None = ns.coverdir
+ self.tmp_dir: StrPath | None = ns.tempdir
# tests
- self.tests = []
- self.selected = []
-
- # test results
- self.good = []
- self.bad = []
- self.skipped = []
- self.resource_denieds = []
- self.environment_changed = []
- self.run_no_tests = []
- self.need_rerun = []
- self.rerun = []
- self.first_result = None
- self.interrupted = False
-
- # used by --slow
- self.test_times = []
-
- # used by --coverage, trace.Trace instance
- self.tracer = None
+ self.first_runtests: RunTests | None = None
+
+ # used by --slowest
+ self.print_slowest: bool = ns.print_slow
# used to display the progress bar "[ 3/100]"
- self.start_time = time.monotonic()
- self.test_count = ''
- self.test_count_width = 1
+ self.start_time = time.perf_counter()
# used by --single
- self.next_single_test = None
- self.next_single_filename = None
-
- # used by --junit-xml
- self.testsuite_xml = None
-
- # misc
- self.win_load_tracker = None
- self.tmp_dir = None
- self.worker_test_name = None
-
- def get_executed(self):
- return (set(self.good) | set(self.bad) | set(self.skipped)
- | set(self.resource_denieds) | set(self.environment_changed)
- | set(self.run_no_tests))
-
- def accumulate_result(self, result, rerun=False):
- test_name = result.name
-
- if not isinstance(result, (ChildError, Interrupted)) and not rerun:
- self.test_times.append((result.duration_sec, test_name))
-
- if isinstance(result, Passed):
- self.good.append(test_name)
- elif isinstance(result, ResourceDenied):
- self.skipped.append(test_name)
- self.resource_denieds.append(test_name)
- elif isinstance(result, Skipped):
- self.skipped.append(test_name)
- elif isinstance(result, EnvChanged):
- self.environment_changed.append(test_name)
- elif isinstance(result, Failed):
- if not rerun:
- self.bad.append(test_name)
- self.need_rerun.append(result)
- elif isinstance(result, DidNotRun):
- self.run_no_tests.append(test_name)
- elif isinstance(result, Interrupted):
- self.interrupted = True
- else:
- raise ValueError("invalid test result: %r" % result)
-
- if rerun and not isinstance(result, (Failed, Interrupted)):
- self.bad.remove(test_name)
-
- xml_data = result.xml_data
- if xml_data:
- import xml.etree.ElementTree as ET
- for e in xml_data:
- try:
- self.testsuite_xml.append(ET.fromstring(e))
- except ET.ParseError:
- print(xml_data, file=sys.__stderr__)
- raise
+ self.single_test_run: bool = ns.single
+ self.next_single_test: TestName | None = None
+ self.next_single_filename: StrPath | None = None
def log(self, line=''):
- empty = not line
-
- # add the system load prefix: "load avg: 1.80 "
- load_avg = self.getloadavg()
- if load_avg is not None:
- line = f"load avg: {load_avg:.2f} {line}"
-
- # add the timestamp prefix: "0:01:05 "
- test_time = time.monotonic() - self.start_time
-
- mins, secs = divmod(int(test_time), 60)
- hours, mins = divmod(mins, 60)
- test_time = "%d:%02d:%02d" % (hours, mins, secs)
-
- line = f"{test_time} {line}"
- if empty:
- line = line[:-1]
-
- print(line, flush=True)
-
- def display_progress(self, test_index, text):
- if self.ns.quiet:
- return
-
- # "[ 51/405/1] test_tcl passed"
- line = f"{test_index:{self.test_count_width}}{self.test_count}"
- fails = len(self.bad) + len(self.environment_changed)
- if fails and not self.ns.pgo:
- line = f"{line}/{fails}"
- self.log(f"[{line}] {text}")
-
- def parse_args(self, kwargs):
- ns = _parse_args(sys.argv[1:], **kwargs)
-
- if ns.xmlpath:
- support.junit_xml_list = self.testsuite_xml = []
+ self.logger.log(line)
- worker_args = ns.worker_args
- if worker_args is not None:
- from test.libregrtest.runtest_mp import parse_worker_args
- ns, test_name = parse_worker_args(ns.worker_args)
- ns.worker_args = worker_args
- self.worker_test_name = test_name
-
- # Strip .py extensions.
- removepy(ns.args)
-
- if ns.huntrleaks:
- warmup, repetitions, _ = ns.huntrleaks
- if warmup < 1 or repetitions < 1:
- msg = ("Invalid values for the --huntrleaks/-R parameters. The "
- "number of warmups and repetitions must be at least 1 "
- "each (1:1).")
- print(msg, file=sys.stderr, flush=True)
- sys.exit(2)
-
- if ns.tempdir:
- ns.tempdir = os.path.expanduser(ns.tempdir)
-
- self.ns = ns
-
- def find_tests(self, tests):
- self.tests = tests
-
- if self.ns.single:
+ def find_tests(self, tests: TestList | None = None) -> tuple[TestTuple, TestList | None]:
+ if self.single_test_run:
self.next_single_filename = os.path.join(self.tmp_dir, 'pynexttest')
try:
with open(self.next_single_filename, 'r') as fp:
next_test = fp.read().strip()
- self.tests = [next_test]
+ tests = [next_test]
except OSError:
pass
- if self.ns.fromfile:
- self.tests = []
+ if self.fromfile:
+ tests = []
# regex to match 'test_builtin' in line:
# '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
- with open(os.path.join(os_helper.SAVEDCWD, self.ns.fromfile)) as fp:
+ with open(os.path.join(os_helper.SAVEDCWD, self.fromfile)) as fp:
for line in fp:
line = line.split('#', 1)[0]
line = line.strip()
match = regex.search(line)
if match is not None:
- self.tests.append(match.group())
+ tests.append(match.group())
- removepy(self.tests)
+ strip_py_suffix(tests)
- if self.ns.pgo:
+ if self.pgo:
# add default PGO tests if no tests are specified
- setup_pgo_tests(self.ns)
+ setup_pgo_tests(self.cmdline_args, self.pgo_extended)
- exclude = set()
- if self.ns.exclude:
- for arg in self.ns.args:
- exclude.add(arg)
- self.ns.args = []
+ exclude_tests = set()
+ if self.exclude:
+ for arg in self.cmdline_args:
+ exclude_tests.add(arg)
+ self.cmdline_args = []
- alltests = findtests(testdir=self.ns.testdir, exclude=exclude)
+ alltests = findtests(testdir=self.test_dir,
+ exclude=exclude_tests)
- if not self.ns.fromfile:
- self.selected = self.tests or self.ns.args
- if self.selected:
- self.selected = split_test_packages(self.selected)
+ if not self.fromfile:
+ selected = tests or self.cmdline_args
+ if selected:
+ selected = split_test_packages(selected)
else:
- self.selected = alltests
+ selected = alltests
else:
- self.selected = self.tests
+ selected = tests
- if self.ns.single:
- self.selected = self.selected[:1]
+ if self.single_test_run:
+ selected = selected[:1]
try:
- pos = alltests.index(self.selected[0])
+ pos = alltests.index(selected[0])
self.next_single_test = alltests[pos + 1]
except IndexError:
pass
# Remove all the selected tests that precede start if it's set.
- if self.ns.start:
+ if self.starting_test:
try:
- del self.selected[:self.selected.index(self.ns.start)]
+ del selected[:selected.index(self.starting_test)]
except ValueError:
- print("Couldn't find starting test (%s), using all tests"
- % self.ns.start, file=sys.stderr)
-
- if self.ns.randomize:
- if self.ns.random_seed is None:
- self.ns.random_seed = random.randrange(10000000)
- random.seed(self.ns.random_seed)
- random.shuffle(self.selected)
-
- def list_tests(self):
- for name in self.selected:
- print(name)
+ print(f"Cannot find starting test: {self.starting_test}")
+ sys.exit(1)
- def _list_cases(self, suite):
- for test in suite:
- if isinstance(test, unittest.loader._FailedTest):
- continue
- if isinstance(test, unittest.TestSuite):
- self._list_cases(test)
- elif isinstance(test, unittest.TestCase):
- if support.match_test(test):
- print(test.id())
-
- def list_cases(self):
- support.verbose = False
- support.set_match_tests(self.ns.match_tests, self.ns.ignore_tests)
-
- for test_name in self.selected:
- abstest = get_abs_module(self.ns, test_name)
- try:
- suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
- self._list_cases(suite)
- except unittest.SkipTest:
- self.skipped.append(test_name)
+ if self.randomize:
+ if self.random_seed is None:
+ self.random_seed = random.randrange(100_000_000)
+ random.seed(self.random_seed)
+ random.shuffle(selected)
- if self.skipped:
- print(file=sys.stderr)
- print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr)
- printlist(self.skipped, file=sys.stderr)
+ return (tuple(selected), tests)
- def rerun_failed_tests(self):
- self.log()
+ @staticmethod
+ def list_tests(tests: TestTuple):
+ for name in tests:
+ print(name)
- if self.ns.python:
+ def _rerun_failed_tests(self, runtests: RunTests):
+ # Configure the runner to re-run tests
+ if self.num_workers == 0:
+ # Always run tests in fresh processes to have more deterministic
+ # initial state. Don't re-run tests in parallel; limit the run to a
+ # single worker process so that side effects (on the system load
+ # and timings) between tests are kept to a minimum.
+ self.num_workers = 1
+
+ tests, match_tests_dict = self.results.prepare_rerun()
+
+ # Re-run failed tests
+ self.log(f"Re-running {len(tests)} failed tests in verbose mode in subprocesses")
+ runtests = runtests.copy(
+ tests=tests,
+ rerun=True,
+ verbose=True,
+ forever=False,
+ fail_fast=False,
+ match_tests_dict=match_tests_dict,
+ output_on_failure=False)
+ self.logger.set_tests(runtests)
+ self._run_tests_mp(runtests, self.num_workers)
+ return runtests
+
+ def rerun_failed_tests(self, runtests: RunTests):
+ if self.python_cmd:
# Temp patch for https://github.com/python/cpython/issues/94052
self.log(
"Re-running failed tests is not supported with --python "
@@ -327,304 +248,107 @@ def rerun_failed_tests(self):
)
return
- self.ns.verbose = True
- self.ns.failfast = False
- self.ns.verbose3 = False
-
- self.first_result = self.get_tests_result()
-
- self.log("Re-running failed tests in verbose mode")
- rerun_list = list(self.need_rerun)
- self.need_rerun.clear()
- for result in rerun_list:
- test_name = result.name
- self.rerun.append(test_name)
-
- errors = result.errors or []
- failures = result.failures or []
- error_names = [
- self.normalize_test_name(test_full_name, is_error=True)
- for (test_full_name, *_) in errors]
- failure_names = [
- self.normalize_test_name(test_full_name)
- for (test_full_name, *_) in failures]
- self.ns.verbose = True
- orig_match_tests = self.ns.match_tests
- if errors or failures:
- if self.ns.match_tests is None:
- self.ns.match_tests = []
- self.ns.match_tests.extend(error_names)
- self.ns.match_tests.extend(failure_names)
- matching = "matching: " + ", ".join(self.ns.match_tests)
- self.log(f"Re-running {test_name} in verbose mode ({matching})")
- else:
- self.log(f"Re-running {test_name} in verbose mode")
- result = runtest(self.ns, test_name)
- self.ns.match_tests = orig_match_tests
+ self.first_state = self.get_state()
- self.accumulate_result(result, rerun=True)
+ print()
+ rerun_runtests = self._rerun_failed_tests(runtests)
- if isinstance(result, Interrupted):
- break
+ if self.results.bad:
+ print(count(len(self.results.bad), 'test'), "failed again:")
+ printlist(self.results.bad)
- if self.bad:
- print(count(len(self.bad), 'test'), "failed again:")
- printlist(self.bad)
-
- self.display_result()
-
- def normalize_test_name(self, test_full_name, *, is_error=False):
- short_name = test_full_name.split(" ")[0]
- if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
- # This means that we have a failure in a life-cycle hook,
- # we need to rerun the whole module or class suite.
- # Basically the error looks like this:
- # ERROR: setUpClass (test.test_reg_ex.RegTest)
- # or
- # ERROR: setUpModule (test.test_reg_ex)
- # So, we need to parse the class / module name.
- lpar = test_full_name.index('(')
- rpar = test_full_name.index(')')
- return test_full_name[lpar + 1: rpar].split('.')[-1]
- return short_name
-
- def display_result(self):
+ self.display_result(rerun_runtests)
+
+ def display_result(self, runtests):
# If running the test suite for PGO then no one cares about results.
- if self.ns.pgo:
+ if runtests.pgo:
return
+ state = self.get_state()
print()
- print("== Tests result: %s ==" % self.get_tests_result())
-
- if self.interrupted:
- print("Test suite interrupted by signal SIGINT.")
-
- omitted = set(self.selected) - self.get_executed()
- if omitted:
- print()
- print(count(len(omitted), "test"), "omitted:")
- printlist(omitted)
-
- if self.good and not self.ns.quiet:
- print()
- if (not self.bad
- and not self.skipped
- and not self.interrupted
- and len(self.good) > 1):
- print("All", end=' ')
- print(count(len(self.good), "test"), "OK.")
-
- if self.ns.print_slow:
- self.test_times.sort(reverse=True)
- print()
- print("10 slowest tests:")
- for test_time, test in self.test_times[:10]:
- print("- %s: %s" % (test, format_duration(test_time)))
-
- if self.bad:
- print()
- print(count(len(self.bad), "test"), "failed:")
- printlist(self.bad)
-
- if self.environment_changed:
- print()
- print("{} altered the execution environment:".format(
- count(len(self.environment_changed), "test")))
- printlist(self.environment_changed)
-
- if self.skipped and not self.ns.quiet:
- print()
- print(count(len(self.skipped), "test"), "skipped:")
- printlist(self.skipped)
-
- if self.rerun:
- print()
- print("%s:" % count(len(self.rerun), "re-run test"))
- printlist(self.rerun)
-
- if self.run_no_tests:
- print()
- print(count(len(self.run_no_tests), "test"), "run no tests:")
- printlist(self.run_no_tests)
-
- def run_tests_sequential(self):
- if self.ns.trace:
+ print(f"== Tests result: {state} ==")
+
+ self.results.display_result(runtests.tests,
+ self.quiet, self.print_slowest)
+
+ def run_test(self, test_name: TestName, runtests: RunTests, tracer):
+ if tracer is not None:
+ # If we're tracing code coverage, run the test through the tracer
+ # so that its line counts are recorded.
+ cmd = ('result = run_single_test(test_name, runtests)')
+ namespace = dict(locals())
+ tracer.runctx(cmd, globals=globals(), locals=namespace)
+ result = namespace['result']
+ else:
+ result = run_single_test(test_name, runtests)
+
+ self.results.accumulate_result(result, runtests)
+
+ return result
+
+ def run_tests_sequentially(self, runtests):
+ if self.coverage:
import trace
- self.tracer = trace.Trace(trace=False, count=True)
+ tracer = trace.Trace(trace=False, count=True)
+ else:
+ tracer = None
save_modules = sys.modules.keys()
- msg = "Run tests sequentially"
- if self.ns.timeout:
- msg += " (timeout: %s)" % format_duration(self.ns.timeout)
+ jobs = runtests.get_jobs()
+ if jobs is not None:
+ tests = count(jobs, 'test')
+ else:
+ tests = 'tests'
+ msg = f"Run {tests} sequentially"
+ if runtests.timeout:
+ msg += " (timeout: %s)" % format_duration(runtests.timeout)
self.log(msg)
previous_test = None
- for test_index, test_name in enumerate(self.tests, 1):
- start_time = time.monotonic()
+ tests_iter = runtests.iter_tests()
+ for test_index, test_name in enumerate(tests_iter, 1):
+ start_time = time.perf_counter()
text = test_name
if previous_test:
text = '%s -- %s' % (text, previous_test)
- self.display_progress(test_index, text)
-
- if self.tracer:
- # If we're tracing code coverage, then we don't exit with status
- # if on a false return value from main.
- cmd = ('result = runtest(self.ns, test_name); '
- 'self.accumulate_result(result)')
- ns = dict(locals())
- self.tracer.runctx(cmd, globals=globals(), locals=ns)
- result = ns['result']
- else:
- result = runtest(self.ns, test_name)
- self.accumulate_result(result)
+ self.logger.display_progress(test_index, text)
- if isinstance(result, Interrupted):
- break
-
- previous_test = str(result)
- test_time = time.monotonic() - start_time
- if test_time >= PROGRESS_MIN_TIME:
- previous_test = "%s in %s" % (previous_test, format_duration(test_time))
- elif isinstance(result, Passed):
- # be quiet: say nothing if the test passed shortly
- previous_test = None
+ result = self.run_test(test_name, runtests, tracer)
# Unload the newly imported modules (best effort finalization)
for module in sys.modules.keys():
if module not in save_modules and module.startswith("test."):
support.unload(module)
- if self.ns.failfast and is_failed(result, self.ns):
+ if result.must_stop(self.fail_fast, self.fail_env_changed):
break
+ previous_test = str(result)
+ test_time = time.perf_counter() - start_time
+ if test_time >= PROGRESS_MIN_TIME:
+ previous_test = "%s in %s" % (previous_test, format_duration(test_time))
+ elif result.state == State.PASSED:
+ # be quiet: say nothing if the test passed quickly
+ previous_test = None
+
if previous_test:
print(previous_test)
- def _test_forever(self, tests):
- while True:
- for test_name in tests:
- yield test_name
- if self.bad:
- return
- if self.ns.fail_env_changed and self.environment_changed:
- return
-
- def display_header(self):
- # Print basic platform information
- print("==", platform.python_implementation(), *sys.version.split())
- print("==", platform.platform(aliased=True),
- "%s-endian" % sys.byteorder)
- print("== Python build:", ' '.join(get_build_info()))
- print("== cwd:", os.getcwd())
- cpu_count = os.cpu_count()
- if cpu_count:
- print("== CPU count:", cpu_count)
- print("== encodings: locale=%s, FS=%s"
- % (locale.getencoding(), sys.getfilesystemencoding()))
- self.display_sanitizers()
-
- def display_sanitizers(self):
- # This makes it easier to remember what to set in your local
- # environment when trying to reproduce a sanitizer failure.
- asan = support.check_sanitizer(address=True)
- msan = support.check_sanitizer(memory=True)
- ubsan = support.check_sanitizer(ub=True)
- sanitizers = []
- if asan:
- sanitizers.append("address")
- if msan:
- sanitizers.append("memory")
- if ubsan:
- sanitizers.append("undefined behavior")
- if not sanitizers:
- return
-
- print(f"== sanitizers: {', '.join(sanitizers)}")
- for sanitizer, env_var in (
- (asan, "ASAN_OPTIONS"),
- (msan, "MSAN_OPTIONS"),
- (ubsan, "UBSAN_OPTIONS"),
- ):
- options= os.environ.get(env_var)
- if sanitizer and options is not None:
- print(f"== {env_var}={options!r}")
-
- def no_tests_run(self):
- return not any((self.good, self.bad, self.skipped, self.interrupted,
- self.environment_changed))
-
- def get_tests_result(self):
- result = []
- if self.bad:
- result.append("FAILURE")
- elif self.ns.fail_env_changed and self.environment_changed:
- result.append("ENV CHANGED")
- elif self.no_tests_run():
- result.append("NO TESTS RAN")
-
- if self.interrupted:
- result.append("INTERRUPTED")
-
- if not result:
- result.append("SUCCESS")
-
- result = ', '.join(result)
- if self.first_result:
- result = '%s then %s' % (self.first_result, result)
- return result
+ return tracer
- def run_tests(self):
- # For a partial run, we do not need to clutter the output.
- if (self.ns.header
- or not(self.ns.pgo or self.ns.quiet or self.ns.single
- or self.tests or self.ns.args)):
- self.display_header()
-
- if self.ns.huntrleaks:
- warmup, repetitions, _ = self.ns.huntrleaks
- if warmup < 3:
- msg = ("WARNING: Running tests with --huntrleaks/-R and less than "
- "3 warmup repetitions can give false positives!")
- print(msg, file=sys.stdout, flush=True)
-
- if self.ns.randomize:
- print("Using random seed", self.ns.random_seed)
-
- if self.ns.forever:
- self.tests = self._test_forever(list(self.selected))
- self.test_count = ''
- self.test_count_width = 3
- else:
- self.tests = iter(self.selected)
- self.test_count = '/{}'.format(len(self.selected))
- self.test_count_width = len(self.test_count) - 1
-
- if self.ns.use_mp:
- from test.libregrtest.runtest_mp import run_tests_multiprocess
- # If we're on windows and this is the parent runner (not a worker),
- # track the load average.
- if sys.platform == 'win32' and self.worker_test_name is None:
- from test.libregrtest.win_utils import WindowsLoadTracker
-
- try:
- self.win_load_tracker = WindowsLoadTracker()
- except PermissionError as error:
- # Standard accounts may not have access to the performance
- # counters.
- print(f'Failed to create WindowsLoadTracker: {error}')
+ def get_state(self):
+ state = self.results.get_state(self.fail_env_changed)
+ if self.first_state:
+ state = f'{self.first_state} then {state}'
+ return state
- try:
- run_tests_multiprocess(self)
- finally:
- if self.win_load_tracker is not None:
- self.win_load_tracker.close()
- self.win_load_tracker = None
- else:
- self.run_tests_sequential()
+ def _run_tests_mp(self, runtests: RunTests, num_workers: int) -> None:
+ from .run_workers import RunWorkers
+ RunWorkers(num_workers, runtests, self.logger, self.results).run()
- def finalize(self):
+ def finalize_tests(self, tracer):
if self.next_single_filename:
if self.next_single_test:
with open(self.next_single_filename, 'w') as fp:
@@ -632,195 +356,156 @@ def finalize(self):
else:
os.unlink(self.next_single_filename)
- if self.tracer:
- r = self.tracer.results()
- r.write_results(show_missing=True, summary=True,
- coverdir=self.ns.coverdir)
-
- print()
- duration = time.monotonic() - self.start_time
- print("Total duration: %s" % format_duration(duration))
- print("Tests result: %s" % self.get_tests_result())
+ if tracer is not None:
+ results = tracer.results()
+ results.write_results(show_missing=True, summary=True,
+ coverdir=self.coverage_dir)
- if self.ns.runleaks:
+ if self.want_run_leaks:
os.system("leaks %d" % os.getpid())
- def save_xml_result(self):
- if not self.ns.xmlpath and not self.testsuite_xml:
- return
-
- import xml.etree.ElementTree as ET
- root = ET.Element("testsuites")
-
- # Manually count the totals for the overall summary
- totals = {'tests': 0, 'errors': 0, 'failures': 0}
- for suite in self.testsuite_xml:
- root.append(suite)
- for k in totals:
- try:
- totals[k] += int(suite.get(k, 0))
- except ValueError:
- pass
-
- for k, v in totals.items():
- root.set(k, str(v))
-
- xmlpath = os.path.join(os_helper.SAVEDCWD, self.ns.xmlpath)
- with open(xmlpath, 'wb') as f:
- for s in ET.tostringlist(root):
- f.write(s)
-
- def fix_umask(self):
- if support.is_emscripten:
- # Emscripten has default umask 0o777, which breaks some tests.
- # see https://github.com/emscripten-core/emscripten/issues/17269
- old_mask = os.umask(0)
- if old_mask == 0o777:
- os.umask(0o027)
- else:
- os.umask(old_mask)
-
- def set_temp_dir(self):
- if self.ns.tempdir:
- self.tmp_dir = self.ns.tempdir
-
- if not self.tmp_dir:
- # When tests are run from the Python build directory, it is best practice
- # to keep the test files in a subfolder. This eases the cleanup of leftover
- # files using the "make distclean" command.
- if sysconfig.is_python_build():
- self.tmp_dir = sysconfig.get_config_var('abs_builddir')
- if self.tmp_dir is None:
- # bpo-30284: On Windows, only srcdir is available. Using
- # abs_builddir mostly matters on UNIX when building Python
- # out of the source tree, especially when the source tree
- # is read only.
- self.tmp_dir = sysconfig.get_config_var('srcdir')
- self.tmp_dir = os.path.join(self.tmp_dir, 'build')
- else:
- self.tmp_dir = tempfile.gettempdir()
-
- self.tmp_dir = os.path.abspath(self.tmp_dir)
+ if self.junit_filename:
+ self.results.write_junit(self.junit_filename)
- def create_temp_dir(self):
- os.makedirs(self.tmp_dir, exist_ok=True)
+ def display_summary(self):
+ duration = time.perf_counter() - self.logger.start_time
+ filtered = bool(self.match_tests) or bool(self.ignore_tests)
- # Define a writable temp dir that will be used as cwd while running
- # the tests. The name of the dir includes the pid to allow parallel
- # testing (see the -j option).
- # Emscripten and WASI have stubbed getpid(), Emscripten has only
- # milisecond clock resolution. Use randint() instead.
- if sys.platform in {"emscripten", "wasi"}:
- nounce = random.randint(0, 1_000_000)
- else:
- nounce = os.getpid()
- if self.worker_test_name is not None:
- test_cwd = 'test_python_worker_{}'.format(nounce)
- else:
- test_cwd = 'test_python_{}'.format(nounce)
- test_cwd += os_helper.FS_NONASCII
- test_cwd = os.path.join(self.tmp_dir, test_cwd)
- return test_cwd
-
- def cleanup(self):
- import glob
-
- path = os.path.join(glob.escape(self.tmp_dir), 'test_python_*')
- print("Cleanup %s directory" % self.tmp_dir)
- for name in glob.glob(path):
- if os.path.isdir(name):
- print("Remove directory: %s" % name)
- os_helper.rmtree(name)
- else:
- print("Remove file: %s" % name)
- os_helper.unlink(name)
+ # Total duration
+ print()
+ print("Total duration: %s" % format_duration(duration))
- def main(self, tests=None, **kwargs):
- self.parse_args(kwargs)
+ self.results.display_summary(self.first_runtests, filtered)
+
+ # Result
+ state = self.get_state()
+ print(f"Result: {state}")
+
+ def create_run_tests(self, tests: TestTuple):
+ return RunTests(
+ tests,
+ fail_fast=self.fail_fast,
+ fail_env_changed=self.fail_env_changed,
+ match_tests=self.match_tests,
+ ignore_tests=self.ignore_tests,
+ match_tests_dict=None,
+ rerun=False,
+ forever=self.forever,
+ pgo=self.pgo,
+ pgo_extended=self.pgo_extended,
+ output_on_failure=self.output_on_failure,
+ timeout=self.timeout,
+ verbose=self.verbose,
+ quiet=self.quiet,
+ hunt_refleak=self.hunt_refleak,
+ test_dir=self.test_dir,
+ use_junit=(self.junit_filename is not None),
+ memory_limit=self.memory_limit,
+ gc_threshold=self.gc_threshold,
+ use_resources=self.use_resources,
+ python_cmd=self.python_cmd,
+ randomize=self.randomize,
+ random_seed=self.random_seed,
+ json_file=None,
+ )
+
+ def _run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+ if self.hunt_refleak and self.hunt_refleak.warmups < 3:
+ msg = ("WARNING: Running tests with --huntrleaks/-R and "
+ "less than 3 warmup repetitions can give false positives!")
+ print(msg, file=sys.stdout, flush=True)
+
+ if self.num_workers < 0:
+ # Use all CPUs + 2 extra worker processes for tests
+ # that like to sleep
+ self.num_workers = (os.cpu_count() or 1) + 2
- self.set_temp_dir()
+ # For a partial run, we do not need to clutter the output.
+ if (self.want_header
+ or not(self.pgo or self.quiet or self.single_test_run
+ or tests or self.cmdline_args)):
+ display_header()
- self.fix_umask()
+ if self.randomize:
+ print("Using random seed", self.random_seed)
- if self.ns.cleanup:
- self.cleanup()
- sys.exit(0)
+ runtests = self.create_run_tests(selected)
+ self.first_runtests = runtests
+ self.logger.set_tests(runtests)
- test_cwd = self.create_temp_dir()
+ setup_process()
+ self.logger.start_load_tracker()
try:
- # Run the tests in a context manager that temporarily changes the CWD
- # to a temporary and writable directory. If it's not possible to
- # create or change the CWD, the original CWD will be used.
- # The original CWD is available from os_helper.SAVEDCWD.
- with os_helper.temp_cwd(test_cwd, quiet=True):
- # When using multiprocessing, worker processes will use test_cwd
- # as their parent temporary directory. So when the main process
- # exit, it removes also subdirectories of worker processes.
- self.ns.tempdir = test_cwd
-
- self._main(tests, kwargs)
- except SystemExit as exc:
- # bpo-38203: Python can hang at exit in Py_Finalize(), especially
- # on threading._shutdown() call: put a timeout
- if threading_helper.can_start_thread:
- faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True)
-
- sys.exit(exc.code)
+ if self.num_workers:
+ self._run_tests_mp(runtests, self.num_workers)
+ tracer = None
+ else:
+ tracer = self.run_tests_sequentially(runtests)
- def getloadavg(self):
- if self.win_load_tracker is not None:
- return self.win_load_tracker.getloadavg()
+ self.display_result(runtests)
- if hasattr(os, 'getloadavg'):
- return os.getloadavg()[0]
+ if self.want_rerun and self.results.need_rerun():
+ self.rerun_failed_tests(runtests)
+ finally:
+ self.logger.stop_load_tracker()
- return None
+ self.display_summary()
+ self.finalize_tests(tracer)
- def _main(self, tests, kwargs):
- if self.worker_test_name is not None:
- from test.libregrtest.runtest_mp import run_tests_worker
- run_tests_worker(self.ns, self.worker_test_name)
+ return self.results.get_exitcode(self.fail_env_changed,
+ self.fail_rerun)
- if self.ns.wait:
- input("Press any key to continue...")
+ def run_tests(self, selected: TestTuple, tests: TestList | None) -> int:
+ os.makedirs(self.tmp_dir, exist_ok=True)
+ work_dir = get_work_dir(self.tmp_dir)
- support.PGO = self.ns.pgo
- support.PGO_EXTENDED = self.ns.pgo_extended
+ # Put a timeout on Python exit
+ with exit_timeout():
+ # Run the tests in a context manager that temporarily changes the
+ # CWD to a temporary and writable directory. If it's not possible
+ # to create or change the CWD, the original CWD will be used.
+ # The original CWD is available from os_helper.SAVEDCWD.
+ with os_helper.temp_cwd(work_dir, quiet=True):
+ # When using multiprocessing, worker processes will use
+ # work_dir as their parent temporary directory. So when the
+ # main process exits, it also removes the subdirectories of the
+ # worker processes.
+ return self._run_tests(selected, tests)
- setup_tests(self.ns)
+ def main(self, tests: TestList | None = None):
+ if self.junit_filename and not os.path.isabs(self.junit_filename):
+ self.junit_filename = os.path.abspath(self.junit_filename)
- self.find_tests(tests)
+ strip_py_suffix(self.cmdline_args)
- if self.ns.list_tests:
- self.list_tests()
- sys.exit(0)
+ self.tmp_dir = get_temp_dir(self.tmp_dir)
- if self.ns.list_cases:
- self.list_cases()
+ if self.want_cleanup:
+ cleanup_temp_dir(self.tmp_dir)
sys.exit(0)
- self.run_tests()
- self.display_result()
-
- if self.ns.verbose2 and self.bad:
- self.rerun_failed_tests()
-
- self.finalize()
+ if self.want_wait:
+ input("Press any key to continue...")
- self.save_xml_result()
+ setup_test_dir(self.test_dir)
+ selected, tests = self.find_tests(tests)
+
+ exitcode = 0
+ if self.want_list_tests:
+ self.list_tests(selected)
+ elif self.want_list_cases:
+ list_cases(selected,
+ match_tests=self.match_tests,
+ ignore_tests=self.ignore_tests,
+ test_dir=self.test_dir)
+ else:
+ exitcode = self.run_tests(selected, tests)
- if self.bad:
- sys.exit(EXITCODE_BAD_TEST)
- if self.interrupted:
- sys.exit(EXITCODE_INTERRUPTED)
- if self.ns.fail_env_changed and self.environment_changed:
- sys.exit(EXITCODE_ENV_CHANGED)
- if self.no_tests_run():
- sys.exit(EXITCODE_NO_TESTS_RAN)
- sys.exit(0)
+ sys.exit(exitcode)
def main(tests=None, **kwargs):
"""Run the Python suite."""
- Regrtest().main(tests=tests, **kwargs)
+ ns = _parse_args(sys.argv[1:], **kwargs)
+ Regrtest(ns).main(tests=tests)
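
For context, the sequential path above drives coverage through the standard-library trace module: run_tests_sequentially() creates the tracer, run_test() runs each test under runctx(), and finalize_tests() writes the report. A minimal sketch of the same pattern, using a made-up demo() function and coverage directory rather than regrtest itself:

    import trace

    def demo():
        return sum(range(10))

    # Count executed lines without echoing them, like run_tests_sequentially()
    tracer = trace.Trace(trace=False, count=True)
    namespace = {'demo': demo, 'result': None}
    tracer.runctx('result = demo()', globals=globals(), locals=namespace)
    print("demo() returned", namespace['result'])

    # Write per-module ".cover" files, like finalize_tests()
    tracer.results().write_results(show_missing=True, summary=True,
                                   coverdir='coverage-demo')
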
diff --git a/Lib/test/libregrtest/mypy.ini b/Lib/test/libregrtest/mypy.ini
new file mode 100644
index 00000000000000..fefc347728a701
--- /dev/null
+++ b/Lib/test/libregrtest/mypy.ini
@@ -0,0 +1,33 @@
+# Config file for running mypy on libregrtest.
+# Run mypy by invoking `mypy --config-file Lib/test/libregrtest/mypy.ini`
+# on the command line from the repo root.
+
+[mypy]
+files = Lib/test/libregrtest
+explicit_package_bases = True
+python_version = 3.11
+platform = linux
+pretty = True
+
+# Enable most of the stricter settings
+enable_error_code = ignore-without-code
+strict = True
+
+# Various stricter settings that we can't yet enable
+# Try to enable these in the following order:
+disallow_any_generics = False
+disallow_incomplete_defs = False
+disallow_untyped_calls = False
+disallow_untyped_defs = False
+check_untyped_defs = False
+warn_return_any = False
+
+disable_error_code = return
+
+# Enable --strict-optional for these ASAP:
+[mypy-Lib.test.libregrtest.main.*,Lib.test.libregrtest.run_workers.*,Lib.test.libregrtest.worker.*,Lib.test.libregrtest.single.*,Lib.test.libregrtest.results.*,Lib.test.libregrtest.utils.*]
+strict_optional = False
+
+# Various internal modules that typeshed deliberately doesn't have stubs for:
+[mypy-_abc.*,_opcode.*,_overlapped.*,_testcapi.*,_testinternalcapi.*,test.*]
+ignore_missing_imports = True
diff --git a/Lib/test/libregrtest/pgo.py b/Lib/test/libregrtest/pgo.py
index 42ce5fba7a97c3..cabbba73d5eff5 100644
--- a/Lib/test/libregrtest/pgo.py
+++ b/Lib/test/libregrtest/pgo.py
@@ -50,7 +50,7 @@
'test_xml_etree_c',
]
-def setup_pgo_tests(ns):
- if not ns.args and not ns.pgo_extended:
+def setup_pgo_tests(cmdline_args, pgo_extended: bool):
+ if not cmdline_args and not pgo_extended:
# run default set of tests for PGO training
- ns.args = PGO_TESTS[:]
+ cmdline_args[:] = PGO_TESTS[:]
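
The slice assignment above is the point of the new signature: setup_pgo_tests() now receives the argument list itself instead of the whole namespace, so it must mutate that list in place for the caller to see the default PGO tests. A small illustration with a hypothetical DEFAULTS list:

    DEFAULTS = ['test_a', 'test_b']

    def fill_in_place(args):
        args[:] = DEFAULTS[:]     # mutates the caller's list

    def fill_by_rebinding(args):
        args = DEFAULTS[:]        # rebinds the local name only

    cmdline_args = []
    fill_by_rebinding(cmdline_args)
    print(cmdline_args)           # [] -- caller sees no change
    fill_in_place(cmdline_args)
    print(cmdline_args)           # ['test_a', 'test_b']
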
diff --git a/Lib/test/libregrtest/refleak.py b/Lib/test/libregrtest/refleak.py
index cd11d385591f80..ada1a65b867ee6 100644
--- a/Lib/test/libregrtest/refleak.py
+++ b/Lib/test/libregrtest/refleak.py
@@ -1,10 +1,13 @@
-import os
import sys
import warnings
from inspect import isabstract
+from typing import Any
+
from test import support
from test.support import os_helper
-from test.libregrtest.utils import clear_caches
+
+from .runtests import HuntRefleak
+from .utils import clear_caches
try:
from _abc import _get_dump
@@ -19,7 +22,9 @@ def _get_dump(cls):
cls._abc_negative_cache, cls._abc_negative_cache_version)
-def dash_R(ns, test_name, test_func):
+def runtest_refleak(test_name, test_func,
+ hunt_refleak: HuntRefleak,
+ quiet: bool):
"""Run a test multiple times, looking for reference leaks.
Returns:
@@ -41,6 +46,7 @@ def dash_R(ns, test_name, test_func):
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
+ zdc: dict[str, Any] | None
try:
import zipimport
except ImportError:
@@ -62,9 +68,10 @@ def dash_R(ns, test_name, test_func):
def get_pooled_int(value):
return int_pool.setdefault(value, value)
- nwarmup, ntracked, fname = ns.huntrleaks
- fname = os.path.join(os_helper.SAVEDCWD, fname)
- repcount = nwarmup + ntracked
+ warmups = hunt_refleak.warmups
+ runs = hunt_refleak.runs
+ filename = hunt_refleak.filename
+ repcount = warmups + runs
# Pre-allocate to ensure that the loop doesn't allocate anything new
rep_range = list(range(repcount))
@@ -78,16 +85,17 @@ def get_pooled_int(value):
# initialize variables to make pyflakes quiet
rc_before = alloc_before = fd_before = interned_before = 0
- if not ns.quiet:
+ if not quiet:
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
flush=True)
+ results = None
dash_R_cleanup(fs, ps, pic, zdc, abcs)
support.gc_collect()
for i in rep_range:
- test_func()
+ results = test_func()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
support.gc_collect()
@@ -101,7 +109,7 @@ def get_pooled_int(value):
rc_after = gettotalrefcount() - interned_after * 2
fd_after = fd_count()
- if not ns.quiet:
+ if not quiet:
print('.', end='', file=sys.stderr, flush=True)
rc_deltas[i] = get_pooled_int(rc_after - rc_before)
@@ -113,7 +121,7 @@ def get_pooled_int(value):
fd_before = fd_after
interned_before = interned_after
- if not ns.quiet:
+ if not quiet:
print(file=sys.stderr)
# These checkers return False on success, True on failure
@@ -142,16 +150,16 @@ def check_fd_deltas(deltas):
(fd_deltas, 'file descriptors', check_fd_deltas)
]:
# ignore warmup runs
- deltas = deltas[nwarmup:]
+ deltas = deltas[warmups:]
if checker(deltas):
msg = '%s leaked %s %s, sum=%s' % (
test_name, deltas, item_name, sum(deltas))
print(msg, file=sys.stderr, flush=True)
- with open(fname, "a", encoding="utf-8") as refrep:
+ with open(filename, "a", encoding="utf-8") as refrep:
print(msg, file=refrep)
refrep.flush()
failed = True
- return failed
+ return (failed, results)
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
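
The refleak bookkeeping above boils down to: run the test warmups + runs times, record per-run deltas, discard the warmup entries, and report a leak if a checker trips. A rough, self-contained sketch with invented numbers (the real checkers for reference counts, memory blocks and file descriptors are stricter):

    warmups, runs = 3, 6              # e.g. three warmup runs, six measured runs
    repcount = warmups + runs

    def fake_refcount_delta(i):
        # Pretend the test leaks one reference on every run
        return 1

    deltas = [fake_refcount_delta(i) for i in range(repcount)]
    deltas = deltas[warmups:]         # ignore warmup runs, as in the code above
    if any(delta >= 1 for delta in deltas):
        print(f"leaked {sum(deltas)} references, deltas={deltas}")
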
diff --git a/Lib/test/libregrtest/result.py b/Lib/test/libregrtest/result.py
new file mode 100644
index 00000000000000..bf885264657d5c
--- /dev/null
+++ b/Lib/test/libregrtest/result.py
@@ -0,0 +1,184 @@
+import dataclasses
+import json
+from typing import Any
+
+from test.support import TestStats
+
+from .utils import (
+ StrJSON, TestName, FilterTuple,
+ format_duration, normalize_test_name, print_warning)
+
+
+# Avoid enum.Enum to reduce the number of imports when tests are run
+class State:
+ PASSED = "PASSED"
+ FAILED = "FAILED"
+ SKIPPED = "SKIPPED"
+ UNCAUGHT_EXC = "UNCAUGHT_EXC"
+ REFLEAK = "REFLEAK"
+ ENV_CHANGED = "ENV_CHANGED"
+ RESOURCE_DENIED = "RESOURCE_DENIED"
+ INTERRUPTED = "INTERRUPTED"
+ MULTIPROCESSING_ERROR = "MULTIPROCESSING_ERROR"
+ DID_NOT_RUN = "DID_NOT_RUN"
+ TIMEOUT = "TIMEOUT"
+
+ @staticmethod
+ def is_failed(state):
+ return state in {
+ State.FAILED,
+ State.UNCAUGHT_EXC,
+ State.REFLEAK,
+ State.MULTIPROCESSING_ERROR,
+ State.TIMEOUT}
+
+ @staticmethod
+ def has_meaningful_duration(state):
+ # Consider that the duration is meaningless for these cases.
+ # For example, if a whole test file is skipped, its duration
+ # is unlikely to be the duration of executing its tests,
+ # but just the duration to execute code which skips the test.
+ return state not in {
+ State.SKIPPED,
+ State.RESOURCE_DENIED,
+ State.INTERRUPTED,
+ State.MULTIPROCESSING_ERROR,
+ State.DID_NOT_RUN}
+
+ @staticmethod
+ def must_stop(state):
+ return state in {
+ State.INTERRUPTED,
+ State.MULTIPROCESSING_ERROR}
+
+
+@dataclasses.dataclass(slots=True)
+class TestResult:
+ test_name: TestName
+ state: str | None = None
+ # Test duration in seconds
+ duration: float | None = None
+ xml_data: list[str] | None = None
+ stats: TestStats | None = None
+
+ # errors and failures copied from support.TestFailedWithDetails
+ errors: list[tuple[str, str]] | None = None
+ failures: list[tuple[str, str]] | None = None
+
+ def is_failed(self, fail_env_changed: bool) -> bool:
+ if self.state == State.ENV_CHANGED:
+ return fail_env_changed
+ return State.is_failed(self.state)
+
+ def _format_failed(self):
+ if self.errors and self.failures:
+ le = len(self.errors)
+ lf = len(self.failures)
+ error_s = "error" + ("s" if le > 1 else "")
+ failure_s = "failure" + ("s" if lf > 1 else "")
+ return f"{self.test_name} failed ({le} {error_s}, {lf} {failure_s})"
+
+ if self.errors:
+ le = len(self.errors)
+ error_s = "error" + ("s" if le > 1 else "")
+ return f"{self.test_name} failed ({le} {error_s})"
+
+ if self.failures:
+ lf = len(self.failures)
+ failure_s = "failure" + ("s" if lf > 1 else "")
+ return f"{self.test_name} failed ({lf} {failure_s})"
+
+ return f"{self.test_name} failed"
+
+ def __str__(self) -> str:
+ match self.state:
+ case State.PASSED:
+ return f"{self.test_name} passed"
+ case State.FAILED:
+ return self._format_failed()
+ case State.SKIPPED:
+ return f"{self.test_name} skipped"
+ case State.UNCAUGHT_EXC:
+ return f"{self.test_name} failed (uncaught exception)"
+ case State.REFLEAK:
+ return f"{self.test_name} failed (reference leak)"
+ case State.ENV_CHANGED:
+ return f"{self.test_name} failed (env changed)"
+ case State.RESOURCE_DENIED:
+ return f"{self.test_name} skipped (resource denied)"
+ case State.INTERRUPTED:
+ return f"{self.test_name} interrupted"
+ case State.MULTIPROCESSING_ERROR:
+ return f"{self.test_name} process crashed"
+ case State.DID_NOT_RUN:
+ return f"{self.test_name} ran no tests"
+ case State.TIMEOUT:
+ return f"{self.test_name} timed out ({format_duration(self.duration)})"
+ case _:
+ raise ValueError(f"unknown result state: {self.state!r}")
+
+ def has_meaningful_duration(self):
+ return State.has_meaningful_duration(self.state)
+
+ def set_env_changed(self):
+ if self.state is None or self.state == State.PASSED:
+ self.state = State.ENV_CHANGED
+
+ def must_stop(self, fail_fast: bool, fail_env_changed: bool) -> bool:
+ if State.must_stop(self.state):
+ return True
+ if fail_fast and self.is_failed(fail_env_changed):
+ return True
+ return False
+
+ def get_rerun_match_tests(self) -> FilterTuple | None:
+ match_tests = []
+
+ errors = self.errors or []
+ failures = self.failures or []
+ for error_list, is_error in (
+ (errors, True),
+ (failures, False),
+ ):
+ for full_name, *_ in error_list:
+ match_name = normalize_test_name(full_name, is_error=is_error)
+ if match_name is None:
+ # 'setUpModule (test.test_sys)': don't filter tests
+ return None
+ if not match_name:
+ error_type = "ERROR" if is_error else "FAIL"
+ print_warning(f"rerun failed to parse {error_type} test name: "
+ f"{full_name!r}: don't filter tests")
+ return None
+ match_tests.append(match_name)
+
+ if not match_tests:
+ return None
+ return tuple(match_tests)
+
+ def write_json_into(self, file) -> None:
+ json.dump(self, file, cls=_EncodeTestResult)
+
+ @staticmethod
+ def from_json(worker_json: StrJSON) -> 'TestResult':
+ return json.loads(worker_json, object_hook=_decode_test_result)
+
+
+class _EncodeTestResult(json.JSONEncoder):
+ def default(self, o: Any) -> dict[str, Any]:
+ if isinstance(o, TestResult):
+ result = dataclasses.asdict(o)
+ result["__test_result__"] = o.__class__.__name__
+ return result
+ else:
+ return super().default(o)
+
+
+def _decode_test_result(data: dict[str, Any]) -> TestResult | dict[str, Any]:
+ if "__test_result__" in data:
+ data.pop('__test_result__')
+ if data['stats'] is not None:
+ data['stats'] = TestStats(**data['stats'])
+ return TestResult(**data)
+ else:
+ return data
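
The encoder/decoder pair above exists so a worker process can ship its TestResult back to the parent as JSON. A quick round-trip, assuming this CPython tree is on the import path:

    import io
    from test.libregrtest.result import TestResult, State

    result = TestResult("test_os", state=State.PASSED, duration=1.5)

    buf = io.StringIO()
    result.write_json_into(buf)                       # uses _EncodeTestResult

    restored = TestResult.from_json(buf.getvalue())   # uses _decode_test_result
    assert restored == result
    print(restored)                                   # "test_os passed"
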
diff --git a/Lib/test/libregrtest/results.py b/Lib/test/libregrtest/results.py
new file mode 100644
index 00000000000000..6e7d65880f7347
--- /dev/null
+++ b/Lib/test/libregrtest/results.py
@@ -0,0 +1,258 @@
+import sys
+from test.support import TestStats
+
+from .runtests import RunTests
+from .result import State, TestResult
+from .utils import (
+ StrPath, TestName, TestTuple, TestList, FilterDict,
+ printlist, count, format_duration)
+
+
+EXITCODE_BAD_TEST = 2
+EXITCODE_ENV_CHANGED = 3
+EXITCODE_NO_TESTS_RAN = 4
+EXITCODE_RERUN_FAIL = 5
+EXITCODE_INTERRUPTED = 130
+
+
+class TestResults:
+ def __init__(self):
+ self.bad: TestList = []
+ self.good: TestList = []
+ self.rerun_bad: TestList = []
+ self.skipped: TestList = []
+ self.resource_denied: TestList = []
+ self.env_changed: TestList = []
+ self.run_no_tests: TestList = []
+ self.rerun: TestList = []
+ self.bad_results: list[TestResult] = []
+
+ self.interrupted: bool = False
+ self.test_times: list[tuple[float, TestName]] = []
+ self.stats = TestStats()
+ # used by --junit-xml
+ self.testsuite_xml: list[str] = []
+
+ def get_executed(self):
+ return (set(self.good) | set(self.bad) | set(self.skipped)
+ | set(self.resource_denied) | set(self.env_changed)
+ | set(self.run_no_tests))
+
+ def no_tests_run(self):
+ return not any((self.good, self.bad, self.skipped, self.interrupted,
+ self.env_changed))
+
+ def get_state(self, fail_env_changed):
+ state = []
+ if self.bad:
+ state.append("FAILURE")
+ elif fail_env_changed and self.env_changed:
+ state.append("ENV CHANGED")
+ elif self.no_tests_run():
+ state.append("NO TESTS RAN")
+
+ if self.interrupted:
+ state.append("INTERRUPTED")
+ if not state:
+ state.append("SUCCESS")
+
+ return ', '.join(state)
+
+ def get_exitcode(self, fail_env_changed, fail_rerun):
+ exitcode = 0
+ if self.bad:
+ exitcode = EXITCODE_BAD_TEST
+ elif self.interrupted:
+ exitcode = EXITCODE_INTERRUPTED
+ elif fail_env_changed and self.env_changed:
+ exitcode = EXITCODE_ENV_CHANGED
+ elif self.no_tests_run():
+ exitcode = EXITCODE_NO_TESTS_RAN
+ elif fail_rerun and self.rerun:
+ exitcode = EXITCODE_RERUN_FAIL
+ return exitcode
+
+ def accumulate_result(self, result: TestResult, runtests: RunTests):
+ test_name = result.test_name
+ rerun = runtests.rerun
+ fail_env_changed = runtests.fail_env_changed
+
+ match result.state:
+ case State.PASSED:
+ self.good.append(test_name)
+ case State.ENV_CHANGED:
+ self.env_changed.append(test_name)
+ case State.SKIPPED:
+ self.skipped.append(test_name)
+ case State.RESOURCE_DENIED:
+ self.resource_denied.append(test_name)
+ case State.INTERRUPTED:
+ self.interrupted = True
+ case State.DID_NOT_RUN:
+ self.run_no_tests.append(test_name)
+ case _:
+ if result.is_failed(fail_env_changed):
+ self.bad.append(test_name)
+ self.bad_results.append(result)
+ else:
+ raise ValueError(f"invalid test state: {result.state!r}")
+
+ if result.has_meaningful_duration() and not rerun:
+ self.test_times.append((result.duration, test_name))
+ if result.stats is not None:
+ self.stats.accumulate(result.stats)
+ if rerun:
+ self.rerun.append(test_name)
+
+ xml_data = result.xml_data
+ if xml_data:
+ self.add_junit(xml_data)
+
+ def need_rerun(self):
+ return bool(self.bad_results)
+
+ def prepare_rerun(self) -> tuple[TestTuple, FilterDict]:
+ tests: TestList = []
+ match_tests_dict = {}
+ for result in self.bad_results:
+ tests.append(result.test_name)
+
+ match_tests = result.get_rerun_match_tests()
+ # ignore empty match list
+ if match_tests:
+ match_tests_dict[result.test_name] = match_tests
+
+ # Clear previously failed tests
+ self.rerun_bad.extend(self.bad)
+ self.bad.clear()
+ self.bad_results.clear()
+
+ return (tuple(tests), match_tests_dict)
+
+ def add_junit(self, xml_data: list[str]):
+ import xml.etree.ElementTree as ET
+ for e in xml_data:
+ try:
+ self.testsuite_xml.append(ET.fromstring(e))
+ except ET.ParseError:
+ print(xml_data, file=sys.__stderr__)
+ raise
+
+ def write_junit(self, filename: StrPath):
+ if not self.testsuite_xml:
+ # Don't create empty XML file
+ return
+
+ import xml.etree.ElementTree as ET
+ root = ET.Element("testsuites")
+
+ # Manually count the totals for the overall summary
+ totals = {'tests': 0, 'errors': 0, 'failures': 0}
+ for suite in self.testsuite_xml:
+ root.append(suite)
+ for k in totals:
+ try:
+ totals[k] += int(suite.get(k, 0))
+ except ValueError:
+ pass
+
+ for k, v in totals.items():
+ root.set(k, str(v))
+
+ with open(filename, 'wb') as f:
+ for s in ET.tostringlist(root):
+ f.write(s)
+
+ def display_result(self, tests: TestTuple, quiet: bool, print_slowest: bool):
+ if self.interrupted:
+ print("Test suite interrupted by signal SIGINT.")
+
+ omitted = set(tests) - self.get_executed()
+ if omitted:
+ print()
+ print(count(len(omitted), "test"), "omitted:")
+ printlist(omitted)
+
+ if self.good and not quiet:
+ print()
+ if (not self.bad
+ and not self.skipped
+ and not self.interrupted
+ and len(self.good) > 1):
+ print("All", end=' ')
+ print(count(len(self.good), "test"), "OK.")
+
+ if print_slowest:
+ self.test_times.sort(reverse=True)
+ print()
+ print("10 slowest tests:")
+ for test_time, test in self.test_times[:10]:
+ print("- %s: %s" % (test, format_duration(test_time)))
+
+ if self.bad:
+ print()
+ print(count(len(self.bad), "test"), "failed:")
+ printlist(self.bad)
+
+ if self.env_changed:
+ print()
+ print("{} altered the execution environment:".format(
+ count(len(self.env_changed), "test")))
+ printlist(self.env_changed)
+
+ if self.skipped and not quiet:
+ print()
+ print(count(len(self.skipped), "test"), "skipped:")
+ printlist(self.skipped)
+
+ if self.resource_denied and not quiet:
+ print()
+ print(count(len(self.resource_denied), "test"), "skipped (resource denied):")
+ printlist(self.resource_denied)
+
+ if self.rerun:
+ print()
+ print("%s:" % count(len(self.rerun), "re-run test"))
+ printlist(self.rerun)
+
+ if self.run_no_tests:
+ print()
+ print(count(len(self.run_no_tests), "test"), "run no tests:")
+ printlist(self.run_no_tests)
+
+ def display_summary(self, first_runtests: RunTests, filtered: bool):
+ # Total tests
+ stats = self.stats
+ text = f'run={stats.tests_run:,}'
+ if filtered:
+ text = f"{text} (filtered)"
+ report = [text]
+ if stats.failures:
+ report.append(f'failures={stats.failures:,}')
+ if stats.skipped:
+ report.append(f'skipped={stats.skipped:,}')
+ print(f"Total tests: {' '.join(report)}")
+
+ # Total test files
+ all_tests = [self.good, self.bad, self.rerun,
+ self.skipped,
+ self.env_changed, self.run_no_tests]
+ run = sum(map(len, all_tests))
+ text = f'run={run}'
+ if not first_runtests.forever:
+ ntest = len(first_runtests.tests)
+ text = f"{text}/{ntest}"
+ if filtered:
+ text = f"{text} (filtered)"
+ report = [text]
+ for name, tests in (
+ ('failed', self.bad),
+ ('env_changed', self.env_changed),
+ ('skipped', self.skipped),
+ ('resource_denied', self.resource_denied),
+ ('rerun', self.rerun),
+ ('run_no_tests', self.run_no_tests),
+ ):
+ if tests:
+ report.append(f'{name}={len(tests)}')
+ print(f"Total test files: {' '.join(report)}")
diff --git a/Lib/test/libregrtest/run_workers.py b/Lib/test/libregrtest/run_workers.py
new file mode 100644
index 00000000000000..89cc50b7c158d2
--- /dev/null
+++ b/Lib/test/libregrtest/run_workers.py
@@ -0,0 +1,584 @@
+import contextlib
+import dataclasses
+import faulthandler
+import os.path
+import queue
+import signal
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+import traceback
+from typing import Literal, TextIO
+
+from test import support
+from test.support import os_helper
+
+from .logger import Logger
+from .result import TestResult, State
+from .results import TestResults
+from .runtests import RunTests, JsonFile, JsonFileType
+from .single import PROGRESS_MIN_TIME
+from .utils import (
+ StrPath, TestName, MS_WINDOWS,
+ format_duration, print_warning, count, plural)
+from .worker import create_worker_process, USE_PROCESS_GROUP
+
+if MS_WINDOWS:
+ import locale
+ import msvcrt
+
+
+
+# Display the running tests if nothing has happened in the last N seconds
+PROGRESS_UPDATE = 30.0 # seconds
+assert PROGRESS_UPDATE >= PROGRESS_MIN_TIME
+
+# Kill the main process after 5 minutes. It is supposed to write an update
+# every PROGRESS_UPDATE seconds. Tolerate 5 minutes for Python's slowest
+# buildbot workers.
+MAIN_PROCESS_TIMEOUT = 5 * 60.0
+assert MAIN_PROCESS_TIMEOUT >= PROGRESS_UPDATE
+
+# Time to wait until a worker completes: should be immediate
+JOIN_TIMEOUT = 30.0 # seconds
+
+
+# We do not use a generator so multiple threads can call next().
+class MultiprocessIterator:
+
+ """A thread-safe iterator over tests for multiprocess mode."""
+
+ def __init__(self, tests_iter):
+ self.lock = threading.Lock()
+ self.tests_iter = tests_iter
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ with self.lock:
+ if self.tests_iter is None:
+ raise StopIteration
+ return next(self.tests_iter)
+
+ def stop(self):
+ with self.lock:
+ self.tests_iter = None
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class MultiprocessResult:
+ result: TestResult
+ # bpo-45410: stderr is written into stdout to preserve message order
+ worker_stdout: str | None = None
+ err_msg: str | None = None
+
+
+ExcStr = str
+QueueOutput = tuple[Literal[False], MultiprocessResult] | tuple[Literal[True], ExcStr]
+
+
+class ExitThread(Exception):
+ pass
+
+
+class WorkerError(Exception):
+ def __init__(self,
+ test_name: TestName,
+ err_msg: str | None,
+ stdout: str | None,
+ state: str = State.MULTIPROCESSING_ERROR):
+ result = TestResult(test_name, state=state)
+ self.mp_result = MultiprocessResult(result, stdout, err_msg)
+ super().__init__()
+
+
+class WorkerThread(threading.Thread):
+ def __init__(self, worker_id: int, runner: "RunWorkers") -> None:
+ super().__init__()
+ self.worker_id = worker_id
+ self.runtests = runner.runtests
+ self.pending = runner.pending
+ self.output = runner.output
+ self.timeout = runner.worker_timeout
+ self.log = runner.log
+ self.test_name: TestName | None = None
+ self.start_time: float | None = None
+ self._popen: subprocess.Popen[str] | None = None
+ self._killed = False
+ self._stopped = False
+
+ def __repr__(self) -> str:
+ info = [f'WorkerThread #{self.worker_id}']
+ if self.is_alive():
+ info.append("running")
+ else:
+ info.append('stopped')
+ test = self.test_name
+ if test:
+ info.append(f'test={test}')
+ popen = self._popen
+ if popen is not None:
+ dt = time.monotonic() - self.start_time
+ info.extend((f'pid={self._popen.pid}',
+ f'time={format_duration(dt)}'))
+ return '<%s>' % ' '.join(info)
+
+ def _kill(self) -> None:
+ popen = self._popen
+ if popen is None:
+ return
+
+ if self._killed:
+ return
+ self._killed = True
+
+ if USE_PROCESS_GROUP:
+ what = f"{self} process group"
+ else:
+ what = f"{self}"
+
+ print(f"Kill {what}", file=sys.stderr, flush=True)
+ try:
+ if USE_PROCESS_GROUP:
+ os.killpg(popen.pid, signal.SIGKILL)
+ else:
+ popen.kill()
+ except ProcessLookupError:
+ # popen.kill(): the process completed, the WorkerThread thread
+ # read its exit status, but Popen.send_signal() read the returncode
+ # just before Popen.wait() set returncode.
+ pass
+ except OSError as exc:
+ print_warning(f"Failed to kill {what}: {exc!r}")
+
+ def stop(self) -> None:
+ # Method called from a different thread to stop this thread
+ self._stopped = True
+ self._kill()
+
+ def _run_process(self, runtests: RunTests, output_fd: int,
+ tmp_dir: StrPath | None = None) -> int | None:
+ popen = create_worker_process(runtests, output_fd, tmp_dir)
+ self._popen = popen
+ self._killed = False
+
+ try:
+ if self._stopped:
+ # If kill() has been called before self._popen is set,
+ # self._popen is still running. Call kill() again
+ # to ensure that the process is killed.
+ self._kill()
+ raise ExitThread
+
+ try:
+ # gh-94026: stdout+stderr are written to tempfile
+ retcode = popen.wait(timeout=self.timeout)
+ assert retcode is not None
+ return retcode
+ except subprocess.TimeoutExpired:
+ if self._stopped:
+ # kill() has been called: communicate() fails on reading
+ # closed stdout
+ raise ExitThread
+
+ # On timeout, kill the process
+ self._kill()
+
+ # None means TIMEOUT for the caller
+ retcode = None
+ # bpo-38207: Don't attempt to call communicate() again: it can
+ # hang until all child processes using stdout pipes complete.
+ except OSError:
+ if self._stopped:
+ # kill() has been called: communicate() fails
+ # on reading closed stdout
+ raise ExitThread
+ raise
+ except:
+ self._kill()
+ raise
+ finally:
+ self._wait_completed()
+ self._popen = None
+
+ def create_stdout(self, stack: contextlib.ExitStack) -> TextIO:
+ """Create stdout temporay file (file descriptor)."""
+
+ if MS_WINDOWS:
+ # gh-95027: When stdout is not a TTY, Python uses the ANSI code
+ # page for the sys.stdout encoding. If the main process runs in a
+ # terminal, sys.stdout uses WindowsConsoleIO with UTF-8 encoding.
+ encoding = locale.getencoding()
+ else:
+ encoding = sys.stdout.encoding
+
+ # gh-94026: Write stdout+stderr to a tempfile as workaround for
+ # non-blocking pipes on Emscripten with NodeJS.
+ # gh-109425: Use "backslashreplace" error handler: log corrupted
+ # stdout+stderr, instead of failing with a UnicodeDecodeError and not
+ # logging stdout+stderr at all.
+ stdout_file = tempfile.TemporaryFile('w+',
+ encoding=encoding,
+ errors='backslashreplace')
+ stack.enter_context(stdout_file)
+ return stdout_file
+
+ def create_json_file(self, stack: contextlib.ExitStack) -> tuple[JsonFile, TextIO | None]:
+ """Create JSON file."""
+
+ json_file_use_stdout = self.runtests.json_file_use_stdout()
+ if json_file_use_stdout:
+ json_file = JsonFile(None, JsonFileType.STDOUT)
+ json_tmpfile = None
+ else:
+ json_tmpfile = tempfile.TemporaryFile('w+', encoding='utf8')
+ stack.enter_context(json_tmpfile)
+
+ json_fd = json_tmpfile.fileno()
+ if MS_WINDOWS:
+ json_handle = msvcrt.get_osfhandle(json_fd)
+ json_file = JsonFile(json_handle,
+ JsonFileType.WINDOWS_HANDLE)
+ else:
+ json_file = JsonFile(json_fd, JsonFileType.UNIX_FD)
+ return (json_file, json_tmpfile)
+
+ def create_worker_runtests(self, test_name: TestName, json_file: JsonFile) -> RunTests:
+ """Create the worker RunTests."""
+
+ tests = (test_name,)
+ if self.runtests.rerun:
+ match_tests = self.runtests.get_match_tests(test_name)
+ else:
+ match_tests = None
+
+ kwargs = {}
+ if match_tests:
+ kwargs['match_tests'] = match_tests
+ return self.runtests.copy(
+ tests=tests,
+ json_file=json_file,
+ **kwargs)
+
+ def run_tmp_files(self, worker_runtests: RunTests,
+ stdout_fd: int) -> tuple[int | None, list[StrPath]]:
+ # gh-93353: Check for leaked temporary files in the parent process,
+ # since the deletion of temporary files can happen late during
+ # Python finalization: too late for libregrtest.
+ if not support.is_wasi:
+ # Don't check for leaked temporary files and directories if Python is
+ # run on WASI. WASI doesn't pass environment variables like TMPDIR to
+ # worker processes.
+ tmp_dir = tempfile.mkdtemp(prefix="test_python_")
+ tmp_dir = os.path.abspath(tmp_dir)
+ try:
+ retcode = self._run_process(worker_runtests,
+ stdout_fd, tmp_dir)
+ finally:
+ tmp_files = os.listdir(tmp_dir)
+ os_helper.rmtree(tmp_dir)
+ else:
+ retcode = self._run_process(worker_runtests, stdout_fd)
+ tmp_files = []
+
+ return (retcode, tmp_files)
+
+ def read_stdout(self, stdout_file: TextIO) -> str:
+ stdout_file.seek(0)
+ try:
+ return stdout_file.read().strip()
+ except Exception as exc:
+ # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+ # decoded from encoding
+ raise WorkerError(self.test_name,
+ f"Cannot read process stdout: {exc}", None)
+
+ def read_json(self, json_file: JsonFile, json_tmpfile: TextIO | None,
+ stdout: str) -> tuple[TestResult, str]:
+ try:
+ if json_tmpfile is not None:
+ json_tmpfile.seek(0)
+ worker_json = json_tmpfile.read()
+ elif json_file.file_type == JsonFileType.STDOUT:
+ stdout, _, worker_json = stdout.rpartition("\n")
+ stdout = stdout.rstrip()
+ else:
+ with json_file.open(encoding='utf8') as json_fp:
+ worker_json = json_fp.read()
+ except Exception as exc:
+ # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+ # decoded from encoding
+ err_msg = f"Failed to read worker process JSON: {exc}"
+ raise WorkerError(self.test_name, err_msg, stdout,
+ state=State.MULTIPROCESSING_ERROR)
+
+ if not worker_json:
+ raise WorkerError(self.test_name, "empty JSON", stdout)
+
+ try:
+ result = TestResult.from_json(worker_json)
+ except Exception as exc:
+ # gh-101634: Catch UnicodeDecodeError if stdout cannot be
+ # decoded from encoding
+ err_msg = f"Failed to parse worker process JSON: {exc}"
+ raise WorkerError(self.test_name, err_msg, stdout,
+ state=State.MULTIPROCESSING_ERROR)
+
+ return (result, stdout)
+
+ def _runtest(self, test_name: TestName) -> MultiprocessResult:
+ with contextlib.ExitStack() as stack:
+ stdout_file = self.create_stdout(stack)
+ json_file, json_tmpfile = self.create_json_file(stack)
+ worker_runtests = self.create_worker_runtests(test_name, json_file)
+
+ retcode, tmp_files = self.run_tmp_files(worker_runtests,
+ stdout_file.fileno())
+
+ stdout = self.read_stdout(stdout_file)
+
+ if retcode is None:
+ raise WorkerError(self.test_name, None, stdout, state=State.TIMEOUT)
+ if retcode != 0:
+ raise WorkerError(self.test_name, f"Exit code {retcode}", stdout)
+
+ result, stdout = self.read_json(json_file, json_tmpfile, stdout)
+
+ if tmp_files:
+ msg = (f'\n\n'
+ f'Warning -- {test_name} leaked temporary files '
+ f'({len(tmp_files)}): {", ".join(sorted(tmp_files))}')
+ stdout += msg
+ result.set_env_changed()
+
+ return MultiprocessResult(result, stdout)
+
+ def run(self) -> None:
+ fail_fast = self.runtests.fail_fast
+ fail_env_changed = self.runtests.fail_env_changed
+ while not self._stopped:
+ try:
+ try:
+ test_name = next(self.pending)
+ except StopIteration:
+ break
+
+ self.start_time = time.monotonic()
+ self.test_name = test_name
+ try:
+ mp_result = self._runtest(test_name)
+ except WorkerError as exc:
+ mp_result = exc.mp_result
+ finally:
+ self.test_name = None
+ mp_result.result.duration = time.monotonic() - self.start_time
+ self.output.put((False, mp_result))
+
+ if mp_result.result.must_stop(fail_fast, fail_env_changed):
+ break
+ except ExitThread:
+ break
+ except BaseException:
+ self.output.put((True, traceback.format_exc()))
+ break
+
+ def _wait_completed(self) -> None:
+ popen = self._popen
+
+ try:
+ popen.wait(JOIN_TIMEOUT)
+ except (subprocess.TimeoutExpired, OSError) as exc:
+ print_warning(f"Failed to wait for {self} completion "
+ f"(timeout={format_duration(JOIN_TIMEOUT)}): "
+ f"{exc!r}")
+
+ def wait_stopped(self, start_time: float) -> None:
+ # bpo-38207: RunWorkers.stop_workers() called self.stop()
+ # which killed the process. Sometimes, killing the process from the
+ # main thread does not interrupt popen.communicate() in the
+ # WorkerThread thread. This loop with a timeout is a workaround
+ # for that.
+ #
+ # Moreover, if this method fails to join the thread, it is likely
+ # that Python will hang at exit while calling threading._shutdown()
+ # which tries again to join the blocked thread. Regrtest.main()
+ # uses EXIT_TIMEOUT to work around this second bug.
+ while True:
+ # Write a message every second
+ self.join(1.0)
+ if not self.is_alive():
+ break
+ dt = time.monotonic() - start_time
+ self.log(f"Waiting for {self} thread for {format_duration(dt)}")
+ if dt > JOIN_TIMEOUT:
+ print_warning(f"Failed to join {self} in {format_duration(dt)}")
+ break
+
+
+def get_running(workers: list[WorkerThread]) -> str | None:
+ running: list[str] = []
+ for worker in workers:
+ test_name = worker.test_name
+ if not test_name:
+ continue
+ dt = time.monotonic() - worker.start_time
+ if dt >= PROGRESS_MIN_TIME:
+ text = f'{test_name} ({format_duration(dt)})'
+ running.append(text)
+ if not running:
+ return None
+ return f"running ({len(running)}): {', '.join(running)}"
+
+
+class RunWorkers:
+ def __init__(self, num_workers: int, runtests: RunTests,
+ logger: Logger, results: TestResults) -> None:
+ self.num_workers = num_workers
+ self.runtests = runtests
+ self.log = logger.log
+ self.display_progress = logger.display_progress
+ self.results: TestResults = results
+
+ self.output: queue.Queue[QueueOutput] = queue.Queue()
+ tests_iter = runtests.iter_tests()
+ self.pending = MultiprocessIterator(tests_iter)
+ self.timeout = runtests.timeout
+ if self.timeout is not None:
+ # Rely on faulthandler to kill a worker process. This timeout is
+ # only used when faulthandler fails to kill a worker process. Give
+ # faulthandler a maximum of 5 extra minutes to kill the worker.
+ self.worker_timeout: float | None = min(self.timeout * 1.5, self.timeout + 5 * 60)
+ else:
+ self.worker_timeout = None
+ self.workers: list[WorkerThread] | None = None
+
+ jobs = self.runtests.get_jobs()
+ if jobs is not None:
+ # Don't spawn more threads than the number of jobs:
+ # these worker threads would never get anything to do.
+ self.num_workers = min(self.num_workers, jobs)
+
+ def start_workers(self) -> None:
+ self.workers = [WorkerThread(index, self)
+ for index in range(1, self.num_workers + 1)]
+ jobs = self.runtests.get_jobs()
+ if jobs is not None:
+ tests = count(jobs, 'test')
+ else:
+ tests = 'tests'
+ nworkers = len(self.workers)
+ processes = plural(nworkers, "process", "processes")
+ msg = (f"Run {tests} in parallel using "
+ f"{nworkers} worker {processes}")
+ if self.timeout:
+ msg += (" (timeout: %s, worker timeout: %s)"
+ % (format_duration(self.timeout),
+ format_duration(self.worker_timeout)))
+ self.log(msg)
+ for worker in self.workers:
+ worker.start()
+
+ def stop_workers(self) -> None:
+ start_time = time.monotonic()
+ for worker in self.workers:
+ worker.stop()
+ for worker in self.workers:
+ worker.wait_stopped(start_time)
+
+ def _get_result(self) -> QueueOutput | None:
+ pgo = self.runtests.pgo
+ use_faulthandler = (self.timeout is not None)
+
+ # bpo-46205: check the status of workers every iteration to avoid
+ # waiting forever on an empty queue.
+ while any(worker.is_alive() for worker in self.workers):
+ if use_faulthandler:
+ faulthandler.dump_traceback_later(MAIN_PROCESS_TIMEOUT,
+ exit=True)
+
+ # wait for a thread
+ try:
+ return self.output.get(timeout=PROGRESS_UPDATE)
+ except queue.Empty:
+ pass
+
+ if not pgo:
+ # display progress
+ running = get_running(self.workers)
+ if running:
+ self.log(running)
+
+ # all worker threads are done: consume pending results
+ try:
+ return self.output.get(timeout=0)
+ except queue.Empty:
+ return None
+
+ def display_result(self, mp_result: MultiprocessResult) -> None:
+ result = mp_result.result
+ pgo = self.runtests.pgo
+
+ text = str(result)
+ if mp_result.err_msg:
+ # MULTIPROCESSING_ERROR
+ text += ' (%s)' % mp_result.err_msg
+ elif (result.duration >= PROGRESS_MIN_TIME and not pgo):
+ text += ' (%s)' % format_duration(result.duration)
+ if not pgo:
+ running = get_running(self.workers)
+ if running:
+ text += f' -- {running}'
+ self.display_progress(self.test_index, text)
+
+ def _process_result(self, item: QueueOutput) -> TestResult:
+ """Returns True if test runner must stop."""
+ if item[0]:
+ # Thread got an exception
+ format_exc = item[1]
+ print_warning(f"regrtest worker thread failed: {format_exc}")
+ result = TestResult("", state=State.MULTIPROCESSING_ERROR)
+ self.results.accumulate_result(result, self.runtests)
+ return result
+
+ self.test_index += 1
+ mp_result = item[1]
+ result = mp_result.result
+ self.results.accumulate_result(result, self.runtests)
+ self.display_result(mp_result)
+
+ if mp_result.worker_stdout:
+ print(mp_result.worker_stdout, flush=True)
+
+ return result
+
+ def run(self) -> None:
+ fail_fast = self.runtests.fail_fast
+ fail_env_changed = self.runtests.fail_env_changed
+
+ self.start_workers()
+
+ self.test_index = 0
+ try:
+ while True:
+ item = self._get_result()
+ if item is None:
+ break
+
+ result = self._process_result(item)
+ if result.must_stop(fail_fast, fail_env_changed):
+ break
+ except KeyboardInterrupt:
+ print()
+ self.results.interrupted = True
+ finally:
+ if self.timeout is not None:
+ faulthandler.cancel_dump_traceback_later()
+
+ # Always ensure that all worker processes are no longer
+ # running when we exit this function.
+ self.pending.stop()
+ self.stop_workers()
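
MultiprocessIterator above is the whole hand-off between the manager and its worker threads: each WorkerThread just calls next(self.pending) until the shared iterator is exhausted or stopped. A standalone sketch of that pattern with a dummy work list instead of test names:

    import threading

    class SafeIterator:
        """Minimal stand-in for MultiprocessIterator."""
        def __init__(self, items):
            self._lock = threading.Lock()
            self._it = iter(items)

        def __iter__(self):
            return self

        def __next__(self):
            with self._lock:
                if self._it is None:
                    raise StopIteration
                return next(self._it)

        def stop(self):
            with self._lock:
                self._it = None

    pending = SafeIterator([f"test_{i}" for i in range(10)])
    done = []

    def worker():
        for name in pending:          # next() calls are serialized by the lock
            done.append(name)

    threads = [threading.Thread(target=worker) for _ in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(sorted(done))               # all ten items, each consumed exactly once
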
diff --git a/Lib/test/libregrtest/runtest.py b/Lib/test/libregrtest/runtest.py
deleted file mode 100644
index fd49927679bdea..00000000000000
--- a/Lib/test/libregrtest/runtest.py
+++ /dev/null
@@ -1,446 +0,0 @@
-import faulthandler
-import functools
-import gc
-import importlib
-import io
-import os
-import sys
-import time
-import traceback
-import unittest
-
-from test import support
-from test.support import os_helper
-from test.support import threading_helper
-from test.libregrtest.cmdline import Namespace
-from test.libregrtest.save_env import saved_test_environment
-from test.libregrtest.utils import clear_caches, format_duration, print_warning
-
-
-class TestResult:
- def __init__(
- self,
- name: str,
- duration_sec: float = 0.0,
- xml_data: list[str] | None = None,
- ) -> None:
- self.name = name
- self.duration_sec = duration_sec
- self.xml_data = xml_data
-
- def __str__(self) -> str:
- return f"{self.name} finished"
-
-
-class Passed(TestResult):
- def __str__(self) -> str:
- return f"{self.name} passed"
-
-
-class Failed(TestResult):
- def __init__(
- self,
- name: str,
- duration_sec: float = 0.0,
- xml_data: list[str] | None = None,
- errors: list[tuple[str, str]] | None = None,
- failures: list[tuple[str, str]] | None = None,
- ) -> None:
- super().__init__(name, duration_sec=duration_sec, xml_data=xml_data)
- self.errors = errors
- self.failures = failures
-
- def __str__(self) -> str:
- if self.errors and self.failures:
- le = len(self.errors)
- lf = len(self.failures)
- error_s = "error" + ("s" if le > 1 else "")
- failure_s = "failure" + ("s" if lf > 1 else "")
- return f"{self.name} failed ({le} {error_s}, {lf} {failure_s})"
-
- if self.errors:
- le = len(self.errors)
- error_s = "error" + ("s" if le > 1 else "")
- return f"{self.name} failed ({le} {error_s})"
-
- if self.failures:
- lf = len(self.failures)
- failure_s = "failure" + ("s" if lf > 1 else "")
- return f"{self.name} failed ({lf} {failure_s})"
-
- return f"{self.name} failed"
-
-
-class UncaughtException(Failed):
- def __str__(self) -> str:
- return f"{self.name} failed (uncaught exception)"
-
-
-class EnvChanged(Failed):
- def __str__(self) -> str:
- return f"{self.name} failed (env changed)"
-
- # Convert Passed to EnvChanged
- @staticmethod
- def from_passed(other):
- return EnvChanged(other.name, other.duration_sec, other.xml_data)
-
-
-class RefLeak(Failed):
- def __str__(self) -> str:
- return f"{self.name} failed (reference leak)"
-
-
-class Skipped(TestResult):
- def __str__(self) -> str:
- return f"{self.name} skipped"
-
-
-class ResourceDenied(Skipped):
- def __str__(self) -> str:
- return f"{self.name} skipped (resource denied)"
-
-
-class Interrupted(TestResult):
- def __str__(self) -> str:
- return f"{self.name} interrupted"
-
-
-class ChildError(Failed):
- def __str__(self) -> str:
- return f"{self.name} crashed"
-
-
-class DidNotRun(TestResult):
- def __str__(self) -> str:
- return f"{self.name} ran no tests"
-
-
-class Timeout(Failed):
- def __str__(self) -> str:
- return f"{self.name} timed out ({format_duration(self.duration_sec)})"
-
-
-# Minimum duration of a test to display its duration or to mention that
-# the test is running in background
-PROGRESS_MIN_TIME = 30.0 # seconds
-
-#If these test directories are encountered recurse into them and treat each
-# test_ .py or dir as a separate test module. This can increase parallelism.
-# Beware this can't generally be done for any directory with sub-tests as the
-# __init__.py may do things which alter what tests are to be run.
-
-SPLITTESTDIRS = {
- "test_asyncio",
- "test_concurrent_futures",
- "test_multiprocessing_fork",
- "test_multiprocessing_forkserver",
- "test_multiprocessing_spawn",
-}
-
-# Storage of uncollectable objects
-FOUND_GARBAGE = []
-
-
-def is_failed(result: TestResult, ns: Namespace) -> bool:
- if isinstance(result, EnvChanged):
- return ns.fail_env_changed
- return isinstance(result, Failed)
-
-
-def findtestdir(path=None):
- return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
-
-
-def findtests(*, testdir=None, exclude=(),
- split_test_dirs=SPLITTESTDIRS, base_mod=""):
- """Return a list of all applicable test modules."""
- testdir = findtestdir(testdir)
- tests = []
- for name in os.listdir(testdir):
- mod, ext = os.path.splitext(name)
- if (not mod.startswith("test_")) or (mod in exclude):
- continue
- if mod in split_test_dirs:
- subdir = os.path.join(testdir, mod)
- mod = f"{base_mod or 'test'}.{mod}"
- tests.extend(findtests(testdir=subdir, exclude=exclude,
- split_test_dirs=split_test_dirs, base_mod=mod))
- elif ext in (".py", ""):
- tests.append(f"{base_mod}.{mod}" if base_mod else mod)
- return sorted(tests)
-
-
-def split_test_packages(tests, *, testdir=None, exclude=(),
- split_test_dirs=SPLITTESTDIRS):
- testdir = findtestdir(testdir)
- splitted = []
- for name in tests:
- if name in split_test_dirs:
- subdir = os.path.join(testdir, name)
- splitted.extend(findtests(testdir=subdir, exclude=exclude,
- split_test_dirs=split_test_dirs,
- base_mod=name))
- else:
- splitted.append(name)
- return splitted
-
-
-def get_abs_module(ns: Namespace, test_name: str) -> str:
- if test_name.startswith('test.') or ns.testdir:
- return test_name
- else:
- # Import it from the test package
- return 'test.' + test_name
-
-
-def _runtest(ns: Namespace, test_name: str) -> TestResult:
- # Handle faulthandler timeout, capture stdout+stderr, XML serialization
- # and measure time.
-
- output_on_failure = ns.verbose3
-
- use_timeout = (
- ns.timeout is not None and threading_helper.can_start_thread
- )
- if use_timeout:
- faulthandler.dump_traceback_later(ns.timeout, exit=True)
-
- start_time = time.perf_counter()
- try:
- support.set_match_tests(ns.match_tests, ns.ignore_tests)
- support.junit_xml_list = xml_list = [] if ns.xmlpath else None
- if ns.failfast:
- support.failfast = True
-
- if output_on_failure:
- support.verbose = True
-
- stream = io.StringIO()
- orig_stdout = sys.stdout
- orig_stderr = sys.stderr
- print_warning = support.print_warning
- orig_print_warnings_stderr = print_warning.orig_stderr
-
- output = None
- try:
- sys.stdout = stream
- sys.stderr = stream
- # print_warning() writes into the temporary stream to preserve
- # messages order. If support.environment_altered becomes true,
- # warnings will be written to sys.stderr below.
- print_warning.orig_stderr = stream
-
- result = _runtest_inner(ns, test_name,
- display_failure=False)
- if not isinstance(result, Passed):
- output = stream.getvalue()
- finally:
- sys.stdout = orig_stdout
- sys.stderr = orig_stderr
- print_warning.orig_stderr = orig_print_warnings_stderr
-
- if output is not None:
- sys.stderr.write(output)
- sys.stderr.flush()
- else:
- # Tell tests to be moderately quiet
- support.verbose = ns.verbose
-
- result = _runtest_inner(ns, test_name,
- display_failure=not ns.verbose)
-
- if xml_list:
- import xml.etree.ElementTree as ET
- result.xml_data = [
- ET.tostring(x).decode('us-ascii')
- for x in xml_list
- ]
-
- result.duration_sec = time.perf_counter() - start_time
- return result
- finally:
- if use_timeout:
- faulthandler.cancel_dump_traceback_later()
- support.junit_xml_list = None
-
-
-def runtest(ns: Namespace, test_name: str) -> TestResult:
- """Run a single test.
-
- ns -- regrtest namespace of options
- test_name -- the name of the test
-
- Returns a TestResult sub-class depending on the kind of result received.
-
- If ns.xmlpath is not None, xml_data is a list containing each
- generated testsuite element.
- """
- try:
- return _runtest(ns, test_name)
- except:
- if not ns.pgo:
- msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
- file=sys.stderr, flush=True)
- return Failed(test_name)
-
-
-def _test_module(the_module):
- loader = unittest.TestLoader()
- tests = loader.loadTestsFromModule(the_module)
- for error in loader.errors:
- print(error, file=sys.stderr)
- if loader.errors:
- raise Exception("errors while loading tests")
- support.run_unittest(tests)
-
-
-def save_env(ns: Namespace, test_name: str):
- return saved_test_environment(test_name, ns.verbose, ns.quiet, pgo=ns.pgo)
-
-
-def _runtest_inner2(ns: Namespace, test_name: str) -> bool:
- # Load the test function, run the test function, handle huntrleaks
- # to detect leaks.
-
- abstest = get_abs_module(ns, test_name)
-
- # remove the module from sys.module to reload it if it was already imported
- try:
- del sys.modules[abstest]
- except KeyError:
- pass
-
- the_module = importlib.import_module(abstest)
-
- if ns.huntrleaks:
- from test.libregrtest.refleak import dash_R
-
- # If the test has a test_main, that will run the appropriate
- # tests. If not, use normal unittest test loading.
- test_runner = getattr(the_module, "test_main", None)
- if test_runner is None:
- test_runner = functools.partial(_test_module, the_module)
-
- try:
- with save_env(ns, test_name):
- if ns.huntrleaks:
- # Return True if the test leaked references
- refleak = dash_R(ns, test_name, test_runner)
- else:
- test_runner()
- refleak = False
- finally:
- # First kill any dangling references to open files etc.
- # This can also issue some ResourceWarnings which would otherwise get
- # triggered during the following test run, and possibly produce
- # failures.
- support.gc_collect()
-
- cleanup_test_droppings(test_name, ns.verbose)
-
- if gc.garbage:
- support.environment_altered = True
- print_warning(f"{test_name} created {len(gc.garbage)} "
- f"uncollectable object(s).")
-
- # move the uncollectable objects somewhere,
- # so we don't see them again
- FOUND_GARBAGE.extend(gc.garbage)
- gc.garbage.clear()
-
- support.reap_children()
-
- return refleak
-
-
-def _runtest_inner(
- ns: Namespace, test_name: str, display_failure: bool = True
-) -> TestResult:
- # Detect environment changes, handle exceptions.
-
- # Reset the environment_altered flag to detect if a test altered
- # the environment
- support.environment_altered = False
-
- if ns.pgo:
- display_failure = False
-
- try:
- clear_caches()
- support.gc_collect()
-
- with save_env(ns, test_name):
- refleak = _runtest_inner2(ns, test_name)
- except support.ResourceDenied as msg:
- if not ns.quiet and not ns.pgo:
- print(f"{test_name} skipped -- {msg}", flush=True)
- return ResourceDenied(test_name)
- except unittest.SkipTest as msg:
- if not ns.quiet and not ns.pgo:
- print(f"{test_name} skipped -- {msg}", flush=True)
- return Skipped(test_name)
- except support.TestFailedWithDetails as exc:
- msg = f"test {test_name} failed"
- if display_failure:
- msg = f"{msg} -- {exc}"
- print(msg, file=sys.stderr, flush=True)
- return Failed(test_name, errors=exc.errors, failures=exc.failures)
- except support.TestFailed as exc:
- msg = f"test {test_name} failed"
- if display_failure:
- msg = f"{msg} -- {exc}"
- print(msg, file=sys.stderr, flush=True)
- return Failed(test_name)
- except support.TestDidNotRun:
- return DidNotRun(test_name)
- except KeyboardInterrupt:
- print()
- return Interrupted(test_name)
- except:
- if not ns.pgo:
- msg = traceback.format_exc()
- print(f"test {test_name} crashed -- {msg}",
- file=sys.stderr, flush=True)
- return UncaughtException(test_name)
-
- if refleak:
- return RefLeak(test_name)
- if support.environment_altered:
- return EnvChanged(test_name)
- return Passed(test_name)
-
-
-def cleanup_test_droppings(test_name: str, verbose: int) -> None:
- # Try to clean up junk commonly left behind. While tests shouldn't leave
- # any files or directories behind, when a test fails that can be tedious
- # for it to arrange. The consequences can be especially nasty on Windows,
- # since if a test leaves a file open, it cannot be deleted by name (while
- # there's nothing we can do about that here either, we can display the
- # name of the offending test, which is a real help).
- for name in (os_helper.TESTFN,):
- if not os.path.exists(name):
- continue
-
- if os.path.isdir(name):
- import shutil
- kind, nuker = "directory", shutil.rmtree
- elif os.path.isfile(name):
- kind, nuker = "file", os.unlink
- else:
- raise RuntimeError(f"os.path says {name!r} exists but is neither "
- f"directory nor file")
-
- if verbose:
- print_warning(f"{test_name} left behind {kind} {name!r}")
- support.environment_altered = True
-
- try:
- import stat
- # fix possible permissions problems that might prevent cleanup
- os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
- nuker(name)
- except Exception as exc:
- print_warning(f"{test_name} left behind {kind} {name!r} "
- f"and it couldn't be removed: {exc}")
diff --git a/Lib/test/libregrtest/runtest_mp.py b/Lib/test/libregrtest/runtest_mp.py
deleted file mode 100644
index 62e6c6df36518c..00000000000000
--- a/Lib/test/libregrtest/runtest_mp.py
+++ /dev/null
@@ -1,572 +0,0 @@
-import faulthandler
-import json
-import os.path
-import queue
-import signal
-import subprocess
-import sys
-import tempfile
-import threading
-import time
-import traceback
-from typing import NamedTuple, NoReturn, Literal, Any, TextIO
-
-from test import support
-from test.support import os_helper
-
-from test.libregrtest.cmdline import Namespace
-from test.libregrtest.main import Regrtest
-from test.libregrtest.runtest import (
- runtest, is_failed, TestResult, Interrupted, Timeout, ChildError,
- PROGRESS_MIN_TIME, Passed, EnvChanged)
-from test.libregrtest.setup import setup_tests
-from test.libregrtest.utils import format_duration, print_warning
-
-if sys.platform == 'win32':
- import locale
-
-
-# Display the running tests if nothing happened last N seconds
-PROGRESS_UPDATE = 30.0 # seconds
-assert PROGRESS_UPDATE >= PROGRESS_MIN_TIME
-
-# Kill the main process after 5 minutes. It is supposed to write an update
-# every PROGRESS_UPDATE seconds. Tolerate 5 minutes for Python slowest
-# buildbot workers.
-MAIN_PROCESS_TIMEOUT = 5 * 60.0
-assert MAIN_PROCESS_TIMEOUT >= PROGRESS_UPDATE
-
-# Time to wait until a worker completes: should be immediate
-JOIN_TIMEOUT = 30.0 # seconds
-
-USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg"))
-
-
-def must_stop(result: TestResult, ns: Namespace) -> bool:
- if isinstance(result, Interrupted):
- return True
- if ns.failfast and is_failed(result, ns):
- return True
- return False
-
-
-def parse_worker_args(worker_args) -> tuple[Namespace, str]:
- ns_dict, test_name = json.loads(worker_args)
- ns = Namespace(**ns_dict)
- return (ns, test_name)
-
-
-def run_test_in_subprocess(testname: str, ns: Namespace, tmp_dir: str, stdout_fh: TextIO) -> subprocess.Popen:
- ns_dict = vars(ns)
- worker_args = (ns_dict, testname)
- worker_args = json.dumps(worker_args)
- if ns.python is not None:
- executable = ns.python
- else:
- executable = [sys.executable]
- cmd = [*executable, *support.args_from_interpreter_flags(),
- '-u', # Unbuffered stdout and stderr
- '-m', 'test.regrtest',
- '--worker-args', worker_args]
-
- env = dict(os.environ)
- if tmp_dir is not None:
- env['TMPDIR'] = tmp_dir
- env['TEMP'] = tmp_dir
- env['TMP'] = tmp_dir
-
- # Running the child from the same working directory as regrtest's original
- # invocation ensures that TEMPDIR for the child is the same when
- # sysconfig.is_python_build() is true. See issue 15300.
- kw = dict(
- env=env,
- stdout=stdout_fh,
- # bpo-45410: Write stderr into stdout to keep messages order
- stderr=stdout_fh,
- text=True,
- close_fds=(os.name != 'nt'),
- cwd=os_helper.SAVEDCWD,
- )
- if USE_PROCESS_GROUP:
- kw['start_new_session'] = True
- return subprocess.Popen(cmd, **kw)
-
-
-def run_tests_worker(ns: Namespace, test_name: str) -> NoReturn:
- setup_tests(ns)
-
- result = runtest(ns, test_name)
-
- print() # Force a newline (just in case)
-
- # Serialize TestResult as dict in JSON
- print(json.dumps(result, cls=EncodeTestResult), flush=True)
- sys.exit(0)
-
-
-# We do not use a generator so multiple threads can call next().
-class MultiprocessIterator:
-
- """A thread-safe iterator over tests for multiprocess mode."""
-
- def __init__(self, tests_iter):
- self.lock = threading.Lock()
- self.tests_iter = tests_iter
-
- def __iter__(self):
- return self
-
- def __next__(self):
- with self.lock:
- if self.tests_iter is None:
- raise StopIteration
- return next(self.tests_iter)
-
- def stop(self):
- with self.lock:
- self.tests_iter = None
-
-
-class MultiprocessResult(NamedTuple):
- result: TestResult
- # bpo-45410: stderr is written into stdout to keep messages order
- stdout: str
- error_msg: str
-
-
-ExcStr = str
-QueueOutput = tuple[Literal[False], MultiprocessResult] | tuple[Literal[True], ExcStr]
-
-
-class ExitThread(Exception):
- pass
-
-
-class TestWorkerProcess(threading.Thread):
- def __init__(self, worker_id: int, runner: "MultiprocessTestRunner") -> None:
- super().__init__()
- self.worker_id = worker_id
- self.pending = runner.pending
- self.output = runner.output
- self.ns = runner.ns
- self.timeout = runner.worker_timeout
- self.regrtest = runner.regrtest
- self.current_test_name = None
- self.start_time = None
- self._popen = None
- self._killed = False
- self._stopped = False
-
- def __repr__(self) -> str:
- info = [f'TestWorkerProcess #{self.worker_id}']
- if self.is_alive():
- info.append("running")
- else:
- info.append('stopped')
- test = self.current_test_name
- if test:
- info.append(f'test={test}')
- popen = self._popen
- if popen is not None:
- dt = time.monotonic() - self.start_time
- info.extend((f'pid={self._popen.pid}',
- f'time={format_duration(dt)}'))
- return '<%s>' % ' '.join(info)
-
- def _kill(self) -> None:
- popen = self._popen
- if popen is None:
- return
-
- if self._killed:
- return
- self._killed = True
-
- if USE_PROCESS_GROUP:
- what = f"{self} process group"
- else:
- what = f"{self}"
-
- print(f"Kill {what}", file=sys.stderr, flush=True)
- try:
- if USE_PROCESS_GROUP:
- os.killpg(popen.pid, signal.SIGKILL)
- else:
- popen.kill()
- except ProcessLookupError:
- # popen.kill(): the process completed, the TestWorkerProcess thread
- # read its exit status, but Popen.send_signal() read the returncode
- # just before Popen.wait() set returncode.
- pass
- except OSError as exc:
- print_warning(f"Failed to kill {what}: {exc!r}")
-
- def stop(self) -> None:
- # Method called from a different thread to stop this thread
- self._stopped = True
- self._kill()
-
- def mp_result_error(
- self,
- test_result: TestResult,
- stdout: str = '',
- err_msg=None
- ) -> MultiprocessResult:
- test_result.duration_sec = time.monotonic() - self.start_time
- return MultiprocessResult(test_result, stdout, err_msg)
-
- def _run_process(self, test_name: str, tmp_dir: str, stdout_fh: TextIO) -> int:
- self.start_time = time.monotonic()
-
- self.current_test_name = test_name
- try:
- popen = run_test_in_subprocess(test_name, self.ns, tmp_dir, stdout_fh)
-
- self._killed = False
- self._popen = popen
- except:
- self.current_test_name = None
- raise
-
- try:
- if self._stopped:
- # If kill() has been called before self._popen is set,
- # self._popen is still running. Call again kill()
- # to ensure that the process is killed.
- self._kill()
- raise ExitThread
-
- try:
- # gh-94026: stdout+stderr are written to tempfile
- retcode = popen.wait(timeout=self.timeout)
- assert retcode is not None
- return retcode
- except subprocess.TimeoutExpired:
- if self._stopped:
- # kill() has been called: communicate() fails on reading
- # closed stdout
- raise ExitThread
-
- # On timeout, kill the process
- self._kill()
-
- # None means TIMEOUT for the caller
- retcode = None
- # bpo-38207: Don't attempt to call communicate() again: on it
- # can hang until all child processes using stdout
- # pipes completes.
- except OSError:
- if self._stopped:
- # kill() has been called: communicate() fails
- # on reading closed stdout
- raise ExitThread
- raise
- except:
- self._kill()
- raise
- finally:
- self._wait_completed()
- self._popen = None
- self.current_test_name = None
-
- def _runtest(self, test_name: str) -> MultiprocessResult:
- if sys.platform == 'win32':
- # gh-95027: When stdout is not a TTY, Python uses the ANSI code
- # page for the sys.stdout encoding. If the main process runs in a
- # terminal, sys.stdout uses WindowsConsoleIO with UTF-8 encoding.
- encoding = locale.getencoding()
- else:
- encoding = sys.stdout.encoding
-
- # gh-94026: Write stdout+stderr to a tempfile as workaround for
- # non-blocking pipes on Emscripten with NodeJS.
- with tempfile.TemporaryFile('w+', encoding=encoding) as stdout_fh:
- # gh-93353: Check for leaked temporary files in the parent process,
- # since the deletion of temporary files can happen late during
- # Python finalization: too late for libregrtest.
- if not support.is_wasi:
- # Don't check for leaked temporary files and directories if Python is
- # run on WASI. WASI don't pass environment variables like TMPDIR to
- # worker processes.
- tmp_dir = tempfile.mkdtemp(prefix="test_python_")
- tmp_dir = os.path.abspath(tmp_dir)
- try:
- retcode = self._run_process(test_name, tmp_dir, stdout_fh)
- finally:
- tmp_files = os.listdir(tmp_dir)
- os_helper.rmtree(tmp_dir)
- else:
- retcode = self._run_process(test_name, None, stdout_fh)
- tmp_files = ()
- stdout_fh.seek(0)
-
- try:
- stdout = stdout_fh.read().strip()
- except Exception as exc:
- # gh-101634: Catch UnicodeDecodeError if stdout cannot be
- # decoded from encoding
- err_msg = f"Cannot read process stdout: {exc}"
- return self.mp_result_error(ChildError(test_name), '', err_msg)
-
- if retcode is None:
- return self.mp_result_error(Timeout(test_name), stdout)
-
- err_msg = None
- if retcode != 0:
- err_msg = "Exit code %s" % retcode
- else:
- stdout, _, result = stdout.rpartition("\n")
- stdout = stdout.rstrip()
- if not result:
- err_msg = "Failed to parse worker stdout"
- else:
- try:
- # deserialize run_tests_worker() output
- result = json.loads(result, object_hook=decode_test_result)
- except Exception as exc:
- err_msg = "Failed to parse worker JSON: %s" % exc
-
- if err_msg is not None:
- return self.mp_result_error(ChildError(test_name), stdout, err_msg)
-
- if tmp_files:
- msg = (f'\n\n'
- f'Warning -- {test_name} leaked temporary files '
- f'({len(tmp_files)}): {", ".join(sorted(tmp_files))}')
- stdout += msg
- if isinstance(result, Passed):
- result = EnvChanged.from_passed(result)
-
- return MultiprocessResult(result, stdout, err_msg)
-
- def run(self) -> None:
- while not self._stopped:
- try:
- try:
- test_name = next(self.pending)
- except StopIteration:
- break
-
- mp_result = self._runtest(test_name)
- self.output.put((False, mp_result))
-
- if must_stop(mp_result.result, self.ns):
- break
- except ExitThread:
- break
- except BaseException:
- self.output.put((True, traceback.format_exc()))
- break
-
- def _wait_completed(self) -> None:
- popen = self._popen
-
- try:
- popen.wait(JOIN_TIMEOUT)
- except (subprocess.TimeoutExpired, OSError) as exc:
- print_warning(f"Failed to wait for {self} completion "
- f"(timeout={format_duration(JOIN_TIMEOUT)}): "
- f"{exc!r}")
-
- def wait_stopped(self, start_time: float) -> None:
- # bpo-38207: MultiprocessTestRunner.stop_workers() called self.stop()
- # which killed the process. Sometimes, killing the process from the
- # main thread does not interrupt popen.communicate() in
- # TestWorkerProcess thread. This loop with a timeout is a workaround
- # for that.
- #
- # Moreover, if this method fails to join the thread, it is likely
- # that Python will hang at exit while calling threading._shutdown()
- # which tries again to join the blocked thread. Regrtest.main()
- # uses EXIT_TIMEOUT to workaround this second bug.
- while True:
- # Write a message every second
- self.join(1.0)
- if not self.is_alive():
- break
- dt = time.monotonic() - start_time
- self.regrtest.log(f"Waiting for {self} thread "
- f"for {format_duration(dt)}")
- if dt > JOIN_TIMEOUT:
- print_warning(f"Failed to join {self} in {format_duration(dt)}")
- break
-
-
-def get_running(workers: list[TestWorkerProcess]) -> list[TestWorkerProcess]:
- running = []
- for worker in workers:
- current_test_name = worker.current_test_name
- if not current_test_name:
- continue
- dt = time.monotonic() - worker.start_time
- if dt >= PROGRESS_MIN_TIME:
- text = '%s (%s)' % (current_test_name, format_duration(dt))
- running.append(text)
- return running
-
-
-class MultiprocessTestRunner:
- def __init__(self, regrtest: Regrtest) -> None:
- self.regrtest = regrtest
- self.log = self.regrtest.log
- self.ns = regrtest.ns
- self.output: queue.Queue[QueueOutput] = queue.Queue()
- self.pending = MultiprocessIterator(self.regrtest.tests)
- if self.ns.timeout is not None:
- # Rely on faulthandler to kill a worker process. This timouet is
- # when faulthandler fails to kill a worker process. Give a maximum
- # of 5 minutes to faulthandler to kill the worker.
- self.worker_timeout = min(self.ns.timeout * 1.5,
- self.ns.timeout + 5 * 60)
- else:
- self.worker_timeout = None
- self.workers = None
-
- def start_workers(self) -> None:
- self.workers = [TestWorkerProcess(index, self)
- for index in range(1, self.ns.use_mp + 1)]
- msg = f"Run tests in parallel using {len(self.workers)} child processes"
- if self.ns.timeout:
- msg += (" (timeout: %s, worker timeout: %s)"
- % (format_duration(self.ns.timeout),
- format_duration(self.worker_timeout)))
- self.log(msg)
- for worker in self.workers:
- worker.start()
-
- def stop_workers(self) -> None:
- start_time = time.monotonic()
- for worker in self.workers:
- worker.stop()
- for worker in self.workers:
- worker.wait_stopped(start_time)
-
- def _get_result(self) -> QueueOutput | None:
- use_faulthandler = (self.ns.timeout is not None)
- timeout = PROGRESS_UPDATE
-
- # bpo-46205: check the status of workers every iteration to avoid
- # waiting forever on an empty queue.
- while any(worker.is_alive() for worker in self.workers):
- if use_faulthandler:
- faulthandler.dump_traceback_later(MAIN_PROCESS_TIMEOUT,
- exit=True)
-
- # wait for a thread
- try:
- return self.output.get(timeout=timeout)
- except queue.Empty:
- pass
-
- # display progress
- running = get_running(self.workers)
- if running and not self.ns.pgo:
- self.log('running: %s' % ', '.join(running))
-
- # all worker threads are done: consume pending results
- try:
- return self.output.get(timeout=0)
- except queue.Empty:
- return None
-
- def display_result(self, mp_result: MultiprocessResult) -> None:
- result = mp_result.result
-
- text = str(result)
- if mp_result.error_msg is not None:
- # CHILD_ERROR
- text += ' (%s)' % mp_result.error_msg
- elif (result.duration_sec >= PROGRESS_MIN_TIME and not self.ns.pgo):
- text += ' (%s)' % format_duration(result.duration_sec)
- running = get_running(self.workers)
- if running and not self.ns.pgo:
- text += ' -- running: %s' % ', '.join(running)
- self.regrtest.display_progress(self.test_index, text)
-
- def _process_result(self, item: QueueOutput) -> bool:
- """Returns True if test runner must stop."""
- if item[0]:
- # Thread got an exception
- format_exc = item[1]
- print_warning(f"regrtest worker thread failed: {format_exc}")
- result = ChildError("")
- self.regrtest.accumulate_result(result)
- return True
-
- self.test_index += 1
- mp_result = item[1]
- self.regrtest.accumulate_result(mp_result.result)
- self.display_result(mp_result)
-
- if mp_result.stdout:
- print(mp_result.stdout, flush=True)
-
- if must_stop(mp_result.result, self.ns):
- return True
-
- return False
-
- def run_tests(self) -> None:
- self.start_workers()
-
- self.test_index = 0
- try:
- while True:
- item = self._get_result()
- if item is None:
- break
-
- stop = self._process_result(item)
- if stop:
- break
- except KeyboardInterrupt:
- print()
- self.regrtest.interrupted = True
- finally:
- if self.ns.timeout is not None:
- faulthandler.cancel_dump_traceback_later()
-
- # Always ensure that all worker processes are no longer
- # worker when we exit this function
- self.pending.stop()
- self.stop_workers()
-
-
-def run_tests_multiprocess(regrtest: Regrtest) -> None:
- MultiprocessTestRunner(regrtest).run_tests()
-
-
-class EncodeTestResult(json.JSONEncoder):
- """Encode a TestResult (sub)class object into a JSON dict."""
-
- def default(self, o: Any) -> dict[str, Any]:
- if isinstance(o, TestResult):
- result = vars(o)
- result["__test_result__"] = o.__class__.__name__
- return result
-
- return super().default(o)
-
-
-def decode_test_result(d: dict[str, Any]) -> TestResult | dict[str, Any]:
- """Decode a TestResult (sub)class object from a JSON dict."""
-
- if "__test_result__" not in d:
- return d
-
- cls_name = d.pop("__test_result__")
- for cls in get_all_test_result_classes():
- if cls.__name__ == cls_name:
- return cls(**d)
-
-
-def get_all_test_result_classes() -> set[type[TestResult]]:
- prev_count = 0
- classes = {TestResult}
- while len(classes) > prev_count:
- prev_count = len(classes)
- to_add = []
- for cls in classes:
- to_add.extend(cls.__subclasses__())
- classes.update(to_add)
- return classes
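
The EncodeTestResult/decode_test_result pair removed here illustrates the
general trick for round-tripping a class hierarchy through JSON: the encoder
stores the subclass name next to vars(obj), and the object hook maps that name
back to a class. A self-contained sketch of the same idea with toy classes
(not the real TestResult tree):

    import json

    class Result:
        def __init__(self, name):
            self.name = name

    class Passed(Result):
        pass

    class ResultEncoder(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, Result):
                d = dict(vars(o))
                d["__result__"] = type(o).__name__  # remember the subclass
                return d
            return super().default(o)

    def decode_result(d):
        cls_name = d.pop("__result__", None)
        if cls_name is None:
            return d
        cls = {c.__name__: c for c in (Result, Passed)}[cls_name]
        return cls(**d)

    data = json.dumps(Passed("test_os"), cls=ResultEncoder)
    assert isinstance(json.loads(data, object_hook=decode_result), Passed)
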
diff --git a/Lib/test/libregrtest/runtests.py b/Lib/test/libregrtest/runtests.py
new file mode 100644
index 00000000000000..4da312db4cb02e
--- /dev/null
+++ b/Lib/test/libregrtest/runtests.py
@@ -0,0 +1,162 @@
+import contextlib
+import dataclasses
+import json
+import os
+import subprocess
+from typing import Any
+
+from test import support
+
+from .utils import (
+ StrPath, StrJSON, TestTuple, FilterTuple, FilterDict)
+
+
+class JsonFileType:
+ UNIX_FD = "UNIX_FD"
+ WINDOWS_HANDLE = "WINDOWS_HANDLE"
+ STDOUT = "STDOUT"
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class JsonFile:
+ # file type depends on file_type:
+ # - UNIX_FD: file descriptor (int)
+ # - WINDOWS_HANDLE: handle (int)
+ # - STDOUT: use process stdout (None)
+ file: int | None
+ file_type: str
+
+ def configure_subprocess(self, popen_kwargs: dict) -> None:
+ match self.file_type:
+ case JsonFileType.UNIX_FD:
+ # Unix file descriptor
+ popen_kwargs['pass_fds'] = [self.file]
+ case JsonFileType.WINDOWS_HANDLE:
+ # Windows handle
+ startupinfo = subprocess.STARTUPINFO()
+ startupinfo.lpAttributeList = {"handle_list": [self.file]}
+ popen_kwargs['startupinfo'] = startupinfo
+
+ @contextlib.contextmanager
+ def inherit_subprocess(self):
+ if self.file_type == JsonFileType.WINDOWS_HANDLE:
+ os.set_handle_inheritable(self.file, True)
+ try:
+ yield
+ finally:
+ os.set_handle_inheritable(self.file, False)
+ else:
+ yield
+
+ def open(self, mode='r', *, encoding):
+ if self.file_type == JsonFileType.STDOUT:
+ raise ValueError("for STDOUT file type, just use sys.stdout")
+
+ file = self.file
+ if self.file_type == JsonFileType.WINDOWS_HANDLE:
+ import msvcrt
+ # Create a file descriptor from the handle
+ file = msvcrt.open_osfhandle(file, os.O_WRONLY)
+ return open(file, mode, encoding=encoding)
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class HuntRefleak:
+ warmups: int
+ runs: int
+ filename: StrPath
+
+
+@dataclasses.dataclass(slots=True, frozen=True)
+class RunTests:
+ tests: TestTuple
+ fail_fast: bool
+ fail_env_changed: bool
+ match_tests: FilterTuple | None
+ ignore_tests: FilterTuple | None
+ match_tests_dict: FilterDict | None
+ rerun: bool
+ forever: bool
+ pgo: bool
+ pgo_extended: bool
+ output_on_failure: bool
+ timeout: float | None
+ verbose: int
+ quiet: bool
+ hunt_refleak: HuntRefleak | None
+ test_dir: StrPath | None
+ use_junit: bool
+ memory_limit: str | None
+ gc_threshold: int | None
+ use_resources: tuple[str, ...]
+ python_cmd: tuple[str, ...] | None
+ randomize: bool
+ random_seed: int | None
+ json_file: JsonFile | None
+
+ def copy(self, **override):
+ state = dataclasses.asdict(self)
+ state.update(override)
+ return RunTests(**state)
+
+ def get_match_tests(self, test_name) -> FilterTuple | None:
+ if self.match_tests_dict is not None:
+ return self.match_tests_dict.get(test_name, None)
+ else:
+ return None
+
+ def get_jobs(self):
+ # Number of run_single_test() calls needed to run all tests.
+ # None means that there is no limit (--forever option).
+ if self.forever:
+ return None
+ return len(self.tests)
+
+ def iter_tests(self):
+ if self.forever:
+ while True:
+ yield from self.tests
+ else:
+ yield from self.tests
+
+ def as_json(self) -> StrJSON:
+ return json.dumps(self, cls=_EncodeRunTests)
+
+ @staticmethod
+ def from_json(worker_json: StrJSON) -> 'RunTests':
+ return json.loads(worker_json, object_hook=_decode_runtests)
+
+ def json_file_use_stdout(self) -> bool:
+ # Use STDOUT in two cases:
+ #
+ # - If --python command line option is used;
+ # - On Emscripten and WASI.
+ #
+ # On other platforms, UNIX_FD or WINDOWS_HANDLE can be used.
+ return (
+ bool(self.python_cmd)
+ or support.is_emscripten
+ or support.is_wasi
+ )
+
+
+class _EncodeRunTests(json.JSONEncoder):
+ def default(self, o: Any) -> dict[str, Any]:
+ if isinstance(o, RunTests):
+ result = dataclasses.asdict(o)
+ result["__runtests__"] = True
+ return result
+ else:
+ return super().default(o)
+
+
+def _decode_runtests(data: dict[str, Any]) -> RunTests | dict[str, Any]:
+ if "__runtests__" in data:
+ data.pop('__runtests__')
+ if data['hunt_refleak']:
+ data['hunt_refleak'] = HuntRefleak(**data['hunt_refleak'])
+ if data['json_file']:
+ data['json_file'] = JsonFile(**data['json_file'])
+ return RunTests(**data)
+ else:
+ return data
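
RunTests.as_json()/from_json() above rely on dataclasses.asdict() plus a marker
key, with the object hook rebuilding the nested dataclasses. A reduced sketch
of that round trip with made-up fields (not the full RunTests definition):

    import dataclasses
    import json

    @dataclasses.dataclass(frozen=True)
    class HuntRefleak:
        warmups: int
        runs: int

    @dataclasses.dataclass(frozen=True)
    class RunTests:
        tests: tuple[str, ...]
        hunt_refleak: HuntRefleak | None

        def as_json(self) -> str:
            state = dataclasses.asdict(self)
            state["__runtests__"] = True
            return json.dumps(state)

        @staticmethod
        def from_json(text: str) -> "RunTests":
            def hook(d):
                if "__runtests__" not in d:
                    return d        # a nested dict, e.g. hunt_refleak
                d.pop("__runtests__")
                if d["hunt_refleak"]:
                    d["hunt_refleak"] = HuntRefleak(**d["hunt_refleak"])
                d["tests"] = tuple(d["tests"])
                return RunTests(**d)
            return json.loads(text, object_hook=hook)

    rt = RunTests(tests=("test_os",), hunt_refleak=HuntRefleak(5, 10))
    assert RunTests.from_json(rt.as_json()) == rt
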
diff --git a/Lib/test/libregrtest/save_env.py b/Lib/test/libregrtest/save_env.py
index c7801b767c590c..b2cc381344b2ef 100644
--- a/Lib/test/libregrtest/save_env.py
+++ b/Lib/test/libregrtest/save_env.py
@@ -3,9 +3,11 @@
import os
import sys
import threading
+
from test import support
from test.support import os_helper
-from test.libregrtest.utils import print_warning
+
+from .utils import print_warning
class SkipTestEnvironment(Exception):
@@ -23,7 +25,7 @@ class SkipTestEnvironment(Exception):
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
- with saved_test_environment(testname, verbose, quiet):
+ with saved_test_environment(test_name, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
@@ -34,8 +36,8 @@ class saved_test_environment:
items is also printed.
"""
- def __init__(self, testname, verbose=0, quiet=False, *, pgo=False):
- self.testname = testname
+ def __init__(self, test_name, verbose, quiet, *, pgo):
+ self.test_name = test_name
self.verbose = verbose
self.quiet = quiet
self.pgo = pgo
@@ -323,7 +325,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
restore(original)
if not self.quiet and not self.pgo:
print_warning(
- f"{name} was modified by {self.testname}\n"
+ f"{name} was modified by {self.test_name}\n"
f" Before: {original}\n"
f" After: {current} ")
return False
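
saved_test_environment works by snapshotting a long list of interpreter and OS
attributes before the test runs, then comparing and restoring them afterwards.
Reduced to a single resource, the pattern is essentially this sketch (tracking
only os.environ, unlike the real class):

    import contextlib
    import os

    @contextlib.contextmanager
    def saved_environ(test_name: str):
        original = dict(os.environ)          # snapshot before the test
        try:
            yield
        finally:
            current = dict(os.environ)
            if current != original:
                print(f"Warning -- os.environ was modified by {test_name}")
                os.environ.clear()
                os.environ.update(original)  # restore the snapshot
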
diff --git a/Lib/test/libregrtest/setup.py b/Lib/test/libregrtest/setup.py
index b76bece7ca08b5..204f10fe839792 100644
--- a/Lib/test/libregrtest/setup.py
+++ b/Lib/test/libregrtest/setup.py
@@ -1,24 +1,32 @@
-import atexit
import faulthandler
+import gc
import os
+import random
import signal
import sys
import unittest
from test import support
from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII
-try:
- import gc
-except ImportError:
- gc = None
-from test.libregrtest.utils import (setup_unraisable_hook,
- setup_threading_excepthook)
+from .runtests import RunTests
+from .utils import (
+ setup_unraisable_hook, setup_threading_excepthook, fix_umask,
+ replace_stdout, adjust_rlimit_nofile)
UNICODE_GUARD_ENV = "PYTHONREGRTEST_UNICODE_GUARD"
-def setup_tests(ns):
+def setup_test_dir(testdir: str | None) -> None:
+ if testdir:
+ # Prepend test directory to sys.path, so runtest() will be able
+ # to locate tests
+ sys.path.insert(0, os.path.abspath(testdir))
+
+
+def setup_process():
+ fix_umask()
+
try:
stderr_fd = sys.__stderr__.fileno()
except (ValueError, AttributeError):
@@ -40,15 +48,10 @@ def setup_tests(ns):
for signum in signals:
faulthandler.register(signum, chain=True, file=stderr_fd)
- _adjust_resource_limits()
+ adjust_rlimit_nofile()
replace_stdout()
support.record_original_stdout(sys.stdout)
- if ns.testdir:
- # Prepend test directory to sys.path, so runtest() will be able
- # to locate tests
- sys.path.insert(0, os.path.abspath(ns.testdir))
-
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
@@ -66,19 +69,6 @@ def setup_tests(ns):
if getattr(module, '__file__', None):
module.__file__ = os.path.abspath(module.__file__)
- if ns.huntrleaks:
- unittest.BaseTestSuite._cleanup = False
-
- if ns.memlimit is not None:
- support.set_memlimit(ns.memlimit)
-
- if ns.threshold is not None:
- gc.set_threshold(ns.threshold)
-
- support.suppress_msvcrt_asserts(ns.verbose and ns.verbose >= 2)
-
- support.use_resources = ns.use_resources
-
if hasattr(sys, 'addaudithook'):
# Add an auditing hook for all tests to ensure PySys_Audit is tested
def _test_audit_hook(name, args):
@@ -88,21 +78,6 @@ def _test_audit_hook(name, args):
setup_unraisable_hook()
setup_threading_excepthook()
- if ns.timeout is not None:
- # For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT
- support.SHORT_TIMEOUT = max(support.SHORT_TIMEOUT, ns.timeout / 40)
- support.LONG_TIMEOUT = max(support.LONG_TIMEOUT, ns.timeout / 4)
-
- # If --timeout is short: reduce timeouts
- support.LOOPBACK_TIMEOUT = min(support.LOOPBACK_TIMEOUT, ns.timeout)
- support.INTERNET_TIMEOUT = min(support.INTERNET_TIMEOUT, ns.timeout)
- support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, ns.timeout)
- support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, ns.timeout)
-
- if ns.xmlpath:
- from test.support.testresult import RegressionTestResult
- RegressionTestResult.USE_XML = True
-
# Ensure there's a non-ASCII character in env vars at all times to force
# tests consider this case. See BPO-44647 for details.
if TESTFN_UNDECODABLE and os.supports_bytes_environ:
@@ -111,49 +86,45 @@ def _test_audit_hook(name, args):
os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)
-def replace_stdout():
- """Set stdout encoder error handler to backslashreplace (as stderr error
- handler) to avoid UnicodeEncodeError when printing a traceback"""
- stdout = sys.stdout
- try:
- fd = stdout.fileno()
- except ValueError:
- # On IDLE, sys.stdout has no file descriptor and is not a TextIOWrapper
- # object. Leaving sys.stdout unchanged.
- #
- # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
- # and ValueError on a closed stream.
- return
+def setup_tests(runtests: RunTests):
+ support.verbose = runtests.verbose
+ support.failfast = runtests.fail_fast
+ support.PGO = runtests.pgo
+ support.PGO_EXTENDED = runtests.pgo_extended
- sys.stdout = open(fd, 'w',
- encoding=stdout.encoding,
- errors="backslashreplace",
- closefd=False,
- newline='\n')
+ support.set_match_tests(runtests.match_tests, runtests.ignore_tests)
- def restore_stdout():
- sys.stdout.close()
- sys.stdout = stdout
- atexit.register(restore_stdout)
+ if runtests.use_junit:
+ support.junit_xml_list = []
+ from test.support.testresult import RegressionTestResult
+ RegressionTestResult.USE_XML = True
+ else:
+ support.junit_xml_list = None
+ if runtests.memory_limit is not None:
+ support.set_memlimit(runtests.memory_limit)
-def _adjust_resource_limits():
- """Adjust the system resource limits (ulimit) if needed."""
- try:
- import resource
- from resource import RLIMIT_NOFILE
- except ImportError:
- return
- fd_limit, max_fds = resource.getrlimit(RLIMIT_NOFILE)
- # On macOS the default fd limit is sometimes too low (256) for our
- # test suite to succeed. Raise it to something more reasonable.
- # 1024 is a common Linux default.
- desired_fds = 1024
- if fd_limit < desired_fds and fd_limit < max_fds:
- new_fd_limit = min(desired_fds, max_fds)
- try:
- resource.setrlimit(RLIMIT_NOFILE, (new_fd_limit, max_fds))
- print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}")
- except (ValueError, OSError) as err:
- print(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to "
- f"{new_fd_limit}: {err}.")
+ support.suppress_msvcrt_asserts(runtests.verbose >= 2)
+
+ support.use_resources = runtests.use_resources
+
+ timeout = runtests.timeout
+ if timeout is not None:
+ # For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT
+ support.SHORT_TIMEOUT = max(support.SHORT_TIMEOUT, timeout / 40)
+ support.LONG_TIMEOUT = max(support.LONG_TIMEOUT, timeout / 4)
+
+ # If --timeout is short: reduce timeouts
+ support.LOOPBACK_TIMEOUT = min(support.LOOPBACK_TIMEOUT, timeout)
+ support.INTERNET_TIMEOUT = min(support.INTERNET_TIMEOUT, timeout)
+ support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, timeout)
+ support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, timeout)
+
+ if runtests.hunt_refleak:
+ unittest.BaseTestSuite._cleanup = False
+
+ if runtests.gc_threshold is not None:
+ gc.set_threshold(runtests.gc_threshold)
+
+ if runtests.randomize:
+ random.seed(runtests.random_seed)
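
The timeout block moved into setup_tests() first raises SHORT_TIMEOUT and
LONG_TIMEOUT for slow buildbots and then caps every timeout at the --timeout
value. The net effect of that max()/min() pair is easiest to see numerically;
the constants below are stand-ins, not the real test.support values:

    SHORT_TIMEOUT = 30.0
    LONG_TIMEOUT = 5 * 60.0

    def adjust_timeouts(timeout: float) -> tuple[float, float]:
        # Slow buildbots: never go below timeout/40 and timeout/4 ...
        short = max(SHORT_TIMEOUT, timeout / 40)
        long_ = max(LONG_TIMEOUT, timeout / 4)
        # ... but a short --timeout also caps both values.
        short = min(short, timeout)
        long_ = min(long_, timeout)
        return short, long_

    assert adjust_timeouts(10.0) == (10.0, 10.0)     # short --timeout caps both
    assert adjust_timeouts(3600.0) == (90.0, 900.0)  # slow buildbot raises both
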
diff --git a/Lib/test/libregrtest/single.py b/Lib/test/libregrtest/single.py
new file mode 100644
index 00000000000000..0304f858edf42c
--- /dev/null
+++ b/Lib/test/libregrtest/single.py
@@ -0,0 +1,278 @@
+import doctest
+import faulthandler
+import gc
+import importlib
+import io
+import sys
+import time
+import traceback
+import unittest
+
+from test import support
+from test.support import TestStats
+from test.support import threading_helper
+
+from .result import State, TestResult
+from .runtests import RunTests
+from .save_env import saved_test_environment
+from .setup import setup_tests
+from .utils import (
+ TestName,
+ clear_caches, remove_testfn, abs_module_name, print_warning)
+
+
+# Minimum duration of a test to display its duration or to mention that
+# the test is running in background
+PROGRESS_MIN_TIME = 30.0 # seconds
+
+
+def run_unittest(test_mod):
+ loader = unittest.TestLoader()
+ tests = loader.loadTestsFromModule(test_mod)
+ for error in loader.errors:
+ print(error, file=sys.stderr)
+ if loader.errors:
+ raise Exception("errors while loading tests")
+ return support.run_unittest(tests)
+
+
+def regrtest_runner(result: TestResult, test_func, runtests: RunTests) -> None:
+ # Run test_func(), collect statistics, and detect reference and memory
+ # leaks.
+ if runtests.hunt_refleak:
+ from .refleak import runtest_refleak
+ refleak, test_result = runtest_refleak(result.test_name, test_func,
+ runtests.hunt_refleak,
+ runtests.quiet)
+ else:
+ test_result = test_func()
+ refleak = False
+
+ if refleak:
+ result.state = State.REFLEAK
+
+ stats: TestStats | None
+
+ match test_result:
+ case TestStats():
+ stats = test_result
+ case unittest.TestResult():
+ stats = TestStats.from_unittest(test_result)
+ case doctest.TestResults():
+ stats = TestStats.from_doctest(test_result)
+ case None:
+ print_warning(f"{result.test_name} test runner returned None: {test_func}")
+ stats = None
+ case _:
+ print_warning(f"Unknown test result type: {type(test_result)}")
+ stats = None
+
+ result.stats = stats
+
+
+# Storage of uncollectable GC objects (gc.garbage)
+GC_GARBAGE = []
+
+
+def _load_run_test(result: TestResult, runtests: RunTests) -> None:
+ # Load the test module and run the tests.
+ test_name = result.test_name
+ module_name = abs_module_name(test_name, runtests.test_dir)
+
+ # Remove the module from sys.module to reload it if it was already imported
+ sys.modules.pop(module_name, None)
+
+ test_mod = importlib.import_module(module_name)
+
+ if hasattr(test_mod, "test_main"):
+ # https://github.com/python/cpython/issues/89392
+ raise Exception(f"Module {test_name} defines test_main() which "
+ f"is no longer supported by regrtest")
+ def test_func():
+ return run_unittest(test_mod)
+
+ try:
+ regrtest_runner(result, test_func, runtests)
+ finally:
+ # First kill any dangling references to open files etc.
+ # This can also issue some ResourceWarnings which would otherwise get
+ # triggered during the following test run, and possibly produce
+ # failures.
+ support.gc_collect()
+
+ remove_testfn(test_name, runtests.verbose)
+
+ if gc.garbage:
+ support.environment_altered = True
+ print_warning(f"{test_name} created {len(gc.garbage)} "
+ f"uncollectable object(s)")
+
+ # move the uncollectable objects somewhere,
+ # so we don't see them again
+ GC_GARBAGE.extend(gc.garbage)
+ gc.garbage.clear()
+
+ support.reap_children()
+
+
+def _runtest_env_changed_exc(result: TestResult, runtests: RunTests,
+ display_failure: bool = True) -> None:
+ # Handle exceptions, detect environment changes.
+
+ # Reset the environment_altered flag to detect if a test altered
+ # the environment
+ support.environment_altered = False
+
+ pgo = runtests.pgo
+ if pgo:
+ display_failure = False
+ quiet = runtests.quiet
+
+ test_name = result.test_name
+ try:
+ clear_caches()
+ support.gc_collect()
+
+ with saved_test_environment(test_name,
+ runtests.verbose, quiet, pgo=pgo):
+ _load_run_test(result, runtests)
+ except support.ResourceDenied as exc:
+ if not quiet and not pgo:
+ print(f"{test_name} skipped -- {exc}", flush=True)
+ result.state = State.RESOURCE_DENIED
+ return
+ except unittest.SkipTest as exc:
+ if not quiet and not pgo:
+ print(f"{test_name} skipped -- {exc}", flush=True)
+ result.state = State.SKIPPED
+ return
+ except support.TestFailedWithDetails as exc:
+ msg = f"test {test_name} failed"
+ if display_failure:
+ msg = f"{msg} -- {exc}"
+ print(msg, file=sys.stderr, flush=True)
+ result.state = State.FAILED
+ result.errors = exc.errors
+ result.failures = exc.failures
+ result.stats = exc.stats
+ return
+ except support.TestFailed as exc:
+ msg = f"test {test_name} failed"
+ if display_failure:
+ msg = f"{msg} -- {exc}"
+ print(msg, file=sys.stderr, flush=True)
+ result.state = State.FAILED
+ result.stats = exc.stats
+ return
+ except support.TestDidNotRun:
+ result.state = State.DID_NOT_RUN
+ return
+ except KeyboardInterrupt:
+ print()
+ result.state = State.INTERRUPTED
+ return
+ except:
+ if not pgo:
+ msg = traceback.format_exc()
+ print(f"test {test_name} crashed -- {msg}",
+ file=sys.stderr, flush=True)
+ result.state = State.UNCAUGHT_EXC
+ return
+
+ if support.environment_altered:
+ result.set_env_changed()
+ # Don't override the state if it was already set (REFLEAK or ENV_CHANGED)
+ if result.state is None:
+ result.state = State.PASSED
+
+
+def _runtest(result: TestResult, runtests: RunTests) -> None:
+ # Capture stdout and stderr, set faulthandler timeout,
+ # and create JUnit XML report.
+ verbose = runtests.verbose
+ output_on_failure = runtests.output_on_failure
+ timeout = runtests.timeout
+
+ use_timeout = (
+ timeout is not None and threading_helper.can_start_thread
+ )
+ if use_timeout:
+ faulthandler.dump_traceback_later(timeout, exit=True)
+
+ try:
+ setup_tests(runtests)
+
+ if output_on_failure:
+ support.verbose = True
+
+ stream = io.StringIO()
+ orig_stdout = sys.stdout
+ orig_stderr = sys.stderr
+ print_warning = support.print_warning
+ orig_print_warnings_stderr = print_warning.orig_stderr
+
+ output = None
+ try:
+ sys.stdout = stream
+ sys.stderr = stream
+ # print_warning() writes into the temporary stream to preserve
+ # messages order. If support.environment_altered becomes true,
+ # warnings will be written to sys.stderr below.
+ print_warning.orig_stderr = stream
+
+ _runtest_env_changed_exc(result, runtests, display_failure=False)
+ # Ignore output if the test passed successfully
+ if result.state != State.PASSED:
+ output = stream.getvalue()
+ finally:
+ sys.stdout = orig_stdout
+ sys.stderr = orig_stderr
+ print_warning.orig_stderr = orig_print_warnings_stderr
+
+ if output is not None:
+ sys.stderr.write(output)
+ sys.stderr.flush()
+ else:
+ # Tell tests to be moderately quiet
+ support.verbose = verbose
+ _runtest_env_changed_exc(result, runtests,
+ display_failure=not verbose)
+
+ xml_list = support.junit_xml_list
+ if xml_list:
+ import xml.etree.ElementTree as ET
+ result.xml_data = [ET.tostring(x).decode('us-ascii')
+ for x in xml_list]
+ finally:
+ if use_timeout:
+ faulthandler.cancel_dump_traceback_later()
+ support.junit_xml_list = None
+
+
+def run_single_test(test_name: TestName, runtests: RunTests) -> TestResult:
+ """Run a single test.
+
+ test_name -- the name of the test
+
+ Returns a TestResult.
+
+ If runtests.use_junit, xml_data is a list containing each generated
+ testsuite element.
+ """
+ start_time = time.perf_counter()
+ result = TestResult(test_name)
+ pgo = runtests.pgo
+ try:
+ _runtest(result, runtests)
+ except:
+ if not pgo:
+ msg = traceback.format_exc()
+ print(f"test {test_name} crashed -- {msg}",
+ file=sys.stderr, flush=True)
+ result.state = State.UNCAUGHT_EXC
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ result.duration = time.perf_counter() - start_time
+ return result
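
The output_on_failure branch of _runtest() follows a classic capture-and-replay
pattern: stdout and stderr are redirected into a StringIO, and the captured
text is written to the real stderr only when the test did not pass. A minimal
sketch of that pattern, independent of regrtest:

    import io
    import sys

    def run_captured(func) -> bool:
        stream = io.StringIO()
        orig_stdout, orig_stderr = sys.stdout, sys.stderr
        ok = False
        try:
            sys.stdout = sys.stderr = stream
            ok = func()
        finally:
            sys.stdout, sys.stderr = orig_stdout, orig_stderr
            if not ok:
                # Failure: replay everything the test printed while captured.
                sys.stderr.write(stream.getvalue())
                sys.stderr.flush()
        return ok
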
diff --git a/Lib/test/libregrtest/utils.py b/Lib/test/libregrtest/utils.py
index 89a149ec5d6b36..6af949cea9c926 100644
--- a/Lib/test/libregrtest/utils.py
+++ b/Lib/test/libregrtest/utils.py
@@ -1,9 +1,46 @@
+import atexit
+import contextlib
+import faulthandler
+import locale
import math
import os.path
+import platform
+import random
import sys
import sysconfig
+import tempfile
import textwrap
+from collections.abc import Callable
+
from test import support
+from test.support import os_helper
+from test.support import threading_helper
+
+
+MS_WINDOWS = (sys.platform == 'win32')
+
+# All temporary files and temporary directories created by libregrtest should
+# use TMP_PREFIX so cleanup_temp_dir() can remove them all.
+TMP_PREFIX = 'test_python_'
+WORK_DIR_PREFIX = TMP_PREFIX
+WORKER_WORK_DIR_PREFIX = WORK_DIR_PREFIX + 'worker_'
+
+# bpo-38203: Maximum delay in seconds to exit Python (call Py_Finalize()).
+# Used to protect against threading._shutdown() hang.
+# Must be smaller than buildbot "1200 seconds without output" limit.
+EXIT_TIMEOUT = 120.0
+
+
+# Types for type hints
+StrPath = str
+TestName = str
+StrJSON = str
+TestTuple = tuple[TestName, ...]
+TestList = list[TestName]
+# --match and --ignore options: list of patterns
+# ('*' joker character can be used)
+FilterTuple = tuple[TestName, ...]
+FilterDict = dict[TestName, FilterTuple]
def format_duration(seconds):
@@ -31,7 +68,7 @@ def format_duration(seconds):
return ' '.join(parts)
-def removepy(names):
+def strip_py_suffix(names: list[str] | None) -> None:
if not names:
return
for idx, name in enumerate(names):
@@ -40,11 +77,20 @@ def removepy(names):
names[idx] = basename
+def plural(n, singular, plural=None):
+ if n == 1:
+ return singular
+ elif plural is not None:
+ return plural
+ else:
+ return singular + 's'
+
+
def count(n, word):
if n == 1:
- return "%d %s" % (n, word)
+ return f"{n} {word}"
else:
- return "%d %ss" % (n, word)
+ return f"{n} {word}s"
def printlist(x, width=70, indent=4, file=None):
@@ -264,16 +310,8 @@ def get_build_info():
elif '-flto' in ldflags_nodist:
optimizations.append('LTO')
- # --enable-optimizations
- pgo_options = (
- # GCC
- '-fprofile-use',
- # clang: -fprofile-instr-use=code.profclangd
- '-fprofile-instr-use',
- # ICC
- "-prof-use",
- )
- if any(option in cflags_nodist for option in pgo_options):
+ if support.check_cflags_pgo():
+ # PGO (--enable-optimizations)
optimizations.append('PGO')
if optimizations:
build.append('+'.join(optimizations))
@@ -305,3 +343,267 @@ def get_build_info():
build.append("dtrace")
return build
+
+
+def get_temp_dir(tmp_dir: StrPath | None = None) -> StrPath:
+ if tmp_dir:
+ tmp_dir = os.path.expanduser(tmp_dir)
+ else:
+ # When tests are run from the Python build directory, it is best practice
+ # to keep the test files in a subfolder. This eases the cleanup of leftover
+ # files using the "make distclean" command.
+ if sysconfig.is_python_build():
+ if not support.is_wasi:
+ tmp_dir = sysconfig.get_config_var('abs_builddir')
+ if tmp_dir is None:
+ # bpo-30284: On Windows, only srcdir is available. Using
+ # abs_builddir mostly matters on UNIX when building Python
+ # out of the source tree, especially when the source tree
+ # is read only.
+ tmp_dir = sysconfig.get_config_var('srcdir')
+ tmp_dir = os.path.join(tmp_dir, 'build')
+ else:
+ # WASI platform
+ tmp_dir = sysconfig.get_config_var('projectbase')
+ tmp_dir = os.path.join(tmp_dir, 'build')
+
+ # When get_temp_dir() is called in a worker process,
+ # get_temp_dir() path is different than in the parent process
+ # which is not a WASI process. So the parent does not create
+ # the same "tmp_dir" than the test worker process.
+ os.makedirs(tmp_dir, exist_ok=True)
+ else:
+ tmp_dir = tempfile.gettempdir()
+
+ return os.path.abspath(tmp_dir)
+
+
+def fix_umask():
+ if support.is_emscripten:
+ # Emscripten has default umask 0o777, which breaks some tests.
+ # see https://github.com/emscripten-core/emscripten/issues/17269
+ old_mask = os.umask(0)
+ if old_mask == 0o777:
+ os.umask(0o027)
+ else:
+ os.umask(old_mask)
+
+
+def get_work_dir(parent_dir: StrPath, worker: bool = False) -> StrPath:
+ # Define a writable temp dir that will be used as cwd while running
+ # the tests. The name of the dir includes the pid to allow parallel
+ # testing (see the -j option).
+ # Emscripten and WASI have stubbed getpid(), Emscripten has only
+ # millisecond clock resolution. Use randint() instead.
+ if support.is_emscripten or support.is_wasi:
+ nounce = random.randint(0, 1_000_000)
+ else:
+ nounce = os.getpid()
+
+ if worker:
+ work_dir = WORKER_WORK_DIR_PREFIX + str(nounce)
+ else:
+ work_dir = WORK_DIR_PREFIX + str(nounce)
+ work_dir += os_helper.FS_NONASCII
+ work_dir = os.path.join(parent_dir, work_dir)
+ return work_dir
+
+
+@contextlib.contextmanager
+def exit_timeout():
+ try:
+ yield
+ except SystemExit as exc:
+ # bpo-38203: Python can hang at exit in Py_Finalize(), especially
+ # on threading._shutdown() call: put a timeout
+ if threading_helper.can_start_thread:
+ faulthandler.dump_traceback_later(EXIT_TIMEOUT, exit=True)
+ sys.exit(exc.code)
+
+
+def remove_testfn(test_name: TestName, verbose: int) -> None:
+ # Try to clean up os_helper.TESTFN if left behind.
+ #
+ # While tests shouldn't leave any files or directories behind, when a test
+ # fails that can be tedious for it to arrange. The consequences can be
+ # especially nasty on Windows, since if a test leaves a file open, it
+ # cannot be deleted by name (while there's nothing we can do about that
+ # here either, we can display the name of the offending test, which is a
+ # real help).
+ name = os_helper.TESTFN
+ if not os.path.exists(name):
+ return
+
+ nuker: Callable[[str], None]
+ if os.path.isdir(name):
+ import shutil
+ kind, nuker = "directory", shutil.rmtree
+ elif os.path.isfile(name):
+ kind, nuker = "file", os.unlink
+ else:
+ raise RuntimeError(f"os.path says {name!r} exists but is neither "
+ f"directory nor file")
+
+ if verbose:
+ print_warning(f"{test_name} left behind {kind} {name!r}")
+ support.environment_altered = True
+
+ try:
+ import stat
+ # fix possible permissions problems that might prevent cleanup
+ os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+ nuker(name)
+ except Exception as exc:
+ print_warning(f"{test_name} left behind {kind} {name!r} "
+ f"and it couldn't be removed: {exc}")
+
+
+def abs_module_name(test_name: TestName, test_dir: StrPath | None) -> TestName:
+ if test_name.startswith('test.') or test_dir:
+ return test_name
+ else:
+ # Import it from the test package
+ return 'test.' + test_name
+
+
+# gh-90681: When rerunning tests, we might need to rerun the whole
+# class or module suite if some of its life-cycle hooks fail.
+# Test level hooks are not affected.
+_TEST_LIFECYCLE_HOOKS = frozenset((
+ 'setUpClass', 'tearDownClass',
+ 'setUpModule', 'tearDownModule',
+))
+
+def normalize_test_name(test_full_name, *, is_error=False):
+ short_name = test_full_name.split(" ")[0]
+ if is_error and short_name in _TEST_LIFECYCLE_HOOKS:
+ if test_full_name.startswith(('setUpModule (', 'tearDownModule (')):
+ # if setUpModule() or tearDownModule() failed, don't filter
+ # tests with the test file name, don't use filters.
+ return None
+
+ # This means that we have a failure in a life-cycle hook,
+ # we need to rerun the whole module or class suite.
+ # Basically the error looks like this:
+ # ERROR: setUpClass (test.test_reg_ex.RegTest)
+ # or
+ # ERROR: setUpModule (test.test_reg_ex)
+ # So, we need to parse the class / module name.
+ lpar = test_full_name.index('(')
+ rpar = test_full_name.index(')')
+ return test_full_name[lpar + 1: rpar].split('.')[-1]
+ return short_name
+
+
+def replace_stdout():
+ """Set stdout encoder error handler to backslashreplace (as stderr error
+ handler) to avoid UnicodeEncodeError when printing a traceback"""
+ stdout = sys.stdout
+ try:
+ fd = stdout.fileno()
+ except ValueError:
+ # On IDLE, sys.stdout has no file descriptor and is not a TextIOWrapper
+ # object. Leaving sys.stdout unchanged.
+ #
+ # Catch ValueError to catch io.UnsupportedOperation on TextIOBase
+ # and ValueError on a closed stream.
+ return
+
+ sys.stdout = open(fd, 'w',
+ encoding=stdout.encoding,
+ errors="backslashreplace",
+ closefd=False,
+ newline='\n')
+
+ def restore_stdout():
+ sys.stdout.close()
+ sys.stdout = stdout
+ atexit.register(restore_stdout)
+
+
+def adjust_rlimit_nofile():
+ """
+ On macOS the default fd limit (RLIMIT_NOFILE) is sometimes too low (256)
+ for our test suite to succeed. Raise it to something more reasonable. 1024
+ is a common Linux default.
+ """
+ try:
+ import resource
+ except ImportError:
+ return
+
+ fd_limit, max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ desired_fds = 1024
+
+ if fd_limit < desired_fds and fd_limit < max_fds:
+ new_fd_limit = min(desired_fds, max_fds)
+ try:
+ resource.setrlimit(resource.RLIMIT_NOFILE,
+ (new_fd_limit, max_fds))
+ print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}")
+ except (ValueError, OSError) as err:
+ print_warning(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to "
+ f"{new_fd_limit}: {err}.")
+
+
+def display_header():
+ encoding = sys.stdout.encoding
+
+ # Print basic platform information
+ print("==", platform.python_implementation(), *sys.version.split())
+ print("==", platform.platform(aliased=True),
+ "%s-endian" % sys.byteorder)
+ print("== Python build:", ' '.join(get_build_info()))
+
+ cwd = os.getcwd()
+    # gh-109508: support.os_helper.FS_NONASCII, used by get_work_dir(), is
+    # deliberately not encodable to the filesystem encoding, so escape
+    # non-encodable characters with the backslashreplace error handler.
+ formatted_cwd = cwd.encode(encoding, "backslashreplace").decode(encoding)
+ print("== cwd:", formatted_cwd)
+
+ cpu_count = os.cpu_count()
+ if cpu_count:
+ print("== CPU count:", cpu_count)
+ print("== encodings: locale=%s, FS=%s"
+ % (locale.getencoding(), sys.getfilesystemencoding()))
+
+ # This makes it easier to remember what to set in your local
+ # environment when trying to reproduce a sanitizer failure.
+ asan = support.check_sanitizer(address=True)
+ msan = support.check_sanitizer(memory=True)
+ ubsan = support.check_sanitizer(ub=True)
+ sanitizers = []
+ if asan:
+ sanitizers.append("address")
+ if msan:
+ sanitizers.append("memory")
+ if ubsan:
+ sanitizers.append("undefined behavior")
+ if not sanitizers:
+ return
+
+ print(f"== sanitizers: {', '.join(sanitizers)}")
+ for sanitizer, env_var in (
+ (asan, "ASAN_OPTIONS"),
+ (msan, "MSAN_OPTIONS"),
+ (ubsan, "UBSAN_OPTIONS"),
+ ):
+        options = os.environ.get(env_var)
+ if sanitizer and options is not None:
+ print(f"== {env_var}={options!r}")
+
+
+def cleanup_temp_dir(tmp_dir: StrPath):
+ import glob
+
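+    # Remove files and directories matching TMP_PREFIX left behind by
+    # previous regrtest runs.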
+ path = os.path.join(glob.escape(tmp_dir), TMP_PREFIX + '*')
+ print("Cleanup %s directory" % tmp_dir)
+ for name in glob.glob(path):
+ if os.path.isdir(name):
+ print("Remove directory: %s" % name)
+ os_helper.rmtree(name)
+ else:
+ print("Remove file: %s" % name)
+ os_helper.unlink(name)
diff --git a/Lib/test/libregrtest/worker.py b/Lib/test/libregrtest/worker.py
new file mode 100644
index 00000000000000..610e0a8437839d
--- /dev/null
+++ b/Lib/test/libregrtest/worker.py
@@ -0,0 +1,109 @@
+import subprocess
+import sys
+import os
+from typing import Any, NoReturn
+
+from test import support
+from test.support import os_helper
+
+from .setup import setup_process, setup_test_dir
+from .runtests import RunTests, JsonFile, JsonFileType
+from .single import run_single_test
+from .utils import (
+ StrPath, StrJSON, FilterTuple,
+ get_temp_dir, get_work_dir, exit_timeout)
+
+
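+# Process groups can only be used if the platform can create (setsid)
+# and signal (killpg) them.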
+USE_PROCESS_GROUP = (hasattr(os, "setsid") and hasattr(os, "killpg"))
+
+
+def create_worker_process(runtests: RunTests, output_fd: int,
+ tmp_dir: StrPath | None = None) -> subprocess.Popen:
+ python_cmd = runtests.python_cmd
+ worker_json = runtests.as_json()
+
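+    # Use the Python command from RunTests if one was given, otherwise
+    # run the worker with the current interpreter.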
+ if python_cmd is not None:
+ executable = python_cmd
+ else:
+ executable = (sys.executable,)
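+    # Run the worker via -m with the same interpreter flags as the parent.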
+ cmd = [*executable, *support.args_from_interpreter_flags(),
+ '-u', # Unbuffered stdout and stderr
+ '-m', 'test.libregrtest.worker',
+ worker_json]
+
+ env = dict(os.environ)
+ if tmp_dir is not None:
+ env['TMPDIR'] = tmp_dir
+ env['TEMP'] = tmp_dir
+ env['TMP'] = tmp_dir
+
+ # Emscripten and WASI Python must start in the Python source code directory
+ # to get 'python.js' or 'python.wasm' file. Then worker_process() changes
+ # to a temporary directory created to run tests.
+ work_dir = os_helper.SAVEDCWD
+
+ # Running the child from the same working directory as regrtest's original
+ # invocation ensures that TEMPDIR for the child is the same when
+ # sysconfig.is_python_build() is true. See issue 15300.
+ kwargs: dict[str, Any] = dict(
+ env=env,
+ stdout=output_fd,
+ # bpo-45410: Write stderr into stdout to keep messages order
+ stderr=output_fd,
+ text=True,
+ close_fds=True,
+ cwd=work_dir,
+ )
+
+ # Pass json_file to the worker process
+ json_file = runtests.json_file
+ json_file.configure_subprocess(kwargs)
+
+ with json_file.inherit_subprocess():
+ return subprocess.Popen(cmd, **kwargs)
+
+
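+# Entry point of a worker process: deserialize RunTests from JSON, run a
+# single test, and exit.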
+def worker_process(worker_json: StrJSON) -> NoReturn:
+ runtests = RunTests.from_json(worker_json)
+ test_name = runtests.tests[0]
+ match_tests: FilterTuple | None = runtests.match_tests
+ json_file: JsonFile = runtests.json_file
+
+ setup_test_dir(runtests.test_dir)
+ setup_process()
+
+ if runtests.rerun:
+ if match_tests:
+ matching = "matching: " + ", ".join(match_tests)
+ print(f"Re-running {test_name} in verbose mode ({matching})", flush=True)
+ else:
+ print(f"Re-running {test_name} in verbose mode", flush=True)
+
+ result = run_single_test(test_name, runtests)
+
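+    # Report the result as JSON, either on stdout or in the file passed
+    # by the parent process.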
+ if json_file.file_type == JsonFileType.STDOUT:
+ print()
+ result.write_json_into(sys.stdout)
+ else:
+ with json_file.open('w', encoding='utf-8') as json_fp:
+ result.write_json_into(json_fp)
+
+ sys.exit(0)
+
+
+def main():
+ if len(sys.argv) != 2:
+ print("usage: python -m test.libregrtest.worker JSON")
+ sys.exit(1)
+ worker_json = sys.argv[1]
+
+ tmp_dir = get_temp_dir()
+ work_dir = get_work_dir(tmp_dir, worker=True)
+
+ with exit_timeout():
+ with os_helper.temp_cwd(work_dir, quiet=True):
+ worker_process(worker_json)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/Lib/test/list_tests.py b/Lib/test/list_tests.py
index b1ef332522d2ce..d9ab21d4941cdb 100644
--- a/Lib/test/list_tests.py
+++ b/Lib/test/list_tests.py
@@ -6,7 +6,7 @@
from functools import cmp_to_key
from test import seq_tests
-from test.support import ALWAYS_EQ, NEVER_EQ, C_RECURSION_LIMIT
+from test.support import ALWAYS_EQ, NEVER_EQ, Py_C_RECURSION_LIMIT
class CommonTest(seq_tests.CommonTest):
@@ -61,7 +61,7 @@ def test_repr(self):
def test_repr_deep(self):
a = self.type2test([])
- for i in range(C_RECURSION_LIMIT + 1):
+ for i in range(Py_C_RECURSION_LIMIT + 1):
a = self.type2test([a])
self.assertRaises(RecursionError, repr, a)
diff --git a/Lib/test/mapping_tests.py b/Lib/test/mapping_tests.py
index 5492bbf86d1f87..b4cfce19a7174e 100644
--- a/Lib/test/mapping_tests.py
+++ b/Lib/test/mapping_tests.py
@@ -1,8 +1,7 @@
# tests common to dict and UserDict
import unittest
import collections
-import sys
-from test.support import C_RECURSION_LIMIT
+from test.support import Py_C_RECURSION_LIMIT
class BasicTestMappingProtocol(unittest.TestCase):
@@ -625,7 +624,7 @@ def __repr__(self):
def test_repr_deep(self):
d = self._empty_mapping()
- for i in range(C_RECURSION_LIMIT + 1):
+ for i in range(Py_C_RECURSION_LIMIT + 1):
d0 = d
d = self._empty_mapping()
d[1] = d0
diff --git a/Lib/test/cmath_testcases.txt b/Lib/test/mathdata/cmath_testcases.txt
similarity index 100%
rename from Lib/test/cmath_testcases.txt
rename to Lib/test/mathdata/cmath_testcases.txt
diff --git a/Lib/test/floating_points.txt b/Lib/test/mathdata/floating_points.txt
similarity index 100%
rename from Lib/test/floating_points.txt
rename to Lib/test/mathdata/floating_points.txt
diff --git a/Lib/test/formatfloat_testcases.txt b/Lib/test/mathdata/formatfloat_testcases.txt
similarity index 100%
rename from Lib/test/formatfloat_testcases.txt
rename to Lib/test/mathdata/formatfloat_testcases.txt
diff --git a/Lib/test/ieee754.txt b/Lib/test/mathdata/ieee754.txt
similarity index 100%
rename from Lib/test/ieee754.txt
rename to Lib/test/mathdata/ieee754.txt
diff --git a/Lib/test/math_testcases.txt b/Lib/test/mathdata/math_testcases.txt
similarity index 100%
rename from Lib/test/math_testcases.txt
rename to Lib/test/mathdata/math_testcases.txt
diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py
index a687fe0629080a..ddb180ef5ef825 100644
--- a/Lib/test/pickletester.py
+++ b/Lib/test/pickletester.py
@@ -2408,6 +2408,22 @@ def test_reduce_calls_base(self):
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
+ def test_reduce_ex_None(self):
+ c = REX_None()
+ with self.assertRaises(TypeError):
+ self.dumps(c)
+
+ def test_reduce_None(self):
+ c = R_None()
+ with self.assertRaises(TypeError):
+ self.dumps(c)
+
+ def test_pickle_setstate_None(self):
+ c = C_None_setstate()
+ p = self.dumps(c)
+ with self.assertRaises(TypeError):
+ self.loads(p)
+
@no_tracing
def test_bad_getattr(self):
# Issue #3514: crash when there is an infinite loop in __getattr__
@@ -3349,6 +3365,21 @@ def __setstate__(self, state):
def __reduce__(self):
return type(self), (), self.state
+class REX_None:
+ """ Setting __reduce_ex__ to None should fail """
+ __reduce_ex__ = None
+
+class R_None:
+ """ Setting __reduce__ to None should fail """
+ __reduce__ = None
+
+class C_None_setstate:
+ """ Setting __setstate__ to None should fail """
+ def __getstate__(self):
+ return 1
+
+ __setstate__ = None
+
# Test classes for newobj
@@ -3752,6 +3783,25 @@ def test_unpickling_buffering_readline(self):
unpickler = self.unpickler_class(f)
self.assertEqual(unpickler.load(), data)
+ def test_pickle_invalid_reducer_override(self):
+ # gh-103035
+ obj = object()
+
+ f = io.BytesIO()
+ class MyPickler(self.pickler_class):
+ pass
+ pickler = MyPickler(f)
+ pickler.dump(obj)
+
+ pickler.clear_memo()
+ pickler.reducer_override = None
+ with self.assertRaises(TypeError):
+ pickler.dump(obj)
+
+ pickler.clear_memo()
+ pickler.reducer_override = 10
+ with self.assertRaises(TypeError):
+ pickler.dump(obj)
# Tests for dispatch_table attribute
@@ -3914,6 +3964,15 @@ def dumps(obj, protocol=None):
self._test_dispatch_table(dumps, dt)
+ def test_dispatch_table_None_item(self):
+ # gh-93627
+ obj = object()
+ f = io.BytesIO()
+ pickler = self.pickler_class(f)
+ pickler.dispatch_table = {type(obj): None}
+ with self.assertRaises(TypeError):
+ pickler.dump(obj)
+
def _test_dispatch_table(self, dumps, dispatch_table):
def custom_load_dump(obj):
return pickle.loads(dumps(obj, 0))
diff --git a/Lib/test/pythoninfo.py b/Lib/test/pythoninfo.py
index 53af21db0755b1..c372efaedd313b 100644
--- a/Lib/test/pythoninfo.py
+++ b/Lib/test/pythoninfo.py
@@ -1,18 +1,13 @@
"""
Collect various information about Python to help debugging test failures.
"""
-from __future__ import print_function
import errno
import re
import sys
import traceback
-import unittest
import warnings
-MS_WINDOWS = (sys.platform == 'win32')
-
-
def normalize_text(text):
if text is None:
return None
@@ -112,6 +107,7 @@ def collect_sys(info_add):
call_func(info_add, 'sys.androidapilevel', sys, 'getandroidapilevel')
call_func(info_add, 'sys.windowsversion', sys, 'getwindowsversion')
+ call_func(info_add, 'sys.getrecursionlimit', sys, 'getrecursionlimit')
encoding = sys.getfilesystemencoding()
if hasattr(sys, 'getfilesystemencodeerrors'):
@@ -163,6 +159,26 @@ def collect_platform(info_add):
if libc_ver:
info_add('platform.libc_ver', libc_ver)
+ try:
+ os_release = platform.freedesktop_os_release()
+ except OSError:
+ pass
+ else:
+ for key in (
+ 'ID',
+ 'NAME',
+            'PRETTY_NAME',
+ 'VARIANT',
+ 'VARIANT_ID',
+ 'VERSION',
+ 'VERSION_CODENAME',
+ 'VERSION_ID',
+ ):
+ if key not in os_release:
+ continue
+ info_add(f'platform.freedesktop_os_release[{key}]',
+ os_release[key])
+
def collect_locale(info_add):
import locale
@@ -252,6 +268,7 @@ def format_groups(groups):
"ARCHFLAGS",
"ARFLAGS",
"AUDIODEV",
+ "BUILDPYTHON",
"CC",
"CFLAGS",
"COLUMNS",
@@ -304,6 +321,7 @@ def format_groups(groups):
"VIRTUAL_ENV",
"WAYLAND_DISPLAY",
"WINDIR",
+ "_PYTHON_HOSTRUNNER",
"_PYTHON_HOST_PLATFORM",
"_PYTHON_PROJECT_BASE",
"_PYTHON_SYSCONFIGDATA_NAME",
@@ -319,7 +337,8 @@ def format_groups(groups):
for name, value in os.environ.items():
uname = name.upper()
if (uname in ENV_VARS
- # Copy PYTHON* and LC_* variables
+ # Copy PYTHON* variables like PYTHONPATH
+ # Copy LC_* variables like LC_ALL
or uname.startswith(("PYTHON", "LC_"))
# Visual Studio: VS140COMNTOOLS
or (uname.startswith("VS") and uname.endswith("COMNTOOLS"))):
@@ -472,13 +491,10 @@ def collect_datetime(info_add):
def collect_sysconfig(info_add):
- # On Windows, sysconfig is not reliable to get macros used
- # to build Python
- if MS_WINDOWS:
- return
-
import sysconfig
+ info_add('sysconfig.is_python_build', sysconfig.is_python_build())
+
for name in (
'ABIFLAGS',
'ANDROID_API_LEVEL',
@@ -487,6 +503,7 @@ def collect_sysconfig(info_add):
'CFLAGS',
'CFLAGSFORSHARED',
'CONFIG_ARGS',
+ 'HOSTRUNNER',
'HOST_GNU_TYPE',
'MACHDEP',
'MULTIARCH',
@@ -502,7 +519,9 @@ def collect_sysconfig(info_add):
'Py_NOGIL',
'SHELL',
'SOABI',
+ 'abs_builddir',
'prefix',
+ 'srcdir',
):
value = sysconfig.get_config_var(name)
if name == 'ANDROID_API_LEVEL' and not value:
@@ -644,6 +663,22 @@ def collect_decimal(info_add):
def collect_testcapi(info_add):
+ try:
+ import _testcapi
+ except ImportError:
+ return
+
+ for name in (
+ 'LONG_MAX', # always 32-bit on Windows, 64-bit on 64-bit Unix
+ 'PY_SSIZE_T_MAX',
+ 'Py_C_RECURSION_LIMIT',
+ 'SIZEOF_TIME_T', # 32-bit or 64-bit depending on the platform
+ 'SIZEOF_WCHAR_T', # 16-bit or 32-bit depending on the platform
+ ):
+ copy_attr(info_add, f'_testcapi.{name}', _testcapi, name)
+
+
+def collect_testinternalcapi(info_add):
try:
import _testinternalcapi
except ImportError:
@@ -651,6 +686,12 @@ def collect_testcapi(info_add):
call_func(info_add, 'pymem.allocator', _testinternalcapi, 'pymem_getallocatorsname')
+ for name in (
+ 'SIZEOF_PYGC_HEAD',
+ 'SIZEOF_PYOBJECT',
+ ):
+ copy_attr(info_add, f'_testinternalcapi.{name}', _testinternalcapi, name)
+
def collect_resource(info_add):
try:
@@ -668,6 +709,7 @@ def collect_resource(info_add):
def collect_test_socket(info_add):
+ import unittest
try:
from test import test_socket
except (ImportError, unittest.SkipTest):
@@ -679,26 +721,82 @@ def collect_test_socket(info_add):
copy_attributes(info_add, test_socket, 'test_socket.%s', attributes)
-def collect_test_support(info_add):
+def collect_support(info_add):
try:
from test import support
except ImportError:
return
- attributes = ('IPV6_ENABLED',)
- copy_attributes(info_add, support, 'test_support.%s', attributes)
+ attributes = (
+ 'has_fork_support',
+ 'has_socket_support',
+ 'has_strftime_extensions',
+ 'has_subprocess_support',
+ 'is_android',
+ 'is_emscripten',
+ 'is_jython',
+ 'is_wasi',
+ )
+ copy_attributes(info_add, support, 'support.%s', attributes)
- call_func(info_add, 'test_support._is_gui_available', support, '_is_gui_available')
- call_func(info_add, 'test_support.python_is_optimized', support, 'python_is_optimized')
+ call_func(info_add, 'support._is_gui_available', support, '_is_gui_available')
+ call_func(info_add, 'support.python_is_optimized', support, 'python_is_optimized')
- info_add('test_support.check_sanitizer(address=True)',
+ info_add('support.check_sanitizer(address=True)',
support.check_sanitizer(address=True))
- info_add('test_support.check_sanitizer(memory=True)',
+ info_add('support.check_sanitizer(memory=True)',
support.check_sanitizer(memory=True))
- info_add('test_support.check_sanitizer(ub=True)',
+ info_add('support.check_sanitizer(ub=True)',
support.check_sanitizer(ub=True))
+def collect_support_os_helper(info_add):
+ try:
+ from test.support import os_helper
+ except ImportError:
+ return
+
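+    # Record which optional OS features (symlink, xattr, chmod,
+    # DAC override) are available on this machine.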
+ for name in (
+ 'can_symlink',
+ 'can_xattr',
+ 'can_chmod',
+ 'can_dac_override',
+ ):
+ func = getattr(os_helper, name)
+ info_add(f'support_os_helper.{name}', func())
+
+
+def collect_support_socket_helper(info_add):
+ try:
+ from test.support import socket_helper
+ except ImportError:
+ return
+
+ attributes = (
+ 'IPV6_ENABLED',
+ 'has_gethostname',
+ )
+ copy_attributes(info_add, socket_helper, 'support_socket_helper.%s', attributes)
+
+ for name in (
+ 'tcp_blackhole',
+ ):
+ func = getattr(socket_helper, name)
+ info_add(f'support_socket_helper.{name}', func())
+
+
+def collect_support_threading_helper(info_add):
+ try:
+ from test.support import threading_helper
+ except ImportError:
+ return
+
+ attributes = (
+ 'can_start_thread',
+ )
+ copy_attributes(info_add, threading_helper, 'support_threading_helper.%s', attributes)
+
+
def collect_cc(info_add):
import subprocess
import sysconfig
@@ -853,6 +951,21 @@ def collect_fips(info_add):
pass
+def collect_tempfile(info_add):
+ import tempfile
+
+ info_add('tempfile.gettempdir', tempfile.gettempdir())
+
+
+def collect_libregrtest_utils(info_add):
+ try:
+ from test.libregrtest import utils
+ except ImportError:
+ return
+
+ info_add('libregrtests.build_info', ' '.join(utils.get_build_info()))
+
+
def collect_info(info):
error = False
info_add = info.add
@@ -886,14 +999,20 @@ def collect_info(info):
collect_sys,
collect_sysconfig,
collect_testcapi,
+ collect_testinternalcapi,
+ collect_tempfile,
collect_time,
collect_tkinter,
collect_windows,
collect_zlib,
+ collect_libregrtest_utils,
# Collecting from tests should be last as they have side effects.
collect_test_socket,
- collect_test_support,
+ collect_support,
+ collect_support_os_helper,
+ collect_support_socket_helper,
+ collect_support_threading_helper,
):
try:
collect_func(info_add)
@@ -919,7 +1038,6 @@ def dump_info(info, file=None):
for key, value in infos:
value = value.replace("\n", " ")
print("%s: %s" % (key, value))
- print()
def main():
@@ -928,6 +1046,7 @@ def main():
dump_info(info)
if error:
+ print()
print("Collection failed: exit with error", file=sys.stderr)
sys.exit(1)
diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 0ffb3ed454eda0..46a74fe276f553 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -8,7 +8,7 @@
import os
import sys
-from test.libregrtest import main
+from test.libregrtest.main import main
# Alias for backward compatibility (just in case)
diff --git a/Lib/test/ssl_servers.py b/Lib/test/ssl_servers.py
index a4bd7455d47e76..15b071e04dda1f 100644
--- a/Lib/test/ssl_servers.py
+++ b/Lib/test/ssl_servers.py
@@ -14,7 +14,7 @@
here = os.path.dirname(__file__)
HOST = socket_helper.HOST
-CERTFILE = os.path.join(here, 'keycert.pem')
+CERTFILE = os.path.join(here, 'certdata', 'keycert.pem')
# This one's based on HTTPServer, which is based on socketserver
diff --git a/Lib/test/support/__init__.py b/Lib/test/support/__init__.py
index 16a5056a33aa12..8a4555ce16fbb6 100644
--- a/Lib/test/support/__init__.py
+++ b/Lib/test/support/__init__.py
@@ -4,6 +4,7 @@
raise ImportError('support must be imported from the test package')
import contextlib
+import dataclasses
import functools
import getpass
import _opcode
@@ -59,8 +60,9 @@
"run_with_tz", "PGO", "missing_compiler_executable",
"ALWAYS_EQ", "NEVER_EQ", "LARGEST", "SMALLEST",
"LOOPBACK_TIMEOUT", "INTERNET_TIMEOUT", "SHORT_TIMEOUT", "LONG_TIMEOUT",
- "Py_DEBUG", "EXCEEDS_RECURSION_LIMIT", "C_RECURSION_LIMIT",
+ "Py_DEBUG", "EXCEEDS_RECURSION_LIMIT", "Py_C_RECURSION_LIMIT",
"skip_on_s390x",
+ "without_optimizer",
]
@@ -118,17 +120,20 @@ class Error(Exception):
class TestFailed(Error):
"""Test failed."""
+ def __init__(self, msg, *args, stats=None):
+ self.msg = msg
+ self.stats = stats
+ super().__init__(msg, *args)
+
+ def __str__(self):
+ return self.msg
class TestFailedWithDetails(TestFailed):
"""Test failed."""
- def __init__(self, msg, errors, failures):
- self.msg = msg
+ def __init__(self, msg, errors, failures, stats):
self.errors = errors
self.failures = failures
- super().__init__(msg, errors, failures)
-
- def __str__(self):
- return self.msg
+ super().__init__(msg, errors, failures, stats=stats)
class TestDidNotRun(Error):
"""Test did not run any subtests."""
@@ -502,15 +507,6 @@ def has_no_debug_ranges():
def requires_debug_ranges(reason='requires co_positions / debug_ranges'):
return unittest.skipIf(has_no_debug_ranges(), reason)
-def requires_legacy_unicode_capi():
- try:
- from _testcapi import unicode_legacy_string
- except ImportError:
- unicode_legacy_string = None
-
- return unittest.skipUnless(unicode_legacy_string,
- 'requires legacy Unicode C API')
-
# Is not actually used in tests, but is kept for compatibility.
is_jython = sys.platform.startswith('java')
@@ -777,6 +773,21 @@ def python_is_optimized():
return final_opt not in ('', '-O0', '-Og')
+def check_cflags_pgo():
+ # Check if Python was built with ./configure --enable-optimizations:
+ # with Profile Guided Optimization (PGO).
+ cflags_nodist = sysconfig.get_config_var('PY_CFLAGS_NODIST') or ''
+ pgo_options = (
+ # GCC
+ '-fprofile-use',
+ # clang: -fprofile-instr-use=code.profclangd
+ '-fprofile-instr-use',
+ # ICC
+ "-prof-use",
+ )
+ return any(option in cflags_nodist for option in pgo_options)
+
+
_header = 'nP'
_align = '0n'
_vheader = _header + 'n'
@@ -883,27 +894,31 @@ def inner(*args, **kwds):
MAX_Py_ssize_t = sys.maxsize
-def set_memlimit(limit):
- global max_memuse
- global real_max_memuse
+def _parse_memlimit(limit: str) -> int:
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
- m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
+ m = re.match(r'(\d+(?:\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
- raise ValueError('Invalid memory limit %r' % (limit,))
- memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
- real_max_memuse = memlimit
- if memlimit > MAX_Py_ssize_t:
- memlimit = MAX_Py_ssize_t
+ raise ValueError(f'Invalid memory limit: {limit!r}')
+ return int(float(m.group(1)) * sizes[m.group(2).lower()])
+
+def set_memlimit(limit: str) -> None:
+ global max_memuse
+ global real_max_memuse
+ memlimit = _parse_memlimit(limit)
if memlimit < _2G - 1:
- raise ValueError('Memory limit %r too low to be useful' % (limit,))
+        raise ValueError(f'Memory limit {limit!r} too low to be useful')
+
+ real_max_memuse = memlimit
+ memlimit = min(memlimit, MAX_Py_ssize_t)
max_memuse = memlimit
+
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
@@ -1086,8 +1101,7 @@ def requires_limited_api(test):
import _testcapi
except ImportError:
return unittest.skip('needs _testcapi module')(test)
- return unittest.skipUnless(
- _testcapi.LIMITED_API_AVAILABLE, 'needs Limited API support')(test)
+ return test
def requires_specialization(test):
return unittest.skipUnless(
@@ -1105,6 +1119,30 @@ def _filter_suite(suite, pred):
newtests.append(test)
suite._tests = newtests
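+# Counters for tests run, failures and skips; built from unittest or
+# doctest results and accumulated across test runs.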
+@dataclasses.dataclass(slots=True)
+class TestStats:
+ tests_run: int = 0
+ failures: int = 0
+ skipped: int = 0
+
+ @staticmethod
+ def from_unittest(result):
+ return TestStats(result.testsRun,
+ len(result.failures),
+ len(result.skipped))
+
+ @staticmethod
+ def from_doctest(results):
+ return TestStats(results.attempted,
+ results.failed,
+ results.skipped)
+
+ def accumulate(self, stats):
+ self.tests_run += stats.tests_run
+ self.failures += stats.failures
+ self.skipped += stats.skipped
+
+
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
runner = get_test_runner(sys.stdout,
@@ -1119,6 +1157,7 @@ def _run_suite(suite):
if not result.testsRun and not result.skipped and not result.errors:
raise TestDidNotRun
if not result.wasSuccessful():
+ stats = TestStats.from_unittest(result)
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
@@ -1128,7 +1167,8 @@ def _run_suite(suite):
if not verbose: err += "; run in verbose mode for details"
errors = [(str(tc), exc_str) for tc, exc_str in result.errors]
failures = [(str(tc), exc_str) for tc, exc_str in result.failures]
- raise TestFailedWithDetails(err, errors, failures)
+ raise TestFailedWithDetails(err, errors, failures, stats=stats)
+ return result
# By default, don't filter tests
@@ -1159,7 +1199,6 @@ def _is_full_match_test(pattern):
def set_match_tests(accept_patterns=None, ignore_patterns=None):
global _match_test_func, _accept_test_patterns, _ignore_test_patterns
-
if accept_patterns is None:
accept_patterns = ()
if ignore_patterns is None:
@@ -1237,7 +1276,7 @@ def run_unittest(*classes):
else:
suite.addTest(loader.loadTestsFromTestCase(cls))
_filter_suite(suite, match_test)
- _run_suite(suite)
+ return _run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
@@ -1277,13 +1316,18 @@ def run_doctest(module, verbosity=None, optionflags=0):
else:
verbosity = None
- f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
- if f:
- raise TestFailed("%d of %d doctests failed" % (f, t))
+ results = doctest.testmod(module,
+ verbose=verbosity,
+ optionflags=optionflags)
+ if results.failed:
+ stats = TestStats.from_doctest(results)
+ raise TestFailed(f"{results.failed} of {results.attempted} "
+ f"doctests failed",
+ stats=stats)
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
- (module.__name__, t))
- return f, t
+ (module.__name__, results.attempted))
+ return results
#=======================================================================
@@ -2207,6 +2251,39 @@ def check_disallow_instantiation(testcase, tp, *args, **kwds):
msg = f"cannot create '{re.escape(qualname)}' instances"
testcase.assertRaisesRegex(TypeError, msg, tp, *args, **kwds)
+def get_recursion_depth():
+ """Get the recursion depth of the caller function.
+
+ In the __main__ module, at the module level, it should be 1.
+ """
+ try:
+ import _testinternalcapi
+ depth = _testinternalcapi.get_recursion_depth()
+ except (ImportError, RecursionError) as exc:
+ # sys._getframe() + frame.f_back implementation.
+ try:
+ depth = 0
+ frame = sys._getframe()
+ while frame is not None:
+ depth += 1
+ frame = frame.f_back
+ finally:
+ # Break any reference cycles.
+ frame = None
+
+ # Ignore get_recursion_depth() frame.
+ return max(depth - 1, 1)
+
+def get_recursion_available():
+ """Get the number of available frames before RecursionError.
+
+ It depends on the current recursion depth of the caller function and
+ sys.getrecursionlimit().
+ """
+ limit = sys.getrecursionlimit()
+ depth = get_recursion_depth()
+ return limit - depth
+
@contextlib.contextmanager
def set_recursion_limit(limit):
"""Temporarily change the recursion limit."""
@@ -2217,14 +2294,18 @@ def set_recursion_limit(limit):
finally:
sys.setrecursionlimit(original_limit)
-def infinite_recursion(max_depth=75):
+def infinite_recursion(max_depth=100):
"""Set a lower limit for tests that interact with infinite recursions
(e.g test_ast.ASTHelpers_Test.test_recursion_direct) since on some
debug windows builds, due to not enough functions being inlined the
stack size might not handle the default recursion limit (1000). See
bpo-11105 for details."""
- return set_recursion_limit(max_depth)
-
+ if max_depth < 3:
+ raise ValueError("max_depth must be at least 3, got {max_depth}")
+ depth = get_recursion_depth()
+ depth = max(depth - 1, 1) # Ignore infinite_recursion() frame.
+ limit = depth + max_depth
+ return set_recursion_limit(limit)
def ignore_deprecations_from(module: str, *, like: str) -> object:
token = object()
@@ -2461,10 +2542,26 @@ def adjust_int_max_str_digits(max_digits):
EXCEEDS_RECURSION_LIMIT = 5000
# The default C recursion limit (from Include/cpython/pystate.h).
-C_RECURSION_LIMIT = 1500
+Py_C_RECURSION_LIMIT = 1500
#Windows doesn't have os.uname() but it doesn't support s390x.
skip_on_s390x = unittest.skipIf(hasattr(os, 'uname') and os.uname().machine == 's390x',
'skipped on s390x')
Py_TRACE_REFS = hasattr(sys, 'getobjects')
+
+# Decorator to disable the optimizer while a function runs
+def without_optimizer(func):
+ try:
+ import _testinternalcapi
+ except ImportError:
+ return func
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ save_opt = _testinternalcapi.get_optimizer()
+ try:
+ _testinternalcapi.set_optimizer(None)
+ return func(*args, **kwargs)
+ finally:
+ _testinternalcapi.set_optimizer(save_opt)
+ return wrapper
diff --git a/Lib/test/support/hypothesis_helper.py b/Lib/test/support/hypothesis_helper.py
index da16eb50c25958..db93eea5e912e0 100644
--- a/Lib/test/support/hypothesis_helper.py
+++ b/Lib/test/support/hypothesis_helper.py
@@ -10,7 +10,10 @@
hypothesis.settings.register_profile(
"slow-is-ok",
deadline=None,
- suppress_health_check=[hypothesis.HealthCheck.too_slow],
+ suppress_health_check=[
+ hypothesis.HealthCheck.too_slow,
+ hypothesis.HealthCheck.differing_executors,
+ ],
)
hypothesis.settings.load_profile("slow-is-ok")
diff --git a/Lib/test/support/import_helper.py b/Lib/test/support/import_helper.py
index 67f18e530edc4b..3d804f2b590108 100644
--- a/Lib/test/support/import_helper.py
+++ b/Lib/test/support/import_helper.py
@@ -8,7 +8,7 @@
import unittest
import warnings
-from .os_helper import unlink
+from .os_helper import unlink, temp_dir
@contextlib.contextmanager
@@ -274,3 +274,26 @@ def mock_register_at_fork(func):
# memory.
from unittest import mock
return mock.patch('os.register_at_fork', create=True)(func)
+
+
+@contextlib.contextmanager
+def ready_to_import(name=None, source=""):
+ from test.support import script_helper
+
+ # 1. Sets up a temporary directory and removes it afterwards
+ # 2. Creates the module file
+ # 3. Temporarily clears the module from sys.modules (if any)
+ # 4. Reverts or removes the module when cleaning up
+ name = name or "spam"
+ with temp_dir() as tempdir:
+ path = script_helper.make_script(tempdir, name, source)
+ old_module = sys.modules.pop(name, None)
+ try:
+ sys.path.insert(0, tempdir)
+ yield name, path
+ sys.path.remove(tempdir)
+ finally:
+ if old_module is not None:
+ sys.modules[name] = old_module
+ else:
+ sys.modules.pop(name, None)
diff --git a/Lib/test/support/interpreters.py b/Lib/test/support/interpreters.py
index 5c484d1170d1d9..eeff3abe0324e5 100644
--- a/Lib/test/support/interpreters.py
+++ b/Lib/test/support/interpreters.py
@@ -5,7 +5,7 @@
import _xxinterpchannels as _channels
# aliases:
-from _xxsubinterpreters import is_shareable, RunFailedError
+from _xxsubinterpreters import is_shareable
from _xxinterpchannels import (
ChannelError, ChannelNotFoundError, ChannelEmptyError,
)
diff --git a/Lib/test/support/socket_helper.py b/Lib/test/support/socket_helper.py
index 45f6d65c355dd4..87941ee1791b4e 100644
--- a/Lib/test/support/socket_helper.py
+++ b/Lib/test/support/socket_helper.py
@@ -3,6 +3,7 @@
import os.path
import socket
import sys
+import subprocess
import tempfile
import unittest
@@ -277,3 +278,62 @@ def create_unix_domain_name():
"""
return tempfile.mktemp(prefix="test_python_", suffix='.sock',
dir=os.path.curdir)
+
+
+# Assume that sysctl values do not change while tests are running
+_sysctl_cache = {}
+
+def _get_sysctl(name):
+ """Get a sysctl value as an integer."""
+ try:
+ return _sysctl_cache[name]
+ except KeyError:
+ pass
+
+ # At least Linux and FreeBSD support the "-n" option
+ cmd = ['sysctl', '-n', name]
+ proc = subprocess.run(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True)
+ if proc.returncode:
+        support.print_warning(f'{" ".join(cmd)!r} command failed with '
+ f'exit code {proc.returncode}')
+ # cache the error to only log the warning once
+ _sysctl_cache[name] = None
+ return None
+ output = proc.stdout
+
+ # Parse '0\n' to get '0'
+ try:
+ value = int(output.strip())
+ except Exception as exc:
+        support.print_warning(f'Failed to parse {" ".join(cmd)!r} '
+ f'command output {output!r}: {exc!r}')
+ # cache the error to only log the warning once
+ _sysctl_cache[name] = None
+ return None
+
+ _sysctl_cache[name] = value
+ return value
+
+
+def tcp_blackhole():
+ if not sys.platform.startswith('freebsd'):
+ return False
+
+ # gh-109015: test if FreeBSD TCP blackhole is enabled
+ value = _get_sysctl('net.inet.tcp.blackhole')
+ if value is None:
+ # don't skip if we fail to get the sysctl value
+ return False
+ return (value != 0)
+
+
+def skip_if_tcp_blackhole(test):
+ """Decorator skipping test if TCP blackhole is enabled."""
+ skip_if = unittest.skipIf(
+ tcp_blackhole(),
+ "TCP blackhole is enabled (sysctl net.inet.tcp.blackhole)"
+ )
+ return skip_if(test)
diff --git a/Lib/test/support/testresult.py b/Lib/test/support/testresult.py
index 14474be222dc4b..de23fdd59ded95 100644
--- a/Lib/test/support/testresult.py
+++ b/Lib/test/support/testresult.py
@@ -8,6 +8,7 @@
import time
import traceback
import unittest
+from test import support
class RegressionTestResult(unittest.TextTestResult):
USE_XML = False
@@ -112,6 +113,8 @@ def addExpectedFailure(self, test, err):
def addFailure(self, test, err):
self._add_result(test, True, failure=self.__makeErrorDict(*err))
super().addFailure(test, err)
+ if support.failfast:
+ self.stop()
def addSkip(self, test, reason):
self._add_result(test, skipped=reason)
diff --git a/Lib/test/test_asyncgen.py b/Lib/test/test_asyncgen.py
index 4f00558770dafd..a49630112af510 100644
--- a/Lib/test/test_asyncgen.py
+++ b/Lib/test/test_asyncgen.py
@@ -1058,8 +1058,7 @@ async def gen():
while True:
yield 1
finally:
- await asyncio.sleep(0.01)
- await asyncio.sleep(0.01)
+ await asyncio.sleep(0)
DONE = 1
async def run():
@@ -1069,7 +1068,10 @@ async def run():
del g
gc_collect() # For PyPy or other GCs.
- await asyncio.sleep(0.1)
+ # Starts running the aclose task
+ await asyncio.sleep(0)
+ # For asyncio.sleep(0) in finally block
+ await asyncio.sleep(0)
self.loop.run_until_complete(run())
self.assertEqual(DONE, 1)
diff --git a/Lib/test/test_asyncio/test_eager_task_factory.py b/Lib/test/test_asyncio/test_eager_task_factory.py
index fc9ad8eb43bb1b..0f8212dbec47be 100644
--- a/Lib/test/test_asyncio/test_eager_task_factory.py
+++ b/Lib/test/test_asyncio/test_eager_task_factory.py
@@ -7,7 +7,6 @@
from unittest import mock
from asyncio import tasks
from test.test_asyncio import utils as test_utils
-import test.support
from test.support.script_helper import assert_python_ok
MOCK_ANY = mock.ANY
diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py
index b9069056c3a436..f22cb5e58bba62 100644
--- a/Lib/test/test_asyncio/test_events.py
+++ b/Lib/test/test_asyncio/test_events.py
@@ -1,6 +1,5 @@
"""Tests for events.py."""
-import collections.abc
import concurrent.futures
import functools
import io
@@ -31,6 +30,7 @@
from asyncio import coroutines
from asyncio import events
from asyncio import selector_events
+from multiprocessing.util import _cleanup_tests as multiprocessing_cleanup_tests
from test.test_asyncio import utils as test_utils
from test import support
from test.support import socket_helper
@@ -671,6 +671,7 @@ def test_create_connection_local_addr(self):
self.assertEqual(port, expected)
tr.close()
+ @socket_helper.skip_if_tcp_blackhole
def test_create_connection_local_addr_skip_different_family(self):
# See https://github.com/python/cpython/issues/86508
port1 = socket_helper.find_unused_port()
@@ -692,6 +693,7 @@ async def getaddrinfo(host, port, *args, **kwargs):
with self.assertRaises(OSError):
self.loop.run_until_complete(f)
+ @socket_helper.skip_if_tcp_blackhole
def test_create_connection_local_addr_nomatch_family(self):
# See https://github.com/python/cpython/issues/86508
port1 = socket_helper.find_unused_port()
@@ -1271,6 +1273,7 @@ def connection_made(self, transport):
server.close()
+ @socket_helper.skip_if_tcp_blackhole
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
@@ -2331,8 +2334,6 @@ def check_source_traceback(h):
h = loop.call_later(0, noop)
check_source_traceback(h)
- @unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
- 'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
# collections.abc.Coroutine, but lack cr_core or gi_code attributes
@@ -2762,6 +2763,8 @@ def test_get_event_loop_new_process(self):
# multiprocessing.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
+ self.addCleanup(multiprocessing_cleanup_tests)
+
async def main():
if multiprocessing.get_start_method() == 'fork':
# Avoid 'fork' DeprecationWarning.
diff --git a/Lib/test/test_asyncio/test_sock_lowlevel.py b/Lib/test/test_asyncio/test_sock_lowlevel.py
index b829fd4cc69fff..075113cbe8e4a6 100644
--- a/Lib/test/test_asyncio/test_sock_lowlevel.py
+++ b/Lib/test/test_asyncio/test_sock_lowlevel.py
@@ -10,6 +10,10 @@
from test import support
from test.support import socket_helper
+if socket_helper.tcp_blackhole():
+ raise unittest.SkipTest('Not relevant to ProactorEventLoop')
+
+
def tearDownModule():
asyncio.set_event_loop_policy(None)
diff --git a/Lib/test/test_asyncio/test_sslproto.py b/Lib/test/test_asyncio/test_sslproto.py
index 52a45f1c7c6e96..37d015339761c6 100644
--- a/Lib/test/test_asyncio/test_sslproto.py
+++ b/Lib/test/test_asyncio/test_sslproto.py
@@ -5,6 +5,7 @@
import unittest
import weakref
from test import support
+from test.support import socket_helper
from unittest import mock
try:
import ssl
@@ -350,6 +351,7 @@ async def client(addr):
support.gc_collect()
self.assertIsNone(client_context())
+ @socket_helper.skip_if_tcp_blackhole
def test_start_tls_client_buf_proto_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
@@ -502,6 +504,7 @@ async def client(addr):
asyncio.wait_for(client(srv.addr),
timeout=support.SHORT_TIMEOUT))
+ @socket_helper.skip_if_tcp_blackhole
def test_start_tls_server_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
ANSWER = b'answer'
diff --git a/Lib/test/test_asyncio/test_subprocess.py b/Lib/test/test_asyncio/test_subprocess.py
index eeeca40c15cd28..dc5a48d500e8d5 100644
--- a/Lib/test/test_asyncio/test_subprocess.py
+++ b/Lib/test/test_asyncio/test_subprocess.py
@@ -1,6 +1,7 @@
import os
import signal
import sys
+import textwrap
import unittest
import warnings
from unittest import mock
@@ -12,9 +13,14 @@
from test import support
from test.support import os_helper
-if sys.platform != 'win32':
+
+MS_WINDOWS = (sys.platform == 'win32')
+if MS_WINDOWS:
+ import msvcrt
+else:
from asyncio import unix_events
+
if support.check_sanitizer(address=True):
raise unittest.SkipTest("Exposes ASAN flakiness in GitHub CI")
@@ -270,26 +276,43 @@ async def send_signal(proc):
finally:
signal.signal(signal.SIGHUP, old_handler)
- def prepare_broken_pipe_test(self):
+ def test_stdin_broken_pipe(self):
# buffer large enough to feed the whole pipe buffer
large_data = b'x' * support.PIPE_MAX_SIZE
+ rfd, wfd = os.pipe()
+ self.addCleanup(os.close, rfd)
+ self.addCleanup(os.close, wfd)
+ if MS_WINDOWS:
+ handle = msvcrt.get_osfhandle(rfd)
+ os.set_handle_inheritable(handle, True)
+ code = textwrap.dedent(f'''
+ import os, msvcrt
+ handle = {handle}
+ fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
+ os.read(fd, 1)
+ ''')
+ from subprocess import STARTUPINFO
+ startupinfo = STARTUPINFO()
+ startupinfo.lpAttributeList = {"handle_list": [handle]}
+ kwargs = dict(startupinfo=startupinfo)
+ else:
+ code = f'import os; fd = {rfd}; os.read(fd, 1)'
+ kwargs = dict(pass_fds=(rfd,))
+
# the program ends before the stdin can be fed
proc = self.loop.run_until_complete(
asyncio.create_subprocess_exec(
- sys.executable, '-c', 'pass',
+ sys.executable, '-c', code,
stdin=subprocess.PIPE,
+ **kwargs
)
)
- return (proc, large_data)
-
- def test_stdin_broken_pipe(self):
- proc, large_data = self.prepare_broken_pipe_test()
-
async def write_stdin(proc, data):
- await asyncio.sleep(0.5)
proc.stdin.write(data)
+ # Only exit the child process once the write buffer is filled
+ os.write(wfd, b'go')
await proc.stdin.drain()
coro = write_stdin(proc, large_data)
@@ -300,7 +323,16 @@ async def write_stdin(proc, data):
self.loop.run_until_complete(proc.wait())
def test_communicate_ignore_broken_pipe(self):
- proc, large_data = self.prepare_broken_pipe_test()
+ # buffer large enough to feed the whole pipe buffer
+ large_data = b'x' * support.PIPE_MAX_SIZE
+
+ # the program ends before the stdin can be fed
+ proc = self.loop.run_until_complete(
+ asyncio.create_subprocess_exec(
+ sys.executable, '-c', 'pass',
+ stdin=subprocess.PIPE,
+ )
+ )
# communicate() must ignore BrokenPipeError when feeding stdin
self.loop.set_exception_handler(lambda loop, msg: None)
@@ -753,21 +785,44 @@ async def main() -> None:
self.loop.run_until_complete(main())
- def test_subprocess_consistent_callbacks(self):
+ def test_subprocess_protocol_events(self):
+ # gh-108973: Test that all subprocess protocol methods are called.
+        # The protocol methods are not called in a deterministic order.
+ # The order depends on the event loop and the operating system.
events = []
+ fds = [1, 2]
+ expected = [
+ ('pipe_data_received', 1, b'stdout'),
+ ('pipe_data_received', 2, b'stderr'),
+ ('pipe_connection_lost', 1),
+ ('pipe_connection_lost', 2),
+ 'process_exited',
+ ]
+ per_fd_expected = [
+ 'pipe_data_received',
+ 'pipe_connection_lost',
+ ]
+
class MyProtocol(asyncio.SubprocessProtocol):
def __init__(self, exit_future: asyncio.Future) -> None:
self.exit_future = exit_future
def pipe_data_received(self, fd, data) -> None:
events.append(('pipe_data_received', fd, data))
+ self.exit_maybe()
def pipe_connection_lost(self, fd, exc) -> None:
- events.append('pipe_connection_lost')
+ events.append(('pipe_connection_lost', fd))
+ self.exit_maybe()
def process_exited(self) -> None:
events.append('process_exited')
- self.exit_future.set_result(True)
+ self.exit_maybe()
+
+ def exit_maybe(self):
+ # Only exit when we got all expected events
+ if len(events) >= len(expected):
+ self.exit_future.set_result(True)
async def main() -> None:
loop = asyncio.get_running_loop()
@@ -777,15 +832,24 @@ async def main() -> None:
sys.executable, '-c', code, stdin=None)
await exit_future
transport.close()
- self.assertEqual(events, [
- ('pipe_data_received', 1, b'stdout'),
- ('pipe_data_received', 2, b'stderr'),
- 'pipe_connection_lost',
- 'pipe_connection_lost',
- 'process_exited',
- ])
- self.loop.run_until_complete(main())
+ return events
+
+ events = self.loop.run_until_complete(main())
+
+ # First, make sure that we received all events
+ self.assertSetEqual(set(events), set(expected))
+
+ # Second, check order of pipe events per file descriptor
+ per_fd_events = {fd: [] for fd in fds}
+ for event in events:
+ if event == 'process_exited':
+ continue
+ name, fd = event[:2]
+ per_fd_events[fd].append(name)
+
+ for fd in fds:
+ self.assertEqual(per_fd_events[fd], per_fd_expected, (fd, events))
def test_subprocess_communicate_stdout(self):
# See https://github.com/python/cpython/issues/100133
diff --git a/Lib/test/test_asyncio/test_unix_events.py b/Lib/test/test_asyncio/test_unix_events.py
index cdf3eaac68af15..7322be597ae2d2 100644
--- a/Lib/test/test_asyncio/test_unix_events.py
+++ b/Lib/test/test_asyncio/test_unix_events.py
@@ -11,9 +11,11 @@
import stat
import sys
import threading
+import time
import unittest
from unittest import mock
import warnings
+from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import wait_process
@@ -1911,8 +1913,14 @@ def test_fork_signal_handling(self):
parent_handled = manager.Event()
def child_main():
- signal.signal(signal.SIGTERM, lambda *args: child_handled.set())
+ def on_sigterm(*args):
+ child_handled.set()
+ sys.exit()
+
+ signal.signal(signal.SIGTERM, on_sigterm)
child_started.set()
+ while True:
+ time.sleep(1)
async def main():
loop = asyncio.get_running_loop()
@@ -1922,7 +1930,7 @@ async def main():
process.start()
child_started.wait()
os.kill(process.pid, signal.SIGTERM)
- process.join()
+ process.join(timeout=support.SHORT_TIMEOUT)
async def func():
await asyncio.sleep(0.1)
@@ -1933,6 +1941,7 @@ async def func():
asyncio.run(main())
+ child_handled.wait(timeout=support.SHORT_TIMEOUT)
self.assertFalse(parent_handled.is_set())
self.assertTrue(child_handled.is_set())
diff --git a/Lib/test/test_asyncio/utils.py b/Lib/test/test_asyncio/utils.py
index 6dee5bb33b2560..1e5ab6eb935ef1 100644
--- a/Lib/test/test_asyncio/utils.py
+++ b/Lib/test/test_asyncio/utils.py
@@ -36,21 +36,20 @@
from test.support import threading_helper
-def data_file(filename):
- if hasattr(support, 'TEST_HOME_DIR'):
- fullname = os.path.join(support.TEST_HOME_DIR, filename)
- if os.path.isfile(fullname):
- return fullname
- fullname = os.path.join(os.path.dirname(__file__), '..', filename)
+def data_file(*filename):
+ fullname = os.path.join(support.TEST_HOME_DIR, *filename)
if os.path.isfile(fullname):
return fullname
- raise FileNotFoundError(filename)
+ fullname = os.path.join(os.path.dirname(__file__), '..', *filename)
+ if os.path.isfile(fullname):
+ return fullname
+    raise FileNotFoundError(os.path.join(*filename))
-ONLYCERT = data_file('ssl_cert.pem')
-ONLYKEY = data_file('ssl_key.pem')
-SIGNED_CERTFILE = data_file('keycert3.pem')
-SIGNING_CA = data_file('pycacert.pem')
+ONLYCERT = data_file('certdata', 'ssl_cert.pem')
+ONLYKEY = data_file('certdata', 'ssl_key.pem')
+SIGNED_CERTFILE = data_file('certdata', 'keycert3.pem')
+SIGNING_CA = data_file('certdata', 'pycacert.pem')
PEERCERT = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
diff --git a/Lib/test/test_audit.py b/Lib/test/test_audit.py
index 3a15835917cc32..47e5832d311bd1 100644
--- a/Lib/test/test_audit.py
+++ b/Lib/test/test_audit.py
@@ -19,7 +19,7 @@ class AuditTest(unittest.TestCase):
maxDiff = None
@support.requires_subprocess()
- def do_test(self, *args):
+ def run_test_in_subprocess(self, *args):
with subprocess.Popen(
[sys.executable, "-X utf8", AUDIT_TESTS_PY, *args],
encoding="utf-8",
@@ -27,27 +27,26 @@ def do_test(self, *args):
stderr=subprocess.PIPE,
) as p:
p.wait()
- sys.stdout.writelines(p.stdout)
- sys.stderr.writelines(p.stderr)
- if p.returncode:
- self.fail("".join(p.stderr))
+ return p, p.stdout.read(), p.stderr.read()
- @support.requires_subprocess()
- def run_python(self, *args):
+ def do_test(self, *args):
+ proc, stdout, stderr = self.run_test_in_subprocess(*args)
+
+ sys.stdout.write(stdout)
+ sys.stderr.write(stderr)
+ if proc.returncode:
+ self.fail(stderr)
+
+ def run_python(self, *args, expect_stderr=False):
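+        # Run the audit test in a subprocess and return its exit code,
+        # the audit events parsed from stdout, and its stderr output.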
events = []
- with subprocess.Popen(
- [sys.executable, "-X utf8", AUDIT_TESTS_PY, *args],
- encoding="utf-8",
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- ) as p:
- p.wait()
- sys.stderr.writelines(p.stderr)
- return (
- p.returncode,
- [line.strip().partition(" ") for line in p.stdout],
- "".join(p.stderr),
- )
+ proc, stdout, stderr = self.run_test_in_subprocess(*args)
+ if not expect_stderr or support.verbose:
+ sys.stderr.write(stderr)
+ return (
+ proc.returncode,
+ [line.strip().partition(" ") for line in stdout.splitlines()],
+ stderr,
+ )
def test_basic(self):
self.do_test("test_basic")
@@ -257,7 +256,7 @@ def test_not_in_gc(self):
self.fail(stderr)
def test_time(self):
- returncode, events, stderr = self.run_python("test_time")
+ returncode, events, stderr = self.run_python("test_time", "print")
if returncode:
self.fail(stderr)
@@ -271,6 +270,11 @@ def test_time(self):
self.assertEqual(actual, expected)
+ def test_time_fail(self):
+ returncode, events, stderr = self.run_python("test_time", "fail",
+ expect_stderr=True)
+ self.assertNotEqual(returncode, 0)
+ self.assertIn('hook failed', stderr.splitlines()[-1])
def test_sys_monitoring_register_callback(self):
returncode, events, stderr = self.run_python("test_sys_monitoring_register_callback")
diff --git a/Lib/test/test_binascii.py b/Lib/test/test_binascii.py
index eb831b1a06fcb8..ef744f6b97259c 100644
--- a/Lib/test/test_binascii.py
+++ b/Lib/test/test_binascii.py
@@ -230,10 +230,10 @@ def test_uu(self):
binascii.b2a_uu(b"", True)
@hypothesis.given(
- binary=hypothesis.strategies.binary(),
+ binary=hypothesis.strategies.binary(max_size=45),
backtick=hypothesis.strategies.booleans(),
)
- def test_hex_roundtrip(self, binary, backtick):
+ def test_b2a_roundtrip(self, binary, backtick):
converted = binascii.b2a_uu(self.type2test(binary), backtick=backtick)
restored = binascii.a2b_uu(self.type2test(converted))
self.assertConversion(binary, converted, restored, backtick=backtick)
diff --git a/Lib/test/test_capi/test_abstract.py b/Lib/test/test_capi/test_abstract.py
index 3f51e5b28104d9..eeaef60a8b47b5 100644
--- a/Lib/test/test_capi/test_abstract.py
+++ b/Lib/test/test_capi/test_abstract.py
@@ -1,8 +1,5 @@
import unittest
-import sys
from collections import OrderedDict
-from test import support
-from test.support import import_helper
import _testcapi
@@ -129,6 +126,34 @@ def test_object_hasattrstring(self):
# CRASHES hasattrstring(obj, NULL)
# CRASHES hasattrstring(NULL, b'a')
+ def test_object_hasattrwitherror(self):
+ xhasattr = _testcapi.object_hasattrwitherror
+ obj = TestObject()
+ obj.a = 1
+ setattr(obj, '\U0001f40d', 2)
+ self.assertTrue(xhasattr(obj, 'a'))
+ self.assertFalse(xhasattr(obj, 'b'))
+ self.assertTrue(xhasattr(obj, '\U0001f40d'))
+
+ self.assertRaises(RuntimeError, xhasattr, obj, 'evil')
+ self.assertRaises(TypeError, xhasattr, obj, 1)
+ # CRASHES xhasattr(obj, NULL)
+ # CRASHES xhasattr(NULL, 'a')
+
+ def test_object_hasattrstringwitherror(self):
+ hasattrstring = _testcapi.object_hasattrstringwitherror
+ obj = TestObject()
+ obj.a = 1
+ setattr(obj, '\U0001f40d', 2)
+ self.assertTrue(hasattrstring(obj, b'a'))
+ self.assertFalse(hasattrstring(obj, b'b'))
+ self.assertTrue(hasattrstring(obj, '\U0001f40d'.encode()))
+
+ self.assertRaises(RuntimeError, hasattrstring, obj, b'evil')
+ self.assertRaises(UnicodeDecodeError, hasattrstring, obj, b'\xff')
+ # CRASHES hasattrstring(obj, NULL)
+ # CRASHES hasattrstring(NULL, b'a')
+
def test_object_setattr(self):
xsetattr = _testcapi.object_setattr
obj = TestObject()
@@ -265,6 +290,43 @@ def test_mapping_getitemstring(self):
self.assertRaises(TypeError, getitemstring, [], b'a')
self.assertRaises(SystemError, getitemstring, NULL, b'a')
+ def test_mapping_getoptionalitem(self):
+ getitem = _testcapi.mapping_getoptionalitem
+ dct = {'a': 1, '\U0001f40d': 2}
+ self.assertEqual(getitem(dct, 'a'), 1)
+ self.assertEqual(getitem(dct, 'b'), KeyError)
+ self.assertEqual(getitem(dct, '\U0001f40d'), 2)
+
+ dct2 = ProxyGetItem(dct)
+ self.assertEqual(getitem(dct2, 'a'), 1)
+ self.assertEqual(getitem(dct2, 'b'), KeyError)
+
+ self.assertEqual(getitem(['a', 'b', 'c'], 1), 'b')
+
+ self.assertRaises(TypeError, getitem, 42, 'a')
+ self.assertRaises(TypeError, getitem, {}, []) # unhashable
+ self.assertRaises(IndexError, getitem, [], 1)
+ self.assertRaises(TypeError, getitem, [], 'a')
+ # CRASHES getitem({}, NULL)
+ # CRASHES getitem(NULL, 'a')
+
+ def test_mapping_getoptionalitemstring(self):
+ getitemstring = _testcapi.mapping_getoptionalitemstring
+ dct = {'a': 1, '\U0001f40d': 2}
+ self.assertEqual(getitemstring(dct, b'a'), 1)
+ self.assertEqual(getitemstring(dct, b'b'), KeyError)
+ self.assertEqual(getitemstring(dct, '\U0001f40d'.encode()), 2)
+
+ dct2 = ProxyGetItem(dct)
+ self.assertEqual(getitemstring(dct2, b'a'), 1)
+ self.assertEqual(getitemstring(dct2, b'b'), KeyError)
+
+ self.assertRaises(TypeError, getitemstring, 42, b'a')
+ self.assertRaises(UnicodeDecodeError, getitemstring, {}, b'\xff')
+ self.assertRaises(SystemError, getitemstring, {}, NULL)
+ self.assertRaises(TypeError, getitemstring, [], b'a')
+ # CRASHES getitemstring(NULL, b'a')
+
def test_mapping_haskey(self):
haskey = _testcapi.mapping_haskey
dct = {'a': 1, '\U0001f40d': 2}
@@ -302,6 +364,44 @@ def test_mapping_haskeystring(self):
self.assertFalse(haskeystring([], b'a'))
self.assertFalse(haskeystring(NULL, b'a'))
+ def test_mapping_haskeywitherror(self):
+ haskey = _testcapi.mapping_haskeywitherror
+ dct = {'a': 1, '\U0001f40d': 2}
+ self.assertTrue(haskey(dct, 'a'))
+ self.assertFalse(haskey(dct, 'b'))
+ self.assertTrue(haskey(dct, '\U0001f40d'))
+
+ dct2 = ProxyGetItem(dct)
+ self.assertTrue(haskey(dct2, 'a'))
+ self.assertFalse(haskey(dct2, 'b'))
+
+ self.assertTrue(haskey(['a', 'b', 'c'], 1))
+
+ self.assertRaises(TypeError, haskey, 42, 'a')
+ self.assertRaises(TypeError, haskey, {}, []) # unhashable
+ self.assertRaises(IndexError, haskey, [], 1)
+ self.assertRaises(TypeError, haskey, [], 'a')
+
+        # CRASHES haskey({}, NULL)
+        # CRASHES haskey(NULL, 'a')
+
+ def test_mapping_haskeystringwitherror(self):
+ haskeystring = _testcapi.mapping_haskeystringwitherror
+ dct = {'a': 1, '\U0001f40d': 2}
+ self.assertTrue(haskeystring(dct, b'a'))
+ self.assertFalse(haskeystring(dct, b'b'))
+ self.assertTrue(haskeystring(dct, '\U0001f40d'.encode()))
+
+ dct2 = ProxyGetItem(dct)
+ self.assertTrue(haskeystring(dct2, b'a'))
+ self.assertFalse(haskeystring(dct2, b'b'))
+
+ self.assertRaises(TypeError, haskeystring, 42, b'a')
+ self.assertRaises(UnicodeDecodeError, haskeystring, {}, b'\xff')
+ self.assertRaises(SystemError, haskeystring, {}, NULL)
+ self.assertRaises(TypeError, haskeystring, [], b'a')
+ # CRASHES haskeystring(NULL, b'a')
+
def test_object_setitem(self):
setitem = _testcapi.object_setitem
dct = {}
diff --git a/Lib/test/test_capi/test_dict.py b/Lib/test/test_capi/test_dict.py
index b22fa20e14dfea..11b2ca910707df 100644
--- a/Lib/test/test_capi/test_dict.py
+++ b/Lib/test/test_capi/test_dict.py
@@ -1,9 +1,6 @@
import unittest
-import sys
from collections import OrderedDict, UserDict
from types import MappingProxyType
-from test import support
-from test.support import import_helper
import _testcapi
diff --git a/Lib/test/test_capi/test_getargs.py b/Lib/test/test_capi/test_getargs.py
index 246206af86101c..e10f679eeb71c8 100644
--- a/Lib/test/test_capi/test_getargs.py
+++ b/Lib/test/test_capi/test_getargs.py
@@ -1004,70 +1004,6 @@ def test_et_hash(self):
buf = bytearray()
self.assertRaises(ValueError, getargs_et_hash, 'abc\xe9', 'latin1', buf)
- @support.requires_legacy_unicode_capi()
- def test_u(self):
- from _testcapi import getargs_u
- with self.assertWarns(DeprecationWarning):
- self.assertEqual(getargs_u('abc\xe9'), 'abc\xe9')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(ValueError, getargs_u, 'nul:\0')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u, b'bytes')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u, bytearray(b'bytearray'))
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u, memoryview(b'memoryview'))
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u, None)
-
- @support.requires_legacy_unicode_capi()
- def test_u_hash(self):
- from _testcapi import getargs_u_hash
- with self.assertWarns(DeprecationWarning):
- self.assertEqual(getargs_u_hash('abc\xe9'), 'abc\xe9')
- with self.assertWarns(DeprecationWarning):
- self.assertEqual(getargs_u_hash('nul:\0'), 'nul:\0')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u_hash, b'bytes')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u_hash, bytearray(b'bytearray'))
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u_hash, memoryview(b'memoryview'))
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_u_hash, None)
-
- @support.requires_legacy_unicode_capi()
- def test_Z(self):
- from _testcapi import getargs_Z
- with self.assertWarns(DeprecationWarning):
- self.assertEqual(getargs_Z('abc\xe9'), 'abc\xe9')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(ValueError, getargs_Z, 'nul:\0')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_Z, b'bytes')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_Z, bytearray(b'bytearray'))
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_Z, memoryview(b'memoryview'))
- with self.assertWarns(DeprecationWarning):
- self.assertIsNone(getargs_Z(None))
-
- @support.requires_legacy_unicode_capi()
- def test_Z_hash(self):
- from _testcapi import getargs_Z_hash
- with self.assertWarns(DeprecationWarning):
- self.assertEqual(getargs_Z_hash('abc\xe9'), 'abc\xe9')
- with self.assertWarns(DeprecationWarning):
- self.assertEqual(getargs_Z_hash('nul:\0'), 'nul:\0')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_Z_hash, b'bytes')
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_Z_hash, bytearray(b'bytearray'))
- with self.assertWarns(DeprecationWarning):
- self.assertRaises(TypeError, getargs_Z_hash, memoryview(b'memoryview'))
- with self.assertWarns(DeprecationWarning):
- self.assertIsNone(getargs_Z_hash(None))
-
def test_gh_99240_clear_args(self):
from _testcapi import gh_99240_clear_args
self.assertRaises(TypeError, gh_99240_clear_args, 'a', '\0b')
diff --git a/Lib/test/test_capi/test_misc.py b/Lib/test/test_capi/test_misc.py
index 004ce397696556..5ece213e7b2363 100644
--- a/Lib/test/test_capi/test_misc.py
+++ b/Lib/test/test_capi/test_misc.py
@@ -2,7 +2,7 @@
# these are all functions _testcapi exports whose name begins with 'test_'.
import _thread
-from collections import OrderedDict, deque
+from collections import deque
import contextlib
import importlib.machinery
import importlib.util
@@ -301,24 +301,42 @@ def test_getitem_with_error(self):
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
- @unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
- 'need _testcapi.negative_refcount')
- def test_negative_refcount(self):
+ def check_negative_refcount(self, code):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
- code = textwrap.dedent("""
- import _testcapi
- from test import support
-
- with support.SuppressCrashReport():
- _testcapi.negative_refcount()
- """)
+ code = textwrap.dedent(code)
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
+ @unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
+ 'need _testcapi.negative_refcount()')
+ def test_negative_refcount(self):
+ code = """
+ import _testcapi
+ from test import support
+
+ with support.SuppressCrashReport():
+ _testcapi.negative_refcount()
+ """
+ self.check_negative_refcount(code)
+
+ @unittest.skipUnless(hasattr(_testcapi, 'decref_freed_object'),
+ 'need _testcapi.decref_freed_object()')
+ @support.skip_if_sanitizer("use after free on purpose",
+ address=True, memory=True, ub=True)
+ def test_decref_freed_object(self):
+ code = """
+ import _testcapi
+ from test import support
+
+ with support.SuppressCrashReport():
+ _testcapi.decref_freed_object()
+ """
+ self.check_negative_refcount(code)
+
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
@@ -2067,7 +2085,15 @@ def test_version_api_data(self):
class Test_testinternalcapi(unittest.TestCase):
locals().update((name, getattr(_testinternalcapi, name))
for name in dir(_testinternalcapi)
- if name.startswith('test_'))
+ if name.startswith('test_')
+ and not name.startswith('test_lock_'))
+
+
+@threading_helper.requires_working_threading()
+class Test_PyLock(unittest.TestCase):
+ locals().update((name, getattr(_testinternalcapi, name))
+ for name in dir(_testinternalcapi)
+ if name.startswith('test_lock_'))
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
@@ -2390,7 +2416,7 @@ def testfunc(x):
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
uops = {opname for opname, _, _ in ex}
- self.assertIn("SAVE_IP", uops)
+ self.assertIn("_SET_IP", uops)
self.assertIn("LOAD_FAST", uops)
def test_extended_arg(self):
@@ -2455,7 +2481,7 @@ def testfunc(x):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(10)
+ testfunc(20)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2470,7 +2496,7 @@ def testfunc(n):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(10)
+ testfunc(20)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2485,7 +2511,7 @@ def testfunc(a):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(range(10))
+ testfunc(range(20))
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2495,12 +2521,13 @@ def testfunc(a):
def test_pop_jump_if_not_none(self):
def testfunc(a):
for x in a:
+ x = None
if x is not None:
x = 0
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(range(10))
+ testfunc(range(20))
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2515,7 +2542,7 @@ def testfunc(n):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(10)
+ testfunc(20)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2530,12 +2557,12 @@ def testfunc(n):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(10)
+ testfunc(20)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
uops = {opname for opname, _, _ in ex}
- self.assertIn("JUMP_TO_TOP", uops)
+ self.assertIn("_JUMP_TO_TOP", uops)
def test_jump_forward(self):
def testfunc(n):
@@ -2550,7 +2577,7 @@ def testfunc(n):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(10)
+ testfunc(20)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2568,8 +2595,8 @@ def testfunc(n):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- total = testfunc(10)
- self.assertEqual(total, 45)
+ total = testfunc(20)
+ self.assertEqual(total, 190)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2589,9 +2616,9 @@ def testfunc(a):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- a = list(range(10))
+ a = list(range(20))
total = testfunc(a)
- self.assertEqual(total, 45)
+ self.assertEqual(total, 190)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2611,9 +2638,9 @@ def testfunc(a):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- a = tuple(range(10))
+ a = tuple(range(20))
total = testfunc(a)
- self.assertEqual(total, 45)
+ self.assertEqual(total, 190)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2647,7 +2674,7 @@ def dummy(x):
opt = _testinternalcapi.get_uop_optimizer()
with temporary_optimizer(opt):
- testfunc(10)
+ testfunc(20)
ex = get_first_executor(testfunc)
self.assertIsNotNone(ex)
@@ -2655,6 +2682,22 @@ def dummy(x):
self.assertIn("_PUSH_FRAME", uops)
self.assertIn("_BINARY_OP_ADD_INT", uops)
+ def test_branch_taken(self):
+ def testfunc(n):
+ for i in range(n):
+ if i < 0:
+ i = 0
+ else:
+ i = 1
+
+ opt = _testinternalcapi.get_uop_optimizer()
+ with temporary_optimizer(opt):
+ testfunc(20)
+
+ ex = get_first_executor(testfunc)
+ self.assertIsNotNone(ex)
+ uops = {opname for opname, _, _ in ex}
+ self.assertIn("_POP_JUMP_IF_TRUE", uops)
if __name__ == "__main__":
diff --git a/Lib/test/test_cmd_line.py b/Lib/test/test_cmd_line.py
index e88b7c8572d9e8..f4754dbf735a1d 100644
--- a/Lib/test/test_cmd_line.py
+++ b/Lib/test/test_cmd_line.py
@@ -799,6 +799,7 @@ def check_pythonmalloc(self, env_var, name):
self.assertEqual(proc.stdout.rstrip(), name)
self.assertEqual(proc.returncode, 0)
+ @support.cpython_only
def test_pythonmalloc(self):
# Test the PYTHONMALLOC environment variable
pymalloc = support.with_pymalloc()
diff --git a/Lib/test/test_code.py b/Lib/test/test_code.py
index e056c16466e8c4..a961ddbe17a3d3 100644
--- a/Lib/test/test_code.py
+++ b/Lib/test/test_code.py
@@ -125,6 +125,7 @@
"""
+import copy
import inspect
import sys
import threading
@@ -280,11 +281,17 @@ def func2():
with self.subTest(attr=attr, value=value):
new_code = code.replace(**{attr: value})
self.assertEqual(getattr(new_code, attr), value)
+ new_code = copy.replace(code, **{attr: value})
+ self.assertEqual(getattr(new_code, attr), value)
new_code = code.replace(co_varnames=code2.co_varnames,
co_nlocals=code2.co_nlocals)
self.assertEqual(new_code.co_varnames, code2.co_varnames)
self.assertEqual(new_code.co_nlocals, code2.co_nlocals)
+ new_code = copy.replace(code, co_varnames=code2.co_varnames,
+ co_nlocals=code2.co_nlocals)
+ self.assertEqual(new_code.co_varnames, code2.co_varnames)
+ self.assertEqual(new_code.co_nlocals, code2.co_nlocals)
def test_nlocals_mismatch(self):
def func():
@@ -498,6 +505,25 @@ def test_code_hash_uses_bytecode(self):
self.assertNotEqual(c, c1)
self.assertNotEqual(hash(c), hash(c1))
+ @cpython_only
+ def test_code_equal_with_instrumentation(self):
+ """ GH-109052
+
+ Make sure that instrumentation does not affect code equality.
+ The validity of this test relies on the fact that "x is x" and
+ "x in x" differ by only one instruction, and that those two
+ instructions take the same argument.
+
+ """
+ code1 = compile("x is x", "example.py", "eval")
+ code2 = compile("x in x", "example.py", "eval")
+ sys._getframe().f_trace_opcodes = True
+ sys.settrace(lambda *args: None)
+ exec(code1, {'x': []})
+ exec(code2, {'x': []})
+ self.assertNotEqual(code1, code2)
+ sys.settrace(None)
+
def isinterned(s):
return s is sys.intern(('_' + s + '_')[1:-1])
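
The GH-109052 test above depends on "x is x" and "x in x" compiling to bytecode that differs in exactly one instruction. A minimal sketch for confirming that property with the dis module (illustrative only, not part of the patch; on current CPython the differing pair is IS_OP vs. CONTAINS_OP)::

    import dis

    is_ops = [i.opname for i in dis.get_instructions(compile("x is x", "<demo>", "eval"))]
    in_ops = [i.opname for i in dis.get_instructions(compile("x in x", "<demo>", "eval"))]

    # Same length, same arguments; only the comparison opcode differs.
    assert len(is_ops) == len(in_ops)
    print([(a, b) for a, b in zip(is_ops, in_ops) if a != b])
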
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index 91d7eaf997ae20..b5e9271ac0c3cd 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -1,7 +1,9 @@
import codecs
import contextlib
+import copy
import io
import locale
+import pickle
import sys
import unittest
import encodings
@@ -1771,6 +1773,61 @@ def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), ['\ud55c\n', '\uae00'])
+ def test_copy(self):
+ f = self.reader(Queue(b'\xed\x95\x9c\n\xea\xb8\x80'))
+ with self.assertRaisesRegex(TypeError, 'StreamReader'):
+ copy.copy(f)
+ with self.assertRaisesRegex(TypeError, 'StreamReader'):
+ copy.deepcopy(f)
+
+ def test_pickle(self):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=proto):
+ f = self.reader(Queue(b'\xed\x95\x9c\n\xea\xb8\x80'))
+ with self.assertRaisesRegex(TypeError, 'StreamReader'):
+ pickle.dumps(f, proto)
+
+
+class StreamWriterTest(unittest.TestCase):
+
+ def setUp(self):
+ self.writer = codecs.getwriter('utf-8')
+
+ def test_copy(self):
+ f = self.writer(Queue(b''))
+ with self.assertRaisesRegex(TypeError, 'StreamWriter'):
+ copy.copy(f)
+ with self.assertRaisesRegex(TypeError, 'StreamWriter'):
+ copy.deepcopy(f)
+
+ def test_pickle(self):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=proto):
+ f = self.writer(Queue(b''))
+ with self.assertRaisesRegex(TypeError, 'StreamWriter'):
+ pickle.dumps(f, proto)
+
+
+class StreamReaderWriterTest(unittest.TestCase):
+
+ def setUp(self):
+ self.reader = codecs.getreader('latin1')
+ self.writer = codecs.getwriter('utf-8')
+
+ def test_copy(self):
+ f = codecs.StreamReaderWriter(Queue(b''), self.reader, self.writer)
+ with self.assertRaisesRegex(TypeError, 'StreamReaderWriter'):
+ copy.copy(f)
+ with self.assertRaisesRegex(TypeError, 'StreamReaderWriter'):
+ copy.deepcopy(f)
+
+ def test_pickle(self):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=proto):
+ f = codecs.StreamReaderWriter(Queue(b''), self.reader, self.writer)
+ with self.assertRaisesRegex(TypeError, 'StreamReaderWriter'):
+ pickle.dumps(f, proto)
+
class EncodedFileTest(unittest.TestCase):
@@ -3346,6 +3403,28 @@ def test_seeking_write(self):
self.assertEqual(sr.readline(), b'abc\n')
self.assertEqual(sr.readline(), b'789\n')
+ def test_copy(self):
+ bio = io.BytesIO()
+ codec = codecs.lookup('ascii')
+ sr = codecs.StreamRecoder(bio, codec.encode, codec.decode,
+ encodings.ascii.StreamReader, encodings.ascii.StreamWriter)
+
+ with self.assertRaisesRegex(TypeError, 'StreamRecoder'):
+ copy.copy(sr)
+ with self.assertRaisesRegex(TypeError, 'StreamRecoder'):
+ copy.deepcopy(sr)
+
+ def test_pickle(self):
+ q = Queue(b'')
+ codec = codecs.lookup('ascii')
+ sr = codecs.StreamRecoder(q, codec.encode, codec.decode,
+ encodings.ascii.StreamReader, encodings.ascii.StreamWriter)
+
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.subTest(protocol=proto):
+ with self.assertRaisesRegex(TypeError, 'StreamRecoder'):
+ pickle.dumps(sr, proto)
+
@unittest.skipIf(_testinternalcapi is None, 'need _testinternalcapi module')
class LocaleCodecTest(unittest.TestCase):
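
The new StreamReader/StreamWriter/StreamRecoder tests above assert that these codec stream wrappers refuse to be copied or pickled. A small usage sketch of the behaviour being tested (illustrative only; the sample text is arbitrary, and the TypeError is the post-patch behaviour)::

    import codecs, copy, io

    reader = codecs.getreader("utf-8")(io.BytesIO("한\n글".encode("utf-8")))
    print(reader.read())          # decodes from the wrapped byte stream

    try:
        copy.copy(reader)         # exercised by StreamReaderTest.test_copy
    except TypeError as exc:
        print("cannot copy a StreamReader:", exc)
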
diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py
index de513daf825d81..d3a5517963c540 100644
--- a/Lib/test/test_compile.py
+++ b/Lib/test/test_compile.py
@@ -11,7 +11,7 @@
import warnings
from test import support
from test.support import (script_helper, requires_debug_ranges,
- requires_specialization, C_RECURSION_LIMIT)
+ requires_specialization, Py_C_RECURSION_LIMIT)
from test.support.os_helper import FakePath
class TestSpecifics(unittest.TestCase):
@@ -111,7 +111,7 @@ def __getitem__(self, key):
@unittest.skipIf(support.is_wasi, "exhausts limited stack on WASI")
def test_extended_arg(self):
- repeat = int(C_RECURSION_LIMIT * 0.9)
+ repeat = int(Py_C_RECURSION_LIMIT * 0.9)
longexpr = 'x = x or ' + '-x' * repeat
g = {}
code = textwrap.dedent('''
@@ -443,6 +443,33 @@ def f():
self.assertIn("_A__mangled_mod", A.f.__code__.co_varnames)
self.assertIn("__package__", A.f.__code__.co_varnames)
+ def test_compile_invalid_namedexpr(self):
+ # gh-109351
+ m = ast.Module(
+ body=[
+ ast.Expr(
+ value=ast.ListComp(
+ elt=ast.NamedExpr(
+ target=ast.Constant(value=1),
+ value=ast.Constant(value=3),
+ ),
+ generators=[
+ ast.comprehension(
+ target=ast.Name(id="x", ctx=ast.Store()),
+ iter=ast.Name(id="y", ctx=ast.Load()),
+ ifs=[],
+ is_async=0,
+ )
+ ],
+ )
+ )
+ ],
+ type_ignores=[],
+ )
+
+ with self.assertRaisesRegex(TypeError, "NamedExpr target must be a Name"):
+ compile(ast.fix_missing_locations(m), "", "exec")
+
def test_compile_ast(self):
fname = __file__
if fname.lower().endswith('pyc'):
@@ -478,6 +505,26 @@ def test_compile_ast(self):
ast.body = [_ast.BoolOp()]
self.assertRaises(TypeError, compile, ast, '', 'exec')
+ def test_compile_invalid_typealias(self):
+ # gh-109341
+ m = ast.Module(
+ body=[
+ ast.TypeAlias(
+ name=ast.Subscript(
+ value=ast.Name(id="foo", ctx=ast.Load()),
+ slice=ast.Constant(value="x"),
+ ctx=ast.Store(),
+ ),
+ type_params=[],
+ value=ast.Name(id="Callable", ctx=ast.Load()),
+ )
+ ],
+ type_ignores=[],
+ )
+
+ with self.assertRaisesRegex(TypeError, "TypeAlias with non-Name name"):
+ compile(ast.fix_missing_locations(m), "", "exec")
+
def test_dict_evaluation_order(self):
i = 0
@@ -557,12 +604,12 @@ def test_yet_more_evil_still_undecodable(self):
@support.cpython_only
@unittest.skipIf(support.is_wasi, "exhausts limited stack on WASI")
def test_compiler_recursion_limit(self):
- # Expected limit is C_RECURSION_LIMIT * 2
+ # Expected limit is Py_C_RECURSION_LIMIT * 2
# Duplicating the limit here is a little ugly.
# Perhaps it should be exposed somewhere...
- fail_depth = C_RECURSION_LIMIT * 2 + 1
- crash_depth = C_RECURSION_LIMIT * 100
- success_depth = int(C_RECURSION_LIMIT * 1.8)
+ fail_depth = Py_C_RECURSION_LIMIT * 2 + 1
+ crash_depth = Py_C_RECURSION_LIMIT * 100
+ success_depth = int(Py_C_RECURSION_LIMIT * 1.8)
def check_limit(prefix, repeated, mode="single"):
expect_ok = prefix + repeated * success_depth
@@ -1205,6 +1252,26 @@ def f():
return a, b
self.assertEqual(f(), (54, 96))
+ def test_duplicated_small_exit_block(self):
+ # See gh-109627
+ def f():
+ while element and something:
+ try:
+ return something
+ except:
+ pass
+
+ def test_cold_block_moved_to_end(self):
+ # See gh-109719
+ def f():
+ while name:
+ try:
+ break
+ except:
+ pass
+ else:
+ 1 if 1 else 1
+
@requires_debug_ranges()
class TestSourcePositions(unittest.TestCase):
@@ -1751,6 +1818,13 @@ def test_column_offset_deduplication(self):
list(code.co_consts[1].co_positions()),
)
+ def test_load_super_attr(self):
+ source = "class C:\n def __init__(self):\n super().__init__()"
+ code = compile(source, "", "exec").co_consts[0].co_consts[1]
+ self.assertOpcodeSourcePositionIs(
+ code, "LOAD_GLOBAL", line=3, end_line=3, column=4, end_column=9
+ )
+
class TestExpressionStackSize(unittest.TestCase):
# These tests check that the computed stack size for a code object
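
The gh-109351 and gh-109341 tests above feed hand-built ASTs to compile(), because the equivalent source text never reaches the compiler: the parser rejects it first. A small sketch of the source-level counterpart of the NamedExpr case (illustrative only, not part of the patch)::

    import ast

    try:
        ast.parse("[(1 := 3) for x in y]")   # a walrus target must be a name
    except SyntaxError as exc:
        print("rejected by the parser:", exc.msg)
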
diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py
index df7c5122b3b1f5..9cd92ad365c5a9 100644
--- a/Lib/test/test_compileall.py
+++ b/Lib/test/test_compileall.py
@@ -18,6 +18,7 @@
try:
# compileall relies on ProcessPoolExecutor if ProcessPoolExecutor exists
# and it can function.
+ from multiprocessing.util import _cleanup_tests as multiprocessing_cleanup_tests
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures.process import _check_system_limits
_check_system_limits()
@@ -54,6 +55,8 @@ class CompileallTestsBase:
def setUp(self):
self.directory = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.directory)
+
self.source_path = os.path.join(self.directory, '_test.py')
self.bc_path = importlib.util.cache_from_source(self.source_path)
with open(self.source_path, 'w', encoding="utf-8") as file:
@@ -66,9 +69,6 @@ def setUp(self):
self.source_path3 = os.path.join(self.subdirectory, '_test3.py')
shutil.copyfile(self.source_path, self.source_path3)
- def tearDown(self):
- shutil.rmtree(self.directory)
-
def add_bad_source_file(self):
self.bad_source_path = os.path.join(self.directory, '_test_bad.py')
with open(self.bad_source_path, 'w', encoding="utf-8") as file:
@@ -307,9 +307,13 @@ def _test_ddir_only(self, *, ddir, parallel=True):
script_helper.make_script(path, "__init__", "")
mods.append(script_helper.make_script(path, "mod",
"def fn(): 1/0\nfn()\n"))
+
+ if parallel:
+ self.addCleanup(multiprocessing_cleanup_tests)
compileall.compile_dir(
self.directory, quiet=True, ddir=ddir,
workers=2 if parallel else 1)
+
self.assertTrue(mods)
for mod in mods:
self.assertTrue(mod.startswith(self.directory), mod)
diff --git a/Lib/test/test_concurrent_futures/executor.py b/Lib/test/test_concurrent_futures/executor.py
index 36278bdd501971..1e7d4344740943 100644
--- a/Lib/test/test_concurrent_futures/executor.py
+++ b/Lib/test/test_concurrent_futures/executor.py
@@ -53,6 +53,7 @@ def test_map_exception(self):
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
+ @support.requires_resource('walltime')
def test_map_timeout(self):
results = []
try:
diff --git a/Lib/test/test_concurrent_futures/test_deadlock.py b/Lib/test/test_concurrent_futures/test_deadlock.py
index 6b78b360d15627..a76e075c3be180 100644
--- a/Lib/test/test_concurrent_futures/test_deadlock.py
+++ b/Lib/test/test_concurrent_futures/test_deadlock.py
@@ -1,10 +1,13 @@
import contextlib
+import queue
+import signal
import sys
import time
import unittest
+import unittest.mock
from pickle import PicklingError
from concurrent import futures
-from concurrent.futures.process import BrokenProcessPool
+from concurrent.futures.process import BrokenProcessPool, _ThreadWakeup
from test import support
@@ -88,7 +91,7 @@ def __reduce__(self):
class ExecutorDeadlockTest:
- TIMEOUT = support.SHORT_TIMEOUT
+ TIMEOUT = support.LONG_TIMEOUT
def _fail_on_deadlock(self, executor):
# If we did not recover before TIMEOUT seconds, consider that the
@@ -239,6 +242,75 @@ def test_crash_big_data(self):
with self.assertRaises(BrokenProcessPool):
list(executor.map(_crash_with_data, [data] * 10))
+ executor.shutdown(wait=True)
+
+ def test_gh105829_should_not_deadlock_if_wakeup_pipe_full(self):
+ # Issue #105829: The _ExecutorManagerThread wakeup pipe could
+ # fill up and block. See: https://github.com/python/cpython/issues/105829
+
+ # Much of this test was written by imitating existing tests, so some
+ # of the setup below may be more elaborate than strictly necessary.
+
+ self.executor.shutdown(wait=True)
+
+ if not hasattr(signal, 'alarm'):
+ raise unittest.SkipTest(
+ "Tested platform does not support the alarm signal")
+
+ def timeout(_signum, _frame):
+ import faulthandler
+ faulthandler.dump_traceback()
+
+ raise RuntimeError("timed out while submitting jobs?")
+
+ thread_run = futures.process._ExecutorManagerThread.run
+ def mock_run(self):
+ # Delay thread startup so the wakeup pipe can fill up and block
+ time.sleep(3)
+ thread_run(self)
+
+ class MockWakeup(_ThreadWakeup):
+ """Mock wakeup object to force the wakeup to block"""
+ def __init__(self):
+ super().__init__()
+ self._dummy_queue = queue.Queue(maxsize=1)
+
+ def wakeup(self):
+ self._dummy_queue.put(None, block=True)
+ super().wakeup()
+
+ def clear(self):
+ try:
+ while True:
+ self._dummy_queue.get_nowait()
+ except queue.Empty:
+ super().clear()
+
+ with (unittest.mock.patch.object(futures.process._ExecutorManagerThread,
+ 'run', mock_run),
+ unittest.mock.patch('concurrent.futures.process._ThreadWakeup',
+ MockWakeup)):
+ with self.executor_type(max_workers=2,
+ mp_context=self.get_context()) as executor:
+ self.executor = executor # Allow clean up in fail_on_deadlock
+
+ job_num = 100
+ job_data = range(job_num)
+
+ # Need to use sigalarm for timeout detection because
+ # Executor.submit is not guarded by any timeout (both
+ # self._work_ids.put(self._queue_count) and
+ # self._executor_manager_thread_wakeup.wakeup() might
+ # block indefinitely, and possibly other calls too). In this specific case it was
+ # the wakeup call that deadlocked on a blocking pipe.
+ old_handler = signal.signal(signal.SIGALRM, timeout)
+ try:
+ signal.alarm(int(self.TIMEOUT))
+ self.assertEqual(job_num, len(list(executor.map(int, job_data))))
+ finally:
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, old_handler)
+
create_executor_tests(globals(), ExecutorDeadlockTest,
executor_mixins=(ProcessPoolForkMixin,
diff --git a/Lib/test/test_concurrent_futures/test_wait.py b/Lib/test/test_concurrent_futures/test_wait.py
index e4bea8b05aced6..3f64ca173c02f6 100644
--- a/Lib/test/test_concurrent_futures/test_wait.py
+++ b/Lib/test/test_concurrent_futures/test_wait.py
@@ -3,6 +3,7 @@
import time
import unittest
from concurrent import futures
+from test import support
from .util import (
CANCELLED_FUTURE, CANCELLED_AND_NOTIFIED_FUTURE, EXCEPTION_FUTURE,
@@ -53,6 +54,7 @@ def test_first_completed_some_already_completed(self):
finished)
self.assertEqual(set([future1]), pending)
+ @support.requires_resource('walltime')
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
@@ -110,6 +112,7 @@ def test_all_completed(self):
future2]), finished)
self.assertEqual(set(), pending)
+ @support.requires_resource('walltime')
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
diff --git a/Lib/test/test_copy.py b/Lib/test/test_copy.py
index 826e46824e004c..c66c6eeb00811e 100644
--- a/Lib/test/test_copy.py
+++ b/Lib/test/test_copy.py
@@ -4,7 +4,7 @@
import copyreg
import weakref
import abc
-from operator import le, lt, ge, gt, eq, ne
+from operator import le, lt, ge, gt, eq, ne, attrgetter
import unittest
from test import support
@@ -899,7 +899,71 @@ def m(self):
g.b()
+class TestReplace(unittest.TestCase):
+
+ def test_unsupported(self):
+ self.assertRaises(TypeError, copy.replace, 1)
+ self.assertRaises(TypeError, copy.replace, [])
+ self.assertRaises(TypeError, copy.replace, {})
+ def f(): pass
+ self.assertRaises(TypeError, copy.replace, f)
+ class A: pass
+ self.assertRaises(TypeError, copy.replace, A)
+ self.assertRaises(TypeError, copy.replace, A())
+
+ def test_replace_method(self):
+ class A:
+ def __new__(cls, x, y=0):
+ self = object.__new__(cls)
+ self.x = x
+ self.y = y
+ return self
+
+ def __init__(self, *args, **kwargs):
+ self.z = self.x + self.y
+
+ def __replace__(self, **changes):
+ x = changes.get('x', self.x)
+ y = changes.get('y', self.y)
+ return type(self)(x, y)
+
+ attrs = attrgetter('x', 'y', 'z')
+ a = A(11, 22)
+ self.assertEqual(attrs(copy.replace(a)), (11, 22, 33))
+ self.assertEqual(attrs(copy.replace(a, x=1)), (1, 22, 23))
+ self.assertEqual(attrs(copy.replace(a, y=2)), (11, 2, 13))
+ self.assertEqual(attrs(copy.replace(a, x=1, y=2)), (1, 2, 3))
+
+ def test_namedtuple(self):
+ from collections import namedtuple
+ Point = namedtuple('Point', 'x y', defaults=(0,))
+ p = Point(11, 22)
+ self.assertEqual(copy.replace(p), (11, 22))
+ self.assertEqual(copy.replace(p, x=1), (1, 22))
+ self.assertEqual(copy.replace(p, y=2), (11, 2))
+ self.assertEqual(copy.replace(p, x=1, y=2), (1, 2))
+ with self.assertRaisesRegex(ValueError, 'unexpected field name'):
+ copy.replace(p, x=1, error=2)
+
+ def test_dataclass(self):
+ from dataclasses import dataclass
+ @dataclass
+ class C:
+ x: int
+ y: int = 0
+
+ attrs = attrgetter('x', 'y')
+ c = C(11, 22)
+ self.assertEqual(attrs(copy.replace(c)), (11, 22))
+ self.assertEqual(attrs(copy.replace(c, x=1)), (1, 22))
+ self.assertEqual(attrs(copy.replace(c, y=2)), (11, 2))
+ self.assertEqual(attrs(copy.replace(c, x=1, y=2)), (1, 2))
+ with self.assertRaisesRegex(TypeError, 'unexpected keyword argument'):
+ copy.replace(c, x=1, error=2)
+
+
def global_foo(x, y): return x+y
+
if __name__ == "__main__":
unittest.main()
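
The TestReplace additions above exercise the new copy.replace() function (Python 3.13+), which delegates to an object's __replace__() method. A minimal usage sketch mirroring the namedtuple case from the tests (illustrative only)::

    import copy
    from collections import namedtuple

    Point = namedtuple("Point", "x y")
    p = Point(1, 2)
    # namedtuple provides __replace__, so copy.replace() rebuilds the record
    # with the selected fields overridden.
    print(copy.replace(p, y=5))     # Point(x=1, y=5)
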
diff --git a/Lib/test/test_csv.py b/Lib/test/test_csv.py
index bc6879176cd85e..97b9bba24bcbca 100644
--- a/Lib/test/test_csv.py
+++ b/Lib/test/test_csv.py
@@ -10,7 +10,7 @@
import gc
import pickle
from test import support
-from test.support import warnings_helper, import_helper, check_disallow_instantiation
+from test.support import import_helper, check_disallow_instantiation
from itertools import permutations
from textwrap import dedent
from collections import OrderedDict
@@ -281,18 +281,6 @@ def test_writerows_errors(self):
self.assertRaises(TypeError, writer.writerows, None)
self.assertRaises(OSError, writer.writerows, BadIterable())
- @support.cpython_only
- @support.requires_legacy_unicode_capi()
- @warnings_helper.ignore_warnings(category=DeprecationWarning)
- def test_writerows_legacy_strings(self):
- import _testcapi
- c = _testcapi.unicode_legacy_string('a')
- with TemporaryFile("w+", encoding="utf-8", newline='') as fileobj:
- writer = csv.writer(fileobj)
- writer.writerows([[c]])
- fileobj.seek(0)
- self.assertEqual(fileobj.read(), "a\r\n")
-
def _read_test(self, input, expect, **kwargs):
reader = csv.reader(input, **kwargs)
result = list(reader)
diff --git a/Lib/test/test_ctypes/test_keeprefs.py b/Lib/test/test_ctypes/test_keeprefs.py
index c6fe1de62eae7c..23b03b64b4a716 100644
--- a/Lib/test/test_ctypes/test_keeprefs.py
+++ b/Lib/test/test_ctypes/test_keeprefs.py
@@ -98,33 +98,6 @@ def test_p_cint(self):
x = pointer(i)
self.assertEqual(x._objects, {'1': i})
- def test_pp_ownership(self):
- d = c_int(123)
- n = c_int(456)
-
- p = pointer(d)
- pp = pointer(p)
-
- self.assertIs(pp._objects['1'], p)
- self.assertIs(pp._objects['0']['1'], d)
-
- pp.contents.contents = n
-
- self.assertIs(pp._objects['1'], p)
- self.assertIs(pp._objects['0']['1'], n)
-
- self.assertIs(p._objects['1'], n)
- self.assertEqual(len(p._objects), 1)
-
- del d
- del p
-
- self.assertIs(pp._objects['0']['1'], n)
- self.assertEqual(len(pp._objects), 2)
-
- del n
-
- self.assertEqual(len(pp._objects), 2)
class PointerToStructure(unittest.TestCase):
def test(self):
diff --git a/Lib/test/test_ctypes/test_objects.py b/Lib/test/test_ctypes/test_objects.py
index 23c92b01a11107..fb01421b955951 100644
--- a/Lib/test/test_ctypes/test_objects.py
+++ b/Lib/test/test_ctypes/test_objects.py
@@ -55,14 +55,12 @@
import doctest
import unittest
-import test.test_ctypes.test_objects
-class TestCase(unittest.TestCase):
- def test(self):
- failures, tests = doctest.testmod(test.test_ctypes.test_objects)
- self.assertFalse(failures, 'doctests failed, see output above')
+def load_tests(loader, tests, pattern):
+ tests.addTest(doctest.DocTestSuite())
+ return tests
if __name__ == '__main__':
- doctest.testmod(test.test_ctypes.test_objects)
+ unittest.main()
diff --git a/Lib/test/test_ctypes/test_values.py b/Lib/test/test_ctypes/test_values.py
index 9f8b69409cb880..d0b4803dff8529 100644
--- a/Lib/test/test_ctypes/test_values.py
+++ b/Lib/test/test_ctypes/test_values.py
@@ -58,7 +58,6 @@ class struct_frozen(Structure):
("code", POINTER(c_ubyte)),
("size", c_int),
("is_package", c_int),
- ("get_code", POINTER(c_ubyte)), # Function ptr
]
FrozenTable = POINTER(struct_frozen)
diff --git a/Lib/test/test_dataclasses.py b/Lib/test/test_dataclasses/__init__.py
similarity index 99%
rename from Lib/test/test_dataclasses.py
rename to Lib/test/test_dataclasses/__init__.py
index bd8d82438414e6..7c07dfc77de208 100644
--- a/Lib/test/test_dataclasses.py
+++ b/Lib/test/test_dataclasses/__init__.py
@@ -3684,10 +3684,10 @@ class C:
self.assertEqual(C(10).x, 10)
def test_classvar_module_level_import(self):
- from test import dataclass_module_1
- from test import dataclass_module_1_str
- from test import dataclass_module_2
- from test import dataclass_module_2_str
+ from test.test_dataclasses import dataclass_module_1
+ from test.test_dataclasses import dataclass_module_1_str
+ from test.test_dataclasses import dataclass_module_2
+ from test.test_dataclasses import dataclass_module_2_str
for m in (dataclass_module_1, dataclass_module_1_str,
dataclass_module_2, dataclass_module_2_str,
@@ -3725,7 +3725,7 @@ def test_classvar_module_level_import(self):
self.assertNotIn('not_iv4', c.__dict__)
def test_text_annotations(self):
- from test import dataclass_textanno
+ from test.test_dataclasses import dataclass_textanno
self.assertEqual(
get_type_hints(dataclass_textanno.Bar),
diff --git a/Lib/test/dataclass_module_1.py b/Lib/test/test_dataclasses/dataclass_module_1.py
similarity index 100%
rename from Lib/test/dataclass_module_1.py
rename to Lib/test/test_dataclasses/dataclass_module_1.py
diff --git a/Lib/test/dataclass_module_1_str.py b/Lib/test/test_dataclasses/dataclass_module_1_str.py
similarity index 100%
rename from Lib/test/dataclass_module_1_str.py
rename to Lib/test/test_dataclasses/dataclass_module_1_str.py
diff --git a/Lib/test/dataclass_module_2.py b/Lib/test/test_dataclasses/dataclass_module_2.py
similarity index 100%
rename from Lib/test/dataclass_module_2.py
rename to Lib/test/test_dataclasses/dataclass_module_2.py
diff --git a/Lib/test/dataclass_module_2_str.py b/Lib/test/test_dataclasses/dataclass_module_2_str.py
similarity index 100%
rename from Lib/test/dataclass_module_2_str.py
rename to Lib/test/test_dataclasses/dataclass_module_2_str.py
diff --git a/Lib/test/dataclass_textanno.py b/Lib/test/test_dataclasses/dataclass_textanno.py
similarity index 100%
rename from Lib/test/dataclass_textanno.py
rename to Lib/test/test_dataclasses/dataclass_textanno.py
diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py
index abfd71c868d009..bd299483e7b0bd 100644
--- a/Lib/test/test_decimal.py
+++ b/Lib/test/test_decimal.py
@@ -34,14 +34,13 @@
import locale
from test.support import (is_resource_enabled,
requires_IEEE_754, requires_docstrings,
- requires_legacy_unicode_capi, check_sanitizer,
+ check_sanitizer,
check_disallow_instantiation)
from test.support import (TestFailed,
run_with_locale, cpython_only,
darwin_malloc_err_warning, is_emscripten)
from test.support.import_helper import import_fresh_module
from test.support import threading_helper
-from test.support import warnings_helper
import random
import inspect
import threading
@@ -587,18 +586,6 @@ def test_explicit_from_string(self):
# underscores don't prevent errors
self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")
- @cpython_only
- @requires_legacy_unicode_capi()
- @warnings_helper.ignore_warnings(category=DeprecationWarning)
- def test_from_legacy_strings(self):
- import _testcapi
- Decimal = self.decimal.Decimal
- context = self.decimal.Context()
-
- s = _testcapi.unicode_legacy_string('9.999999')
- self.assertEqual(str(Decimal(s)), '9.999999')
- self.assertEqual(str(context.create_decimal(s)), '9.999999')
-
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
@@ -2919,23 +2906,6 @@ def test_none_args(self):
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
- @cpython_only
- @requires_legacy_unicode_capi()
- @warnings_helper.ignore_warnings(category=DeprecationWarning)
- def test_from_legacy_strings(self):
- import _testcapi
- c = self.decimal.Context()
-
- for rnd in RoundingModes:
- c.rounding = _testcapi.unicode_legacy_string(rnd)
- self.assertEqual(c.rounding, rnd)
-
- s = _testcapi.unicode_legacy_string('')
- self.assertRaises(TypeError, setattr, c, 'rounding', s)
-
- s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
- self.assertRaises(TypeError, setattr, c, 'rounding', s)
-
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index f6bd9094e8fece..4a3db80ca43c27 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -1989,7 +1989,7 @@ def __getattr__(self, attr):
ns = {}
exec(code, ns)
number_attrs = ns["number_attrs"]
- # Warm up the the function for quickening (PEP 659)
+ # Warm up the function for quickening (PEP 659)
for _ in range(30):
self.assertEqual(number_attrs(Numbers()), list(range(280)))
@@ -4757,24 +4757,24 @@ class Thing:
thing = Thing()
for i in range(20):
with self.assertRaises(TypeError):
- # PRECALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS
+ # CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS
list.sort(thing)
for i in range(20):
with self.assertRaises(TypeError):
- # PRECALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS
+ # CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS
str.split(thing)
for i in range(20):
with self.assertRaises(TypeError):
- # PRECALL_NO_KW_METHOD_DESCRIPTOR_NOARGS
+ # CALL_METHOD_DESCRIPTOR_NOARGS
str.upper(thing)
for i in range(20):
with self.assertRaises(TypeError):
- # PRECALL_NO_KW_METHOD_DESCRIPTOR_FAST
+ # CALL_METHOD_DESCRIPTOR_FAST
str.strip(thing)
from collections import deque
for i in range(20):
with self.assertRaises(TypeError):
- # PRECALL_NO_KW_METHOD_DESCRIPTOR_O
+ # CALL_METHOD_DESCRIPTOR_O
deque.append(thing, thing)
def test_repr_as_str(self):
diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py
index eab64b4f9106c1..620d0ca4f4c2da 100644
--- a/Lib/test/test_dict.py
+++ b/Lib/test/test_dict.py
@@ -8,7 +8,7 @@
import unittest
import weakref
from test import support
-from test.support import import_helper, C_RECURSION_LIMIT
+from test.support import import_helper, Py_C_RECURSION_LIMIT
class DictTest(unittest.TestCase):
@@ -596,7 +596,7 @@ def __repr__(self):
def test_repr_deep(self):
d = {}
- for i in range(C_RECURSION_LIMIT + 1):
+ for i in range(Py_C_RECURSION_LIMIT + 1):
d = {1: d}
self.assertRaises(RecursionError, repr, d)
diff --git a/Lib/test/test_dictviews.py b/Lib/test/test_dictviews.py
index 2bd9d6eef8cfc6..cad568b6ac4c2d 100644
--- a/Lib/test/test_dictviews.py
+++ b/Lib/test/test_dictviews.py
@@ -1,9 +1,8 @@
import collections.abc
import copy
import pickle
-import sys
import unittest
-from test.support import C_RECURSION_LIMIT
+from test.support import Py_C_RECURSION_LIMIT
class DictSetTest(unittest.TestCase):
@@ -280,7 +279,7 @@ def test_recursive_repr(self):
def test_deeply_nested_repr(self):
d = {}
- for i in range(C_RECURSION_LIMIT//2 + 100):
+ for i in range(Py_C_RECURSION_LIMIT//2 + 100):
d = {42: d.values()}
self.assertRaises(RecursionError, repr, d)
diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py
index dacd6f6da2c5a9..d104e5dd904999 100644
--- a/Lib/test/test_dis.py
+++ b/Lib/test/test_dis.py
@@ -240,8 +240,8 @@ def wrap_func_w_kwargs():
LOAD_CONST 1 (1)
LOAD_CONST 2 (2)
LOAD_CONST 3 (5)
- KW_NAMES 4 (('c',))
- CALL 3
+ LOAD_CONST 4 (('c',))
+ CALL_KW 3
POP_TOP
RETURN_CONST 0 (None)
""" % (wrap_func_w_kwargs.__code__.co_firstlineno,
@@ -404,7 +404,7 @@ def wrap_func_w_kwargs():
%4d LOAD_GLOBAL 0 (Exception)
CHECK_EXC_MATCH
- POP_JUMP_IF_FALSE 23 (to 80)
+ POP_JUMP_IF_FALSE 23 (to 82)
STORE_FAST 0 (e)
%4d LOAD_FAST 0 (e)
@@ -492,7 +492,7 @@ def _with(c):
%4d >> PUSH_EXC_INFO
WITH_EXCEPT_START
TO_BOOL
- POP_JUMP_IF_TRUE 1 (to 50)
+ POP_JUMP_IF_TRUE 1 (to 52)
RERAISE 2
>> POP_TOP
POP_EXCEPT
@@ -579,7 +579,7 @@ async def _asyncwith(c):
>> CLEANUP_THROW
>> END_SEND
TO_BOOL
- POP_JUMP_IF_TRUE 1 (to 116)
+ POP_JUMP_IF_TRUE 1 (to 118)
RERAISE 2
>> POP_TOP
POP_EXCEPT
@@ -642,7 +642,8 @@ def _tryfinallyconst(b):
CALL 0
POP_TOP
RERAISE 0
- >> COPY 3
+
+None >> COPY 3
POP_EXCEPT
RERAISE 1
ExceptionTable:
@@ -674,7 +675,8 @@ def _tryfinallyconst(b):
CALL 0
POP_TOP
RERAISE 0
- >> COPY 3
+
+None >> COPY 3
POP_EXCEPT
RERAISE 1
ExceptionTable:
@@ -788,7 +790,7 @@ def load_test(x, y=0):
return a, b
dis_load_test_quickened_code = """\
-%3d 0 RESUME 0
+%3d 0 RESUME_CHECK 0
%3d 2 LOAD_FAST_LOAD_FAST 1 (x, y)
4 STORE_FAST_STORE_FAST 50 (b, a)
@@ -805,7 +807,7 @@ def loop_test():
load_test(i)
dis_loop_test_quickened_code = """\
-%3d RESUME 0
+%3d RESUME_CHECK 0
%3d BUILD_LIST 0
LOAD_CONST 1 ((1, 2, 3))
@@ -1003,7 +1005,7 @@ def test_bug_46724(self):
self.do_disassembly_test(bug46724, dis_bug46724)
def test_kw_names(self):
- # Test that value is displayed for KW_NAMES
+ # Test that value is displayed for keyword argument names:
self.do_disassembly_test(wrap_func_w_kwargs, dis_kw_names)
def test_intrinsic_1(self):
@@ -1197,7 +1199,7 @@ def test_super_instructions(self):
@requires_specialization
def test_binary_specialize(self):
binary_op_quicken = """\
- 0 0 RESUME 0
+ 0 0 RESUME_CHECK 0
1 2 LOAD_NAME 0 (a)
4 LOAD_NAME 1 (b)
@@ -1215,7 +1217,7 @@ def test_binary_specialize(self):
self.do_disassembly_compare(got, binary_op_quicken % "BINARY_OP_ADD_UNICODE 0 (+)", True)
binary_subscr_quicken = """\
- 0 0 RESUME 0
+ 0 0 RESUME_CHECK 0
1 2 LOAD_NAME 0 (a)
4 LOAD_CONST 0 (0)
@@ -1236,7 +1238,7 @@ def test_binary_specialize(self):
@requires_specialization
def test_load_attr_specialize(self):
load_attr_quicken = """\
- 0 0 RESUME 0
+ 0 0 RESUME_CHECK 0
1 2 LOAD_CONST 0 ('a')
4 LOAD_ATTR_SLOT 0 (__class__)
@@ -1251,12 +1253,12 @@ def test_load_attr_specialize(self):
@requires_specialization
def test_call_specialize(self):
call_quicken = """\
- 0 RESUME 0
+ 0 RESUME_CHECK 0
1 LOAD_NAME 0 (str)
PUSH_NULL
LOAD_CONST 0 (1)
- CALL_NO_KW_STR_1 1
+ CALL_STR_1 1
RETURN_VALUE
"""
co = compile("str(1)", "", "eval")
@@ -1273,7 +1275,8 @@ def test_loop_quicken(self):
got = self.get_disassembly(loop_test, adaptive=True)
expected = dis_loop_test_quickened_code
if _testinternalcapi.get_optimizer():
- expected = expected.replace("JUMP_BACKWARD ", "ENTER_EXECUTOR")
+ # We *may* see ENTER_EXECUTOR in the disassembly
+ got = got.replace("ENTER_EXECUTOR", "JUMP_BACKWARD ")
self.do_disassembly_compare(got, expected)
@cpython_only
@@ -1635,202 +1638,202 @@ def _prepare_test_cases():
result = result.replace(repr(code_object_inner), "code_object_inner")
print(result)
-# _prepare_test_cases()
+# from test.test_dis import _prepare_test_cases; _prepare_test_cases()
Instruction = dis.Instruction
expected_opinfo_outer = [
- Instruction(opname='MAKE_CELL', opcode=157, arg=0, argval='a', argrepr='a', offset=0, start_offset=0, starts_line=True, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='MAKE_CELL', opcode=157, arg=1, argval='b', argrepr='b', offset=2, start_offset=2, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='RESUME', opcode=166, arg=0, argval=0, argrepr='', offset=4, start_offset=4, starts_line=True, line_number=1, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=5, argval=(3, 4), argrepr='(3, 4)', offset=6, start_offset=6, starts_line=True, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='a', argrepr='a', offset=8, start_offset=8, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=1, argval='b', argrepr='b', offset=10, start_offset=10, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='BUILD_TUPLE', opcode=74, arg=2, argval=2, argrepr='', offset=12, start_offset=12, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=1, argval=code_object_f, argrepr=repr(code_object_f), offset=14, start_offset=14, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='MAKE_FUNCTION', opcode=38, arg=None, argval=None, argrepr='', offset=16, start_offset=16, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=171, arg=8, argval=8, argrepr='closure', offset=18, start_offset=18, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=171, arg=1, argval=1, argrepr='defaults', offset=20, start_offset=20, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='STORE_FAST', opcode=176, arg=2, argval='f', argrepr='f', offset=22, start_offset=22, starts_line=False, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=1, argval='print', argrepr='print + NULL', offset=24, start_offset=24, starts_line=True, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=0, argval='a', argrepr='a', offset=34, start_offset=34, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=1, argval='b', argrepr='b', offset=36, start_offset=36, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=2, argval='', argrepr="''", offset=38, start_offset=38, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=3, argval=1, argrepr='1', offset=40, start_offset=40, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='BUILD_LIST', opcode=69, arg=0, argval=0, argrepr='', offset=42, start_offset=42, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='BUILD_MAP', opcode=70, arg=0, argval=0, argrepr='', offset=44, start_offset=44, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=4, argval='Hello world!', argrepr="'Hello world!'", offset=46, start_offset=46, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=7, argval=7, argrepr='', offset=48, start_offset=48, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=56, start_offset=56, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=2, argval='f', argrepr='f', offset=58, start_offset=58, starts_line=True, line_number=8, is_jump_target=False, positions=None),
- Instruction(opname='RETURN_VALUE', opcode=48, arg=None, argval=None, argrepr='', offset=60, start_offset=60, starts_line=False, line_number=8, is_jump_target=False, positions=None),
+ Instruction(opname='MAKE_CELL', opcode=94, arg=0, argval='a', argrepr='a', offset=0, start_offset=0, starts_line=True, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='MAKE_CELL', opcode=94, arg=1, argval='b', argrepr='b', offset=2, start_offset=2, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='RESUME', opcode=149, arg=0, argval=0, argrepr='', offset=4, start_offset=4, starts_line=True, line_number=1, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=5, argval=(3, 4), argrepr='(3, 4)', offset=6, start_offset=6, starts_line=True, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='a', argrepr='a', offset=8, start_offset=8, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=1, argval='b', argrepr='b', offset=10, start_offset=10, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='BUILD_TUPLE', opcode=52, arg=2, argval=2, argrepr='', offset=12, start_offset=12, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=1, argval=code_object_f, argrepr=repr(code_object_f), offset=14, start_offset=14, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='MAKE_FUNCTION', opcode=26, arg=None, argval=None, argrepr='', offset=16, start_offset=16, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=106, arg=8, argval=8, argrepr='closure', offset=18, start_offset=18, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=106, arg=1, argval=1, argrepr='defaults', offset=20, start_offset=20, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='STORE_FAST', opcode=110, arg=2, argval='f', argrepr='f', offset=22, start_offset=22, starts_line=False, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=1, argval='print', argrepr='print + NULL', offset=24, start_offset=24, starts_line=True, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=0, argval='a', argrepr='a', offset=34, start_offset=34, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=1, argval='b', argrepr='b', offset=36, start_offset=36, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=2, argval='', argrepr="''", offset=38, start_offset=38, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=3, argval=1, argrepr='1', offset=40, start_offset=40, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='BUILD_LIST', opcode=47, arg=0, argval=0, argrepr='', offset=42, start_offset=42, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='BUILD_MAP', opcode=48, arg=0, argval=0, argrepr='', offset=44, start_offset=44, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=4, argval='Hello world!', argrepr="'Hello world!'", offset=46, start_offset=46, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=7, argval=7, argrepr='', offset=48, start_offset=48, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=56, start_offset=56, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=2, argval='f', argrepr='f', offset=58, start_offset=58, starts_line=True, line_number=8, is_jump_target=False, positions=None),
+ Instruction(opname='RETURN_VALUE', opcode=36, arg=None, argval=None, argrepr='', offset=60, start_offset=60, starts_line=False, line_number=8, is_jump_target=False, positions=None),
]
expected_opinfo_f = [
- Instruction(opname='COPY_FREE_VARS', opcode=104, arg=2, argval=2, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='MAKE_CELL', opcode=157, arg=0, argval='c', argrepr='c', offset=2, start_offset=2, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='MAKE_CELL', opcode=157, arg=1, argval='d', argrepr='d', offset=4, start_offset=4, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='RESUME', opcode=166, arg=0, argval=0, argrepr='', offset=6, start_offset=6, starts_line=True, line_number=2, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=2, argval=(5, 6), argrepr='(5, 6)', offset=8, start_offset=8, starts_line=True, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=3, argval='a', argrepr='a', offset=10, start_offset=10, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=4, argval='b', argrepr='b', offset=12, start_offset=12, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='c', argrepr='c', offset=14, start_offset=14, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=1, argval='d', argrepr='d', offset=16, start_offset=16, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='BUILD_TUPLE', opcode=74, arg=4, argval=4, argrepr='', offset=18, start_offset=18, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=1, argval=code_object_inner, argrepr=repr(code_object_inner), offset=20, start_offset=20, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='MAKE_FUNCTION', opcode=38, arg=None, argval=None, argrepr='', offset=22, start_offset=22, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=171, arg=8, argval=8, argrepr='closure', offset=24, start_offset=24, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=171, arg=1, argval=1, argrepr='defaults', offset=26, start_offset=26, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='STORE_FAST', opcode=176, arg=2, argval='inner', argrepr='inner', offset=28, start_offset=28, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=1, argval='print', argrepr='print + NULL', offset=30, start_offset=30, starts_line=True, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=3, argval='a', argrepr='a', offset=40, start_offset=40, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=4, argval='b', argrepr='b', offset=42, start_offset=42, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=0, argval='c', argrepr='c', offset=44, start_offset=44, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=1, argval='d', argrepr='d', offset=46, start_offset=46, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=4, argval=4, argrepr='', offset=48, start_offset=48, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=56, start_offset=56, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=2, argval='inner', argrepr='inner', offset=58, start_offset=58, starts_line=True, line_number=6, is_jump_target=False, positions=None),
- Instruction(opname='RETURN_VALUE', opcode=48, arg=None, argval=None, argrepr='', offset=60, start_offset=60, starts_line=False, line_number=6, is_jump_target=False, positions=None),
+ Instruction(opname='COPY_FREE_VARS', opcode=62, arg=2, argval=2, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='MAKE_CELL', opcode=94, arg=0, argval='c', argrepr='c', offset=2, start_offset=2, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='MAKE_CELL', opcode=94, arg=1, argval='d', argrepr='d', offset=4, start_offset=4, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='RESUME', opcode=149, arg=0, argval=0, argrepr='', offset=6, start_offset=6, starts_line=True, line_number=2, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=2, argval=(5, 6), argrepr='(5, 6)', offset=8, start_offset=8, starts_line=True, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=3, argval='a', argrepr='a', offset=10, start_offset=10, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=4, argval='b', argrepr='b', offset=12, start_offset=12, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='c', argrepr='c', offset=14, start_offset=14, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=1, argval='d', argrepr='d', offset=16, start_offset=16, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='BUILD_TUPLE', opcode=52, arg=4, argval=4, argrepr='', offset=18, start_offset=18, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=1, argval=code_object_inner, argrepr=repr(code_object_inner), offset=20, start_offset=20, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='MAKE_FUNCTION', opcode=26, arg=None, argval=None, argrepr='', offset=22, start_offset=22, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=106, arg=8, argval=8, argrepr='closure', offset=24, start_offset=24, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='SET_FUNCTION_ATTRIBUTE', opcode=106, arg=1, argval=1, argrepr='defaults', offset=26, start_offset=26, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='STORE_FAST', opcode=110, arg=2, argval='inner', argrepr='inner', offset=28, start_offset=28, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=1, argval='print', argrepr='print + NULL', offset=30, start_offset=30, starts_line=True, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=3, argval='a', argrepr='a', offset=40, start_offset=40, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=4, argval='b', argrepr='b', offset=42, start_offset=42, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=0, argval='c', argrepr='c', offset=44, start_offset=44, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=1, argval='d', argrepr='d', offset=46, start_offset=46, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=4, argval=4, argrepr='', offset=48, start_offset=48, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=56, start_offset=56, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=2, argval='inner', argrepr='inner', offset=58, start_offset=58, starts_line=True, line_number=6, is_jump_target=False, positions=None),
+ Instruction(opname='RETURN_VALUE', opcode=36, arg=None, argval=None, argrepr='', offset=60, start_offset=60, starts_line=False, line_number=6, is_jump_target=False, positions=None),
]
expected_opinfo_inner = [
- Instruction(opname='COPY_FREE_VARS', opcode=104, arg=4, argval=4, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='RESUME', opcode=166, arg=0, argval=0, argrepr='', offset=2, start_offset=2, starts_line=True, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=1, argval='print', argrepr='print + NULL', offset=4, start_offset=4, starts_line=True, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=2, argval='a', argrepr='a', offset=14, start_offset=14, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=3, argval='b', argrepr='b', offset=16, start_offset=16, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=4, argval='c', argrepr='c', offset=18, start_offset=18, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_DEREF', opcode=143, arg=5, argval='d', argrepr='d', offset=20, start_offset=20, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST_LOAD_FAST', opcode=147, arg=1, argval=('e', 'f'), argrepr='e, f', offset=22, start_offset=22, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=6, argval=6, argrepr='', offset=24, start_offset=24, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=32, start_offset=32, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='RETURN_CONST', opcode=167, arg=0, argval=None, argrepr='None', offset=34, start_offset=34, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='COPY_FREE_VARS', opcode=62, arg=4, argval=4, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='RESUME', opcode=149, arg=0, argval=0, argrepr='', offset=2, start_offset=2, starts_line=True, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=1, argval='print', argrepr='print + NULL', offset=4, start_offset=4, starts_line=True, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=2, argval='a', argrepr='a', offset=14, start_offset=14, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=3, argval='b', argrepr='b', offset=16, start_offset=16, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=4, argval='c', argrepr='c', offset=18, start_offset=18, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_DEREF', opcode=84, arg=5, argval='d', argrepr='d', offset=20, start_offset=20, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST_LOAD_FAST', opcode=88, arg=1, argval=('e', 'f'), argrepr='e, f', offset=22, start_offset=22, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=6, argval=6, argrepr='', offset=24, start_offset=24, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=32, start_offset=32, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='RETURN_CONST', opcode=103, arg=0, argval=None, argrepr='None', offset=34, start_offset=34, starts_line=False, line_number=4, is_jump_target=False, positions=None),
]
expected_opinfo_jumpy = [
- Instruction(opname='RESUME', opcode=166, arg=0, argval=0, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=1, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=1, argval='range', argrepr='range + NULL', offset=2, start_offset=2, starts_line=True, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=1, argval=10, argrepr='10', offset=12, start_offset=12, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=14, start_offset=14, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='GET_ITER', opcode=31, arg=None, argval=None, argrepr='', offset=22, start_offset=22, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='FOR_ITER', opcode=114, arg=28, argval=84, argrepr='to 84', offset=24, start_offset=24, starts_line=False, line_number=3, is_jump_target=True, positions=None),
- Instruction(opname='STORE_FAST', opcode=176, arg=0, argval='i', argrepr='i', offset=28, start_offset=28, starts_line=False, line_number=3, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=30, start_offset=30, starts_line=True, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=40, start_offset=40, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=42, start_offset=42, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=50, start_offset=50, starts_line=False, line_number=4, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=52, start_offset=52, starts_line=True, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=2, argval=4, argrepr='4', offset=54, start_offset=54, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='COMPARE_OP', opcode=97, arg=18, argval='<', argrepr='bool(<)', offset=56, start_offset=56, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_FALSE', opcode=160, arg=2, argval=66, argrepr='to 66', offset=60, start_offset=60, starts_line=False, line_number=5, is_jump_target=False, positions=None),
- Instruction(opname='JUMP_BACKWARD', opcode=123, arg=21, argval=24, argrepr='to 24', offset=62, start_offset=62, starts_line=True, line_number=6, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=66, start_offset=66, starts_line=True, line_number=7, is_jump_target=True, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=3, argval=6, argrepr='6', offset=68, start_offset=68, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='COMPARE_OP', opcode=97, arg=148, argval='>', argrepr='bool(>)', offset=70, start_offset=70, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_TRUE', opcode=163, arg=2, argval=80, argrepr='to 80', offset=74, start_offset=74, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='JUMP_BACKWARD', opcode=123, arg=28, argval=24, argrepr='to 24', offset=76, start_offset=76, starts_line=False, line_number=7, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=80, start_offset=80, starts_line=True, line_number=8, is_jump_target=True, positions=None),
- Instruction(opname='JUMP_FORWARD', opcode=125, arg=12, argval=108, argrepr='to 108', offset=82, start_offset=82, starts_line=False, line_number=8, is_jump_target=False, positions=None),
- Instruction(opname='END_FOR', opcode=24, arg=None, argval=None, argrepr='', offset=84, start_offset=84, starts_line=True, line_number=3, is_jump_target=True, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=86, start_offset=86, starts_line=True, line_number=10, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=4, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=96, start_offset=96, starts_line=False, line_number=10, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=98, start_offset=98, starts_line=False, line_number=10, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=106, start_offset=106, starts_line=False, line_number=10, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST_CHECK', opcode=146, arg=0, argval='i', argrepr='i', offset=108, start_offset=108, starts_line=True, line_number=11, is_jump_target=True, positions=None),
- Instruction(opname='TO_BOOL', opcode=56, arg=None, argval=None, argrepr='', offset=110, start_offset=110, starts_line=False, line_number=11, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_FALSE', opcode=160, arg=37, argval=194, argrepr='to 194', offset=118, start_offset=118, starts_line=False, line_number=11, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=120, start_offset=120, starts_line=True, line_number=12, is_jump_target=True, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=130, start_offset=130, starts_line=False, line_number=12, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=132, start_offset=132, starts_line=False, line_number=12, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=140, start_offset=140, starts_line=False, line_number=12, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=142, start_offset=142, starts_line=True, line_number=13, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=5, argval=1, argrepr='1', offset=144, start_offset=144, starts_line=False, line_number=13, is_jump_target=False, positions=None),
- Instruction(opname='BINARY_OP', opcode=67, arg=23, argval=23, argrepr='-=', offset=146, start_offset=146, starts_line=False, line_number=13, is_jump_target=False, positions=None),
- Instruction(opname='STORE_FAST', opcode=176, arg=0, argval='i', argrepr='i', offset=150, start_offset=150, starts_line=False, line_number=13, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=152, start_offset=152, starts_line=True, line_number=14, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=3, argval=6, argrepr='6', offset=154, start_offset=154, starts_line=False, line_number=14, is_jump_target=False, positions=None),
- Instruction(opname='COMPARE_OP', opcode=97, arg=148, argval='>', argrepr='bool(>)', offset=156, start_offset=156, starts_line=False, line_number=14, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_FALSE', opcode=160, arg=2, argval=166, argrepr='to 166', offset=160, start_offset=160, starts_line=False, line_number=14, is_jump_target=False, positions=None),
- Instruction(opname='JUMP_BACKWARD', opcode=123, arg=29, argval=108, argrepr='to 108', offset=162, start_offset=162, starts_line=True, line_number=15, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=166, start_offset=166, starts_line=True, line_number=16, is_jump_target=True, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=2, argval=4, argrepr='4', offset=168, start_offset=168, starts_line=False, line_number=16, is_jump_target=False, positions=None),
- Instruction(opname='COMPARE_OP', opcode=97, arg=18, argval='<', argrepr='bool(<)', offset=170, start_offset=170, starts_line=False, line_number=16, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_FALSE', opcode=160, arg=1, argval=178, argrepr='to 178', offset=174, start_offset=174, starts_line=False, line_number=16, is_jump_target=False, positions=None),
- Instruction(opname='JUMP_FORWARD', opcode=125, arg=19, argval=216, argrepr='to 216', offset=176, start_offset=176, starts_line=True, line_number=17, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=178, start_offset=178, starts_line=True, line_number=11, is_jump_target=True, positions=None),
- Instruction(opname='TO_BOOL', opcode=56, arg=None, argval=None, argrepr='', offset=180, start_offset=180, starts_line=False, line_number=11, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_FALSE', opcode=160, arg=2, argval=194, argrepr='to 194', offset=188, start_offset=188, starts_line=False, line_number=11, is_jump_target=False, positions=None),
- Instruction(opname='JUMP_BACKWARD', opcode=123, arg=37, argval=120, argrepr='to 120', offset=190, start_offset=190, starts_line=False, line_number=11, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=194, start_offset=194, starts_line=True, line_number=19, is_jump_target=True, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=6, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=204, start_offset=204, starts_line=False, line_number=19, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=206, start_offset=206, starts_line=False, line_number=19, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=214, start_offset=214, starts_line=False, line_number=19, is_jump_target=False, positions=None),
- Instruction(opname='NOP', opcode=42, arg=None, argval=None, argrepr='', offset=216, start_offset=216, starts_line=True, line_number=20, is_jump_target=True, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=5, argval=1, argrepr='1', offset=218, start_offset=218, starts_line=True, line_number=21, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=7, argval=0, argrepr='0', offset=220, start_offset=220, starts_line=False, line_number=21, is_jump_target=False, positions=None),
- Instruction(opname='BINARY_OP', opcode=67, arg=11, argval=11, argrepr='/', offset=222, start_offset=222, starts_line=False, line_number=21, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=226, start_offset=226, starts_line=False, line_number=21, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_FAST', opcode=144, arg=0, argval='i', argrepr='i', offset=228, start_offset=228, starts_line=True, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='BEFORE_WITH', opcode=2, arg=None, argval=None, argrepr='', offset=230, start_offset=230, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='STORE_FAST', opcode=176, arg=1, argval='dodgy', argrepr='dodgy', offset=232, start_offset=232, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=234, start_offset=234, starts_line=True, line_number=26, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=8, argval='Never reach this', argrepr="'Never reach this'", offset=244, start_offset=244, starts_line=False, line_number=26, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=246, start_offset=246, starts_line=False, line_number=26, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=254, start_offset=254, starts_line=False, line_number=26, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=0, argval=None, argrepr='None', offset=256, start_offset=256, starts_line=True, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=0, argval=None, argrepr='None', offset=258, start_offset=258, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=0, argval=None, argrepr='None', offset=260, start_offset=260, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=2, argval=2, argrepr='', offset=262, start_offset=262, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=270, start_offset=270, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=272, start_offset=272, starts_line=True, line_number=28, is_jump_target=True, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=282, start_offset=282, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=284, start_offset=284, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=292, start_offset=292, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='RETURN_CONST', opcode=167, arg=0, argval=None, argrepr='None', offset=294, start_offset=294, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='PUSH_EXC_INFO', opcode=45, arg=None, argval=None, argrepr='', offset=296, start_offset=296, starts_line=True, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='WITH_EXCEPT_START', opcode=66, arg=None, argval=None, argrepr='', offset=298, start_offset=298, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='TO_BOOL', opcode=56, arg=None, argval=None, argrepr='', offset=300, start_offset=300, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_TRUE', opcode=163, arg=1, argval=312, argrepr='to 312', offset=308, start_offset=308, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='RERAISE', opcode=165, arg=2, argval=2, argrepr='', offset=310, start_offset=310, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=312, start_offset=312, starts_line=False, line_number=25, is_jump_target=True, positions=None),
- Instruction(opname='POP_EXCEPT', opcode=43, arg=None, argval=None, argrepr='', offset=314, start_offset=314, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=316, start_offset=316, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=318, start_offset=318, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='JUMP_BACKWARD', opcode=123, arg=26, argval=272, argrepr='to 272', offset=320, start_offset=320, starts_line=False, line_number=25, is_jump_target=False, positions=None),
- Instruction(opname='COPY', opcode=103, arg=3, argval=3, argrepr='', offset=324, start_offset=324, starts_line=True, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='POP_EXCEPT', opcode=43, arg=None, argval=None, argrepr='', offset=326, start_offset=326, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='RERAISE', opcode=165, arg=1, argval=1, argrepr='', offset=328, start_offset=328, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='PUSH_EXC_INFO', opcode=45, arg=None, argval=None, argrepr='', offset=330, start_offset=330, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=332, start_offset=332, starts_line=True, line_number=22, is_jump_target=False, positions=None),
- Instruction(opname='CHECK_EXC_MATCH', opcode=20, arg=None, argval=None, argrepr='', offset=342, start_offset=342, starts_line=False, line_number=22, is_jump_target=False, positions=None),
- Instruction(opname='POP_JUMP_IF_FALSE', opcode=160, arg=15, argval=376, argrepr='to 376', offset=344, start_offset=344, starts_line=False, line_number=22, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=346, start_offset=346, starts_line=False, line_number=22, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=348, start_offset=348, starts_line=True, line_number=23, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=9, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=358, start_offset=358, starts_line=False, line_number=23, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=360, start_offset=360, starts_line=False, line_number=23, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=368, start_offset=368, starts_line=False, line_number=23, is_jump_target=False, positions=None),
- Instruction(opname='POP_EXCEPT', opcode=43, arg=None, argval=None, argrepr='', offset=370, start_offset=370, starts_line=False, line_number=23, is_jump_target=False, positions=None),
- Instruction(opname='JUMP_BACKWARD', opcode=123, arg=52, argval=272, argrepr='to 272', offset=372, start_offset=372, starts_line=False, line_number=23, is_jump_target=False, positions=None),
- Instruction(opname='RERAISE', opcode=165, arg=0, argval=0, argrepr='', offset=376, start_offset=376, starts_line=True, line_number=22, is_jump_target=True, positions=None),
- Instruction(opname='COPY', opcode=103, arg=3, argval=3, argrepr='', offset=378, start_offset=378, starts_line=True, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='POP_EXCEPT', opcode=43, arg=None, argval=None, argrepr='', offset=380, start_offset=380, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='RERAISE', opcode=165, arg=1, argval=1, argrepr='', offset=382, start_offset=382, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='PUSH_EXC_INFO', opcode=45, arg=None, argval=None, argrepr='', offset=384, start_offset=384, starts_line=False, line_number=None, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_GLOBAL', opcode=150, arg=3, argval='print', argrepr='print + NULL', offset=386, start_offset=386, starts_line=True, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='LOAD_CONST', opcode=142, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=396, start_offset=396, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='CALL', opcode=75, arg=1, argval=1, argrepr='', offset=398, start_offset=398, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='POP_TOP', opcode=44, arg=None, argval=None, argrepr='', offset=406, start_offset=406, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='RERAISE', opcode=165, arg=0, argval=0, argrepr='', offset=408, start_offset=408, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='COPY', opcode=103, arg=3, argval=3, argrepr='', offset=410, start_offset=410, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='POP_EXCEPT', opcode=43, arg=None, argval=None, argrepr='', offset=412, start_offset=412, starts_line=False, line_number=28, is_jump_target=False, positions=None),
- Instruction(opname='RERAISE', opcode=165, arg=1, argval=1, argrepr='', offset=414, start_offset=414, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='RESUME', opcode=149, arg=0, argval=0, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=1, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=1, argval='range', argrepr='range + NULL', offset=2, start_offset=2, starts_line=True, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=1, argval=10, argrepr='10', offset=12, start_offset=12, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=14, start_offset=14, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='GET_ITER', opcode=19, arg=None, argval=None, argrepr='', offset=22, start_offset=22, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='FOR_ITER', opcode=72, arg=30, argval=88, argrepr='to 88', offset=24, start_offset=24, starts_line=False, line_number=3, is_jump_target=True, positions=None),
+ Instruction(opname='STORE_FAST', opcode=110, arg=0, argval='i', argrepr='i', offset=28, start_offset=28, starts_line=False, line_number=3, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=30, start_offset=30, starts_line=True, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=40, start_offset=40, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=42, start_offset=42, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=50, start_offset=50, starts_line=False, line_number=4, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=52, start_offset=52, starts_line=True, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=2, argval=4, argrepr='4', offset=54, start_offset=54, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='COMPARE_OP', opcode=58, arg=18, argval='<', argrepr='bool(<)', offset=56, start_offset=56, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_FALSE', opcode=97, arg=2, argval=68, argrepr='to 68', offset=60, start_offset=60, starts_line=False, line_number=5, is_jump_target=False, positions=None),
+ Instruction(opname='JUMP_BACKWARD', opcode=77, arg=22, argval=24, argrepr='to 24', offset=64, start_offset=64, starts_line=True, line_number=6, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=68, start_offset=68, starts_line=True, line_number=7, is_jump_target=True, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=3, argval=6, argrepr='6', offset=70, start_offset=70, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='COMPARE_OP', opcode=58, arg=148, argval='>', argrepr='bool(>)', offset=72, start_offset=72, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_TRUE', opcode=100, arg=2, argval=84, argrepr='to 84', offset=76, start_offset=76, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='JUMP_BACKWARD', opcode=77, arg=30, argval=24, argrepr='to 24', offset=80, start_offset=80, starts_line=False, line_number=7, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=84, start_offset=84, starts_line=True, line_number=8, is_jump_target=True, positions=None),
+ Instruction(opname='JUMP_FORWARD', opcode=79, arg=12, argval=112, argrepr='to 112', offset=86, start_offset=86, starts_line=False, line_number=8, is_jump_target=False, positions=None),
+ Instruction(opname='END_FOR', opcode=11, arg=None, argval=None, argrepr='', offset=88, start_offset=88, starts_line=True, line_number=3, is_jump_target=True, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=90, start_offset=90, starts_line=True, line_number=10, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=4, argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=100, start_offset=100, starts_line=False, line_number=10, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=102, start_offset=102, starts_line=False, line_number=10, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=110, start_offset=110, starts_line=False, line_number=10, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST_CHECK', opcode=87, arg=0, argval='i', argrepr='i', offset=112, start_offset=112, starts_line=True, line_number=11, is_jump_target=True, positions=None),
+ Instruction(opname='TO_BOOL', opcode=40, arg=None, argval=None, argrepr='', offset=114, start_offset=114, starts_line=False, line_number=11, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_FALSE', opcode=97, arg=40, argval=206, argrepr='to 206', offset=122, start_offset=122, starts_line=False, line_number=11, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=126, start_offset=126, starts_line=True, line_number=12, is_jump_target=True, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=136, start_offset=136, starts_line=False, line_number=12, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=138, start_offset=138, starts_line=False, line_number=12, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=146, start_offset=146, starts_line=False, line_number=12, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=148, start_offset=148, starts_line=True, line_number=13, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=5, argval=1, argrepr='1', offset=150, start_offset=150, starts_line=False, line_number=13, is_jump_target=False, positions=None),
+ Instruction(opname='BINARY_OP', opcode=45, arg=23, argval=23, argrepr='-=', offset=152, start_offset=152, starts_line=False, line_number=13, is_jump_target=False, positions=None),
+ Instruction(opname='STORE_FAST', opcode=110, arg=0, argval='i', argrepr='i', offset=156, start_offset=156, starts_line=False, line_number=13, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=158, start_offset=158, starts_line=True, line_number=14, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=3, argval=6, argrepr='6', offset=160, start_offset=160, starts_line=False, line_number=14, is_jump_target=False, positions=None),
+ Instruction(opname='COMPARE_OP', opcode=58, arg=148, argval='>', argrepr='bool(>)', offset=162, start_offset=162, starts_line=False, line_number=14, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_FALSE', opcode=97, arg=2, argval=174, argrepr='to 174', offset=166, start_offset=166, starts_line=False, line_number=14, is_jump_target=False, positions=None),
+ Instruction(opname='JUMP_BACKWARD', opcode=77, arg=31, argval=112, argrepr='to 112', offset=170, start_offset=170, starts_line=True, line_number=15, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=174, start_offset=174, starts_line=True, line_number=16, is_jump_target=True, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=2, argval=4, argrepr='4', offset=176, start_offset=176, starts_line=False, line_number=16, is_jump_target=False, positions=None),
+ Instruction(opname='COMPARE_OP', opcode=58, arg=18, argval='<', argrepr='bool(<)', offset=178, start_offset=178, starts_line=False, line_number=16, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_FALSE', opcode=97, arg=1, argval=188, argrepr='to 188', offset=182, start_offset=182, starts_line=False, line_number=16, is_jump_target=False, positions=None),
+ Instruction(opname='JUMP_FORWARD', opcode=79, arg=20, argval=228, argrepr='to 228', offset=186, start_offset=186, starts_line=True, line_number=17, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=188, start_offset=188, starts_line=True, line_number=11, is_jump_target=True, positions=None),
+ Instruction(opname='TO_BOOL', opcode=40, arg=None, argval=None, argrepr='', offset=190, start_offset=190, starts_line=False, line_number=11, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_FALSE', opcode=97, arg=2, argval=206, argrepr='to 206', offset=198, start_offset=198, starts_line=False, line_number=11, is_jump_target=False, positions=None),
+ Instruction(opname='JUMP_BACKWARD', opcode=77, arg=40, argval=126, argrepr='to 126', offset=202, start_offset=202, starts_line=False, line_number=11, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=206, start_offset=206, starts_line=True, line_number=19, is_jump_target=True, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=6, argval='Who let lolcatz into this test suite?', argrepr="'Who let lolcatz into this test suite?'", offset=216, start_offset=216, starts_line=False, line_number=19, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=218, start_offset=218, starts_line=False, line_number=19, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=226, start_offset=226, starts_line=False, line_number=19, is_jump_target=False, positions=None),
+ Instruction(opname='NOP', opcode=30, arg=None, argval=None, argrepr='', offset=228, start_offset=228, starts_line=True, line_number=20, is_jump_target=True, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=5, argval=1, argrepr='1', offset=230, start_offset=230, starts_line=True, line_number=21, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=7, argval=0, argrepr='0', offset=232, start_offset=232, starts_line=False, line_number=21, is_jump_target=False, positions=None),
+ Instruction(opname='BINARY_OP', opcode=45, arg=11, argval=11, argrepr='/', offset=234, start_offset=234, starts_line=False, line_number=21, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=238, start_offset=238, starts_line=False, line_number=21, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_FAST', opcode=85, arg=0, argval='i', argrepr='i', offset=240, start_offset=240, starts_line=True, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='BEFORE_WITH', opcode=2, arg=None, argval=None, argrepr='', offset=242, start_offset=242, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='STORE_FAST', opcode=110, arg=1, argval='dodgy', argrepr='dodgy', offset=244, start_offset=244, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=246, start_offset=246, starts_line=True, line_number=26, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=8, argval='Never reach this', argrepr="'Never reach this'", offset=256, start_offset=256, starts_line=False, line_number=26, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=258, start_offset=258, starts_line=False, line_number=26, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=266, start_offset=266, starts_line=False, line_number=26, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=0, argval=None, argrepr='None', offset=268, start_offset=268, starts_line=True, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=0, argval=None, argrepr='None', offset=270, start_offset=270, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=0, argval=None, argrepr='None', offset=272, start_offset=272, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=2, argval=2, argrepr='', offset=274, start_offset=274, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=282, start_offset=282, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=284, start_offset=284, starts_line=True, line_number=28, is_jump_target=True, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=294, start_offset=294, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=296, start_offset=296, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=304, start_offset=304, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='RETURN_CONST', opcode=103, arg=0, argval=None, argrepr='None', offset=306, start_offset=306, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='PUSH_EXC_INFO', opcode=33, arg=None, argval=None, argrepr='', offset=308, start_offset=308, starts_line=True, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='WITH_EXCEPT_START', opcode=44, arg=None, argval=None, argrepr='', offset=310, start_offset=310, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='TO_BOOL', opcode=40, arg=None, argval=None, argrepr='', offset=312, start_offset=312, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_TRUE', opcode=100, arg=1, argval=326, argrepr='to 326', offset=320, start_offset=320, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='RERAISE', opcode=102, arg=2, argval=2, argrepr='', offset=324, start_offset=324, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=326, start_offset=326, starts_line=False, line_number=25, is_jump_target=True, positions=None),
+ Instruction(opname='POP_EXCEPT', opcode=31, arg=None, argval=None, argrepr='', offset=328, start_offset=328, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=330, start_offset=330, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=332, start_offset=332, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='JUMP_BACKWARD', opcode=77, arg=27, argval=284, argrepr='to 284', offset=334, start_offset=334, starts_line=False, line_number=25, is_jump_target=False, positions=None),
+ Instruction(opname='COPY', opcode=61, arg=3, argval=3, argrepr='', offset=338, start_offset=338, starts_line=True, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='POP_EXCEPT', opcode=31, arg=None, argval=None, argrepr='', offset=340, start_offset=340, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='RERAISE', opcode=102, arg=1, argval=1, argrepr='', offset=342, start_offset=342, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='PUSH_EXC_INFO', opcode=33, arg=None, argval=None, argrepr='', offset=344, start_offset=344, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=346, start_offset=346, starts_line=True, line_number=22, is_jump_target=False, positions=None),
+ Instruction(opname='CHECK_EXC_MATCH', opcode=7, arg=None, argval=None, argrepr='', offset=356, start_offset=356, starts_line=False, line_number=22, is_jump_target=False, positions=None),
+ Instruction(opname='POP_JUMP_IF_FALSE', opcode=97, arg=15, argval=392, argrepr='to 392', offset=358, start_offset=358, starts_line=False, line_number=22, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=362, start_offset=362, starts_line=False, line_number=22, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=364, start_offset=364, starts_line=True, line_number=23, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=9, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=374, start_offset=374, starts_line=False, line_number=23, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=376, start_offset=376, starts_line=False, line_number=23, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=384, start_offset=384, starts_line=False, line_number=23, is_jump_target=False, positions=None),
+ Instruction(opname='POP_EXCEPT', opcode=31, arg=None, argval=None, argrepr='', offset=386, start_offset=386, starts_line=False, line_number=23, is_jump_target=False, positions=None),
+ Instruction(opname='JUMP_BACKWARD', opcode=77, arg=54, argval=284, argrepr='to 284', offset=388, start_offset=388, starts_line=False, line_number=23, is_jump_target=False, positions=None),
+ Instruction(opname='RERAISE', opcode=102, arg=0, argval=0, argrepr='', offset=392, start_offset=392, starts_line=True, line_number=22, is_jump_target=True, positions=None),
+ Instruction(opname='COPY', opcode=61, arg=3, argval=3, argrepr='', offset=394, start_offset=394, starts_line=True, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='POP_EXCEPT', opcode=31, arg=None, argval=None, argrepr='', offset=396, start_offset=396, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='RERAISE', opcode=102, arg=1, argval=1, argrepr='', offset=398, start_offset=398, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='PUSH_EXC_INFO', opcode=33, arg=None, argval=None, argrepr='', offset=400, start_offset=400, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_GLOBAL', opcode=91, arg=3, argval='print', argrepr='print + NULL', offset=402, start_offset=402, starts_line=True, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='LOAD_CONST', opcode=83, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=412, start_offset=412, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='CALL', opcode=53, arg=1, argval=1, argrepr='', offset=414, start_offset=414, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='POP_TOP', opcode=32, arg=None, argval=None, argrepr='', offset=422, start_offset=422, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='RERAISE', opcode=102, arg=0, argval=0, argrepr='', offset=424, start_offset=424, starts_line=False, line_number=28, is_jump_target=False, positions=None),
+ Instruction(opname='COPY', opcode=61, arg=3, argval=3, argrepr='', offset=426, start_offset=426, starts_line=True, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='POP_EXCEPT', opcode=31, arg=None, argval=None, argrepr='', offset=428, start_offset=428, starts_line=False, line_number=None, is_jump_target=False, positions=None),
+ Instruction(opname='RERAISE', opcode=102, arg=1, argval=1, argrepr='', offset=430, start_offset=430, starts_line=False, line_number=None, is_jump_target=False, positions=None),
]
# One last piece of inspect fodder to check the default line number handling
def simple(): pass
expected_opinfo_simple = [
- Instruction(opname='RESUME', opcode=166, arg=0, argval=0, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=simple.__code__.co_firstlineno, is_jump_target=False, positions=None),
- Instruction(opname='RETURN_CONST', opcode=167, arg=0, argval=None, argrepr='None', offset=2, start_offset=2, starts_line=False, line_number=simple.__code__.co_firstlineno, is_jump_target=False),
+ Instruction(opname='RESUME', opcode=149, arg=0, argval=0, argrepr='', offset=0, start_offset=0, starts_line=True, line_number=simple.__code__.co_firstlineno, is_jump_target=False, positions=None),
+ Instruction(opname='RETURN_CONST', opcode=103, arg=0, argval=None, argrepr='None', offset=2, start_offset=2, starts_line=False, line_number=simple.__code__.co_firstlineno, is_jump_target=False),
]
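The expected_opinfo_* lists above are compared against what the dis module actually reports for the corresponding functions, so every tuple has to track the current opcode numbering. A minimal sketch of how such data can be inspected (plain dis API, not the test's own regeneration helper):

    import dis

    def simple(): pass

    # dis.get_instructions() yields Instruction named tuples; opname, opcode,
    # offset and argval are among the fields exercised in the lists above.
    for instr in dis.get_instructions(simple):
        print(instr.opname, instr.opcode, instr.offset, instr.argval)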
@@ -2026,6 +2029,7 @@ def test_start_offset(self):
opcode.opmap["EXTENDED_ARG"], 0x01,
opcode.opmap["EXTENDED_ARG"], 0x01,
opcode.opmap["POP_JUMP_IF_TRUE"], 0xFF,
+ opcode.opmap["CACHE"], 0x00,
])
jump = list(dis._get_instructions_bytes(code))[-1]
self.assertEqual(8, jump.offset)
@@ -2035,18 +2039,20 @@ def test_start_offset(self):
opcode.opmap["LOAD_FAST"], 0x00,
opcode.opmap["EXTENDED_ARG"], 0x01,
opcode.opmap["POP_JUMP_IF_TRUE"], 0xFF,
+ opcode.opmap["CACHE"], 0x00,
opcode.opmap["EXTENDED_ARG"], 0x01,
opcode.opmap["EXTENDED_ARG"], 0x01,
opcode.opmap["EXTENDED_ARG"], 0x01,
opcode.opmap["POP_JUMP_IF_TRUE"], 0xFF,
+ opcode.opmap["CACHE"], 0x00,
])
instructions = list(dis._get_instructions_bytes(code))
# 1st jump
self.assertEqual(4, instructions[2].offset)
self.assertEqual(2, instructions[2].start_offset)
# 2nd jump
- self.assertEqual(12, instructions[6].offset)
- self.assertEqual(6, instructions[6].start_offset)
+ self.assertEqual(14, instructions[6].offset)
+ self.assertEqual(8, instructions[6].start_offset)
def test_cache_offset_and_end_offset(self):
code = bytes([
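In the test_start_offset hunks above, offset points at the opcode itself while start_offset points at the first EXTENDED_ARG prefix that belongs to it; the added CACHE bytes appear to account for the inline cache entry that POP_JUMP_IF_TRUE carries in this version, which is why the second jump's offsets move from 12/6 to 14/8. A rough sketch of the EXTENDED_ARG arithmetic only (an illustration, not test code):

    # Three EXTENDED_ARG prefixes of 0x01 followed by an oparg byte of 0xFF
    # combine into one argument, most significant byte first.
    arg = 0
    for byte in (0x01, 0x01, 0x01, 0xFF):
        arg = (arg << 8) | byte
    assert arg == 0x010101FF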
diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py
index 9cc460c8b913f6..6e12e82a7a0084 100644
--- a/Lib/test/test_doctest.py
+++ b/Lib/test/test_doctest.py
@@ -784,15 +784,13 @@ class TestDocTestFinder(unittest.TestCase):
def test_issue35753(self):
# This import of `call` should trigger issue35753 when
- # `support.run_doctest` is called due to unwrap failing,
+ # DocTestFinder.find() is called due to inspect.unwrap() failing,
# however with a patched doctest this should succeed.
from unittest.mock import call
dummy_module = types.ModuleType("dummy")
dummy_module.__dict__['inject_call'] = call
- try:
- support.run_doctest(dummy_module, verbosity=True)
- except ValueError as e:
- raise support.TestFailed("Doctest unwrap failed") from e
+ finder = doctest.DocTestFinder()
+ self.assertEqual(finder.find(dummy_module), [])
def test_empty_namespace_package(self):
pkg_name = 'doctest_empty_pkg'
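The rewritten test above sidesteps the support.run_doctest() helper and calls the public doctest API directly: DocTestFinder().find(module) walks a module and returns the DocTest objects it extracts, which is an empty list here because the dummy module contains no docstring examples. A small sketch of the same call under that assumption:

    import doctest
    import types

    mod = types.ModuleType("dummy")
    mod.value = 1  # no docstrings, so nothing to collect

    # find() returns a (possibly empty) list of doctest.DocTest objects.
    assert doctest.DocTestFinder().find(mod) == []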
diff --git a/Lib/test/test_dynamic.py b/Lib/test/test_dynamic.py
index 7e12d428e0fde2..0aa3be6a1bde6a 100644
--- a/Lib/test/test_dynamic.py
+++ b/Lib/test/test_dynamic.py
@@ -145,7 +145,7 @@ def __missing__(self, key):
code = "lambda: " + "+".join(f"_number_{i}" for i in range(variables))
sum_func = eval(code, MyGlobals())
expected = sum(range(variables))
- # Warm up the the function for quickening (PEP 659)
+ # Warm up the function for quickening (PEP 659)
for _ in range(30):
self.assertEqual(sum_func(), expected)
diff --git a/Lib/test/test_eintr.py b/Lib/test/test_eintr.py
index 528147802ba47e..49b15f1a2dba92 100644
--- a/Lib/test/test_eintr.py
+++ b/Lib/test/test_eintr.py
@@ -9,6 +9,7 @@
class EINTRTests(unittest.TestCase):
@unittest.skipUnless(hasattr(signal, "setitimer"), "requires setitimer()")
+ @support.requires_resource('walltime')
def test_all(self):
# Run the tester in a sub-process, to make sure there is only one
# thread (for reliable signal delivery).
diff --git a/Lib/test/test_email/test_email.py b/Lib/test/test_email/test_email.py
index 2a237095b9080c..512464f87162cd 100644
--- a/Lib/test/test_email/test_email.py
+++ b/Lib/test/test_email/test_email.py
@@ -2236,7 +2236,7 @@ def test_multipart_valid_cte_no_defect(self):
"\nContent-Transfer-Encoding: {}".format(cte)))
self.assertEqual(len(msg.defects), 0)
- # test_headerregistry.TestContentTyopeHeader invalid_1 and invalid_2.
+ # test_headerregistry.TestContentTypeHeader invalid_1 and invalid_2.
def test_invalid_content_type(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
diff --git a/Lib/test/test_embed.py b/Lib/test/test_embed.py
index 50c9f61017e022..7f1a4e665f3b5d 100644
--- a/Lib/test/test_embed.py
+++ b/Lib/test/test_embed.py
@@ -26,6 +26,7 @@
PYMEM_ALLOCATOR_NOT_SET = 0
PYMEM_ALLOCATOR_DEBUG = 2
PYMEM_ALLOCATOR_MALLOC = 3
+Py_STATS = hasattr(sys, '_stats_on')
# _PyCoreConfig_InitCompatConfig()
API_COMPAT = 1
@@ -512,6 +513,8 @@ class InitConfigTests(EmbeddingTestsMixin, unittest.TestCase):
'safe_path': 0,
'_is_python_build': IGNORE_CONFIG,
}
+ if Py_STATS:
+ CONFIG_COMPAT['_pystats'] = 0
if MS_WINDOWS:
CONFIG_COMPAT.update({
'legacy_windows_stdio': 0,
@@ -895,6 +898,8 @@ def test_init_from_config(self):
'check_hash_pycs_mode': 'always',
'pathconfig_warnings': 0,
}
+ if Py_STATS:
+ config['_pystats'] = 1
self.check_all_configs("test_init_from_config", config, preconfig,
api=API_COMPAT)
@@ -927,6 +932,8 @@ def test_init_compat_env(self):
'safe_path': 1,
'int_max_str_digits': 4567,
}
+ if Py_STATS:
+ config['_pystats'] = 1
self.check_all_configs("test_init_compat_env", config, preconfig,
api=API_COMPAT)
@@ -960,6 +967,8 @@ def test_init_python_env(self):
'safe_path': 1,
'int_max_str_digits': 4567,
}
+ if Py_STATS:
+ config['_pystats'] = 1
self.check_all_configs("test_init_python_env", config, preconfig,
api=API_PYTHON)
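The Py_STATS guard introduced above follows a feature-probe pattern: sys._stats_on is only present on interpreters built with --enable-pystats, so the expected configuration gains a _pystats key only on such builds. A minimal sketch of the probe (names other than sys._stats_on are illustrative):

    import sys

    # Present only on --enable-pystats builds; a plain hasattr() probe is
    # harmless everywhere else.
    Py_STATS = hasattr(sys, '_stats_on')

    expected_config = {'safe_path': 0}
    if Py_STATS:
        expected_config['_pystats'] = 0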
diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py
index 11a5b425efff9a..8c1f285f7b3bc2 100644
--- a/Lib/test/test_enum.py
+++ b/Lib/test/test_enum.py
@@ -316,6 +316,7 @@ def __str__(self):
return self.name.title()
def __format__(self, spec):
return ''.join(reversed(self.name))
+ self.NewBaseEnum = NewBaseEnum
class NewSubEnum(NewBaseEnum):
first = auto()
self.NewSubEnum = NewSubEnum
@@ -382,10 +383,8 @@ def __str__(self):
return self.name.title()
def __format__(self, spec):
return ''.join(reversed(self.name))
- NewBaseEnum = self.enum_type('NewBaseEnum', dict(__format__=__format__, __str__=__str__))
- class NewSubEnum(NewBaseEnum):
- first = auto()
- self.NewSubEnum = NewBaseEnum('NewSubEnum', 'first')
+ self.NewBaseEnum = self.enum_type('NewBaseEnum', dict(__format__=__format__, __str__=__str__))
+ self.NewSubEnum = self.NewBaseEnum('NewSubEnum', 'first')
#
def _generate_next_value_(name, start, last, values):
pass
@@ -601,6 +600,10 @@ class SubEnum(SuperEnum):
self.assertTrue('description' not in dir(SubEnum))
self.assertTrue('description' in dir(SubEnum.sample), dir(SubEnum.sample))
+ def test_empty_enum_has_no_values(self):
+ with self.assertRaisesRegex(TypeError, "<.... 'NewBaseEnum'> has no members"):
+ self.NewBaseEnum(7)
+
def test_enum_in_enum_out(self):
Main = self.MainEnum
self.assertIs(Main(Main.first), Main.first)
@@ -4688,8 +4691,6 @@ def _generate_next_value_(name, start, count, last):
self.assertEqual(Huh.TWO.value, (2, 2))
self.assertEqual(Huh.THREE.value, (3, 3, 3))
-class TestEnumTypeSubclassing(unittest.TestCase):
- pass
expected_help_output_with_docs = """\
Help on class Color in module %s:
diff --git a/Lib/test/test_exception_group.py b/Lib/test/test_exception_group.py
index a02d54da35e948..20122679223843 100644
--- a/Lib/test/test_exception_group.py
+++ b/Lib/test/test_exception_group.py
@@ -1,7 +1,7 @@
import collections.abc
import types
import unittest
-from test.support import C_RECURSION_LIMIT
+from test.support import Py_C_RECURSION_LIMIT
class TestExceptionGroupTypeHierarchy(unittest.TestCase):
def test_exception_group_types(self):
@@ -460,7 +460,7 @@ def test_basics_split_by_predicate__match(self):
class DeepRecursionInSplitAndSubgroup(unittest.TestCase):
def make_deep_eg(self):
e = TypeError(1)
- for i in range(C_RECURSION_LIMIT + 1):
+ for i in range(Py_C_RECURSION_LIMIT + 1):
e = ExceptionGroup('eg', [e])
return e
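make_deep_eg() above now nests one ExceptionGroup per unit of Py_C_RECURSION_LIMIT, which is what the split/subgroup recursion checks then have to walk. For readers unfamiliar with the API, a shallow illustration of the split() call those tests exercise:

    # split() partitions a group into (matching, non-matching) subgroups.
    eg = ExceptionGroup('eg', [TypeError(1), ValueError(2)])
    match, rest = eg.split(TypeError)
    assert [type(e) for e in match.exceptions] == [TypeError]
    assert [type(e) for e in rest.exceptions] == [ValueError]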
diff --git a/Lib/test/test_faulthandler.py b/Lib/test/test_faulthandler.py
index 907c2cda86cbae..3c1e8c150ae711 100644
--- a/Lib/test/test_faulthandler.py
+++ b/Lib/test/test_faulthandler.py
@@ -683,6 +683,7 @@ def test_dump_traceback_later_fd(self):
with tempfile.TemporaryFile('wb+') as fp:
self.check_dump_traceback_later(fd=fp.fileno())
+ @support.requires_resource('walltime')
def test_dump_traceback_later_twice(self):
self.check_dump_traceback_later(loops=2)
diff --git a/Lib/test/test_float.py b/Lib/test/test_float.py
index c4ee1e08251d63..b6daae7e9280ff 100644
--- a/Lib/test/test_float.py
+++ b/Lib/test/test_float.py
@@ -25,7 +25,7 @@
#locate file with float format test values
test_dir = os.path.dirname(__file__) or os.curdir
-format_testfile = os.path.join(test_dir, 'formatfloat_testcases.txt')
+format_testfile = os.path.join(test_dir, 'mathdata', 'formatfloat_testcases.txt')
class FloatSubclass(float):
pass
@@ -733,8 +733,13 @@ def test_format_testfile(self):
lhs, rhs = map(str.strip, line.split('->'))
fmt, arg = lhs.split()
- self.assertEqual(fmt % float(arg), rhs)
- self.assertEqual(fmt % -float(arg), '-' + rhs)
+ f = float(arg)
+ self.assertEqual(fmt % f, rhs)
+ self.assertEqual(fmt % -f, '-' + rhs)
+ if fmt != '%r':
+ fmt2 = fmt[1:]
+ self.assertEqual(format(f, fmt2), rhs)
+ self.assertEqual(format(-f, fmt2), '-' + rhs)
def test_issue5864(self):
self.assertEqual(format(123.456, '.4'), '123.5')
@@ -763,6 +768,7 @@ def test_issue35560(self):
class ReprTestCase(unittest.TestCase):
def test_repr(self):
with open(os.path.join(os.path.split(__file__)[0],
+ 'mathdata',
'floating_points.txt'), encoding="utf-8") as floats_file:
for line in floats_file:
line = line.strip()
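The extra assertions added to test_format_testfile above lean on the fact that, for floats, format() with a given spec matches the corresponding %-style conversion once the leading '%' is stripped, so each case in the data file can be checked through both code paths. A quick illustration of that equivalence:

    f = 1234.5678
    fmt = '%.2f'

    # '%'-formatting and format() agree once the leading '%' is dropped.
    assert fmt % f == format(f, fmt[1:]) == '1234.57'
    assert fmt % -f == format(-f, fmt[1:]) == '-1234.57'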
diff --git a/Lib/test/test_fractions.py b/Lib/test/test_fractions.py
index e112f49d2e7944..499e3b6e656faa 100644
--- a/Lib/test/test_fractions.py
+++ b/Lib/test/test_fractions.py
@@ -7,6 +7,7 @@
import operator
import fractions
import functools
+import os
import sys
import typing
import unittest
@@ -15,6 +16,9 @@
from pickle import dumps, loads
F = fractions.Fraction
+#locate file with float format test values
+test_dir = os.path.dirname(__file__) or os.curdir
+format_testfile = os.path.join(test_dir, 'mathdata', 'formatfloat_testcases.txt')
class DummyFloat(object):
"""Dummy float class for testing comparisons with Fractions"""
@@ -1220,6 +1224,30 @@ def test_invalid_formats(self):
with self.assertRaises(ValueError):
format(fraction, spec)
+ @requires_IEEE_754
+ def test_float_format_testfile(self):
+ with open(format_testfile, encoding="utf-8") as testfile:
+ for line in testfile:
+ if line.startswith('--'):
+ continue
+ line = line.strip()
+ if not line:
+ continue
+
+ lhs, rhs = map(str.strip, line.split('->'))
+ fmt, arg = lhs.split()
+ if fmt == '%r':
+ continue
+ fmt2 = fmt[1:]
+ with self.subTest(fmt=fmt, arg=arg):
+ f = F(float(arg))
+ self.assertEqual(format(f, fmt2), rhs)
+ if f: # skip negative zero
+ self.assertEqual(format(-f, fmt2), '-' + rhs)
+ f = F(arg)
+ self.assertEqual(float(format(f, fmt2)), float(rhs))
+ self.assertEqual(float(format(-f, fmt2)), float('-' + rhs))
+
if __name__ == '__main__':
unittest.main()
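The new test_float_format_testfile reuses the float formatting data for Fraction, which accepts float-presentation format specifiers; a Fraction built from the same value is expected to format to the same digits in these cases. A small sketch of the behaviour, assuming a Python version whose Fraction supports these specs:

    from fractions import Fraction

    # Fractions accept float-style presentation types such as 'f' and 'e'.
    assert format(Fraction(1, 4), '.3f') == '0.250'
    assert format(Fraction(355, 113), '.2f') == '3.14'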
diff --git a/Lib/test/test_frame.py b/Lib/test/test_frame.py
index 6bb0144e9b1ed7..9491c7facdf077 100644
--- a/Lib/test/test_frame.py
+++ b/Lib/test/test_frame.py
@@ -322,7 +322,7 @@ def f():
sneaky_frame_object = None
gc.enable()
next(g)
- # g.gi_frame should be the the frame object from the callback (the
+ # g.gi_frame should be the frame object from the callback (the
# one that was *requested* second, but *created* first):
self.assertIs(g.gi_frame, sneaky_frame_object)
finally:
diff --git a/Lib/test/test_fstring.py b/Lib/test/test_fstring.py
index 16f01973f99f3e..4f05a149a901b2 100644
--- a/Lib/test/test_fstring.py
+++ b/Lib/test/test_fstring.py
@@ -1027,6 +1027,10 @@ def test_lambda(self):
"f'{lambda x:}'",
"f'{lambda :}'",
])
+ # Ensure the detection of invalid lambdas doesn't trigger detection
+ # for valid lambdas in the second error pass
+ with self.assertRaisesRegex(SyntaxError, "invalid syntax"):
+ compile("lambda name_3=f'{name_4}': {name_3}\n1 $ 1", "", "exec")
# but don't emit the paren warning in general cases
with self.assertRaisesRegex(SyntaxError, "f-string: expecting a valid expression after '{'"):
diff --git a/Lib/test/test_ftplib.py b/Lib/test/test_ftplib.py
index 544228e3bab47b..bebd1bbb9e2703 100644
--- a/Lib/test/test_ftplib.py
+++ b/Lib/test/test_ftplib.py
@@ -325,8 +325,8 @@ def handle_error(self):
if ssl is not None:
- CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
- CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
+ CERTFILE = os.path.join(os.path.dirname(__file__), "certdata", "keycert3.pem")
+ CAFILE = os.path.join(os.path.dirname(__file__), "certdata", "pycacert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
diff --git a/Lib/test/test_functools.py b/Lib/test/test_functools.py
index 5ba7f51c91f3b5..e4de2c5ede15f1 100644
--- a/Lib/test/test_functools.py
+++ b/Lib/test/test_functools.py
@@ -26,10 +26,16 @@
py_functools = import_helper.import_fresh_module('functools',
blocked=['_functools'])
-c_functools = import_helper.import_fresh_module('functools')
+c_functools = import_helper.import_fresh_module('functools',
+ fresh=['_functools'])
decimal = import_helper.import_fresh_module('decimal', fresh=['_decimal'])
+_partial_types = [py_functools.partial]
+if c_functools:
+ _partial_types.append(c_functools.partial)
+
+
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
@@ -201,7 +207,7 @@ def test_repr(self):
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
- if self.partial in (c_functools.partial, py_functools.partial):
+ if self.partial in _partial_types:
name = 'functools.partial'
else:
name = self.partial.__name__
@@ -223,7 +229,7 @@ def test_repr(self):
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
- if self.partial in (c_functools.partial, py_functools.partial):
+ if self.partial in _partial_types:
name = 'functools.partial'
else:
name = self.partial.__name__
@@ -250,7 +256,7 @@ def test_recursive_repr(self):
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
- with self.AllowPickle():
+ with replaced_module('functools', self.module):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
@@ -333,7 +339,7 @@ def test_setstate_subclasses(self):
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
- with self.AllowPickle():
+ with replaced_module('functools', self.module):
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
@@ -387,14 +393,9 @@ def __getitem__(self, key):
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
+ module = c_functools
partial = c_functools.partial
- class AllowPickle:
- def __enter__(self):
- return self
- def __exit__(self, type, value, tb):
- return False
-
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
@@ -437,15 +438,9 @@ def __str__(self):
class TestPartialPy(TestPartial, unittest.TestCase):
+ module = py_functools
partial = py_functools.partial
- class AllowPickle:
- def __init__(self):
- self._cm = replaced_module("functools", py_functools)
- def __enter__(self):
- return self._cm.__enter__()
- def __exit__(self, type, value, tb):
- return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
@@ -1872,9 +1867,10 @@ def orig(): ...
def py_cached_func(x, y):
return 3 * x + y
-@c_functools.lru_cache()
-def c_cached_func(x, y):
- return 3 * x + y
+if c_functools:
+ @c_functools.lru_cache()
+ def c_cached_func(x, y):
+ return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
@@ -1891,18 +1887,20 @@ def cached_staticmeth(x, y):
return 3 * x + y
+@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestLRUC(TestLRU, unittest.TestCase):
- module = c_functools
- cached_func = c_cached_func,
+ if c_functools:
+ module = c_functools
+ cached_func = c_cached_func,
- @module.lru_cache()
- def cached_meth(self, x, y):
- return 3 * x + y
+ @module.lru_cache()
+ def cached_meth(self, x, y):
+ return 3 * x + y
- @staticmethod
- @module.lru_cache()
- def cached_staticmeth(x, y):
- return 3 * x + y
+ @staticmethod
+ @module.lru_cache()
+ def cached_staticmeth(x, y):
+ return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
diff --git a/Lib/test/test_future_stmt/__init__.py b/Lib/test/test_future_stmt/__init__.py
new file mode 100644
index 00000000000000..f2a39a3fe29c7f
--- /dev/null
+++ b/Lib/test/test_future_stmt/__init__.py
@@ -0,0 +1,6 @@
+import os
+from test import support
+
+
+def load_tests(*args):
+ return support.load_package_tests(os.path.dirname(__file__), *args)
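For readers unfamiliar with the ``load_tests`` hook used in the new package ``__init__``, here is a minimal, self-contained sketch of the protocol (hypothetical package, simplified relative to ``support.load_package_tests``)::

   # Minimal sketch of the unittest load_tests protocol (hypothetical package).
   # unittest calls load_tests(loader, standard_tests, pattern) when it imports
   # a package's __init__ and uses the returned suite in place of the default.
   import os


   def load_tests(loader, standard_tests, pattern):
       # Discover every test_*.py module next to this __init__.py and append
       # it to the suite unittest already built (simplified: the real helper
       # also pins top_level_dir so module names stay fully qualified).
       this_dir = os.path.dirname(__file__)
       package_tests = loader.discover(start_dir=this_dir,
                                       pattern=pattern or "test_*.py")
       standard_tests.addTests(package_tests)
       return standard_tests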
diff --git a/Lib/test/badsyntax_future10.py b/Lib/test/test_future_stmt/badsyntax_future.py
similarity index 100%
rename from Lib/test/badsyntax_future10.py
rename to Lib/test/test_future_stmt/badsyntax_future.py
diff --git a/Lib/test/future_test1.py b/Lib/test/test_future_stmt/import_nested_scope_twice.py
similarity index 100%
rename from Lib/test/future_test1.py
rename to Lib/test/test_future_stmt/import_nested_scope_twice.py
diff --git a/Lib/test/future_test2.py b/Lib/test/test_future_stmt/nested_scope.py
similarity index 100%
rename from Lib/test/future_test2.py
rename to Lib/test/test_future_stmt/nested_scope.py
diff --git a/Lib/test/test_future.py b/Lib/test/test_future_stmt/test_future.py
similarity index 74%
rename from Lib/test/test_future.py
rename to Lib/test/test_future_stmt/test_future.py
index 4730bfafbd9cfe..2c8ceb664cb362 100644
--- a/Lib/test/test_future.py
+++ b/Lib/test/test_future_stmt/test_future.py
@@ -10,6 +10,8 @@
import re
import sys
+TOP_LEVEL_MSG = 'from __future__ imports must occur at the beginning of the file'
+
rx = re.compile(r'\((\S+).py, line (\d+)')
def get_error_location(msg):
@@ -18,65 +20,141 @@ def get_error_location(msg):
class FutureTest(unittest.TestCase):
- def check_syntax_error(self, err, basename, lineno, offset=1):
- self.assertIn('%s.py, line %d' % (basename, lineno), str(err))
- self.assertEqual(os.path.basename(err.filename), basename + '.py')
+ def check_syntax_error(self, err, basename,
+ *,
+ lineno,
+ message=TOP_LEVEL_MSG, offset=1):
+ if basename != '':
+ basename += '.py'
+
+ self.assertEqual(f'{message} ({basename}, line {lineno})', str(err))
+ self.assertEqual(os.path.basename(err.filename), basename)
self.assertEqual(err.lineno, lineno)
self.assertEqual(err.offset, offset)
- def test_future1(self):
- with import_helper.CleanImport('future_test1'):
- from test import future_test1
- self.assertEqual(future_test1.result, 6)
+ def assertSyntaxError(self, code,
+ *,
+ lineno=1,
+ message=TOP_LEVEL_MSG, offset=1,
+ parametrize_docstring=True):
+ code = dedent(code.lstrip('\n'))
+ for add_docstring in ([False, True] if parametrize_docstring else [False]):
+ with self.subTest(code=code, add_docstring=add_docstring):
+ if add_docstring:
+ code = '"""Docstring"""\n' + code
+ lineno += 1
+ with self.assertRaises(SyntaxError) as cm:
+ exec(code)
+ self.check_syntax_error(cm.exception, "",
+ lineno=lineno,
+ message=message,
+ offset=offset)
+
+ def test_import_nested_scope_twice(self):
+ # Import the name nested_scopes twice to trigger SF bug #407394
+ with import_helper.CleanImport(
+ 'test.test_future_stmt.import_nested_scope_twice',
+ ):
+ from test.test_future_stmt import import_nested_scope_twice
+ self.assertEqual(import_nested_scope_twice.result, 6)
+
+ def test_nested_scope(self):
+ with import_helper.CleanImport('test.test_future_stmt.nested_scope'):
+ from test.test_future_stmt import nested_scope
+ self.assertEqual(nested_scope.result, 6)
+
+ def test_future_single_import(self):
+ with import_helper.CleanImport(
+ 'test.test_future_stmt.test_future_single_import',
+ ):
+ from test.test_future_stmt import test_future_single_import
+
+ def test_future_multiple_imports(self):
+ with import_helper.CleanImport(
+ 'test.test_future_stmt.test_future_multiple_imports',
+ ):
+ from test.test_future_stmt import test_future_multiple_imports
+
+ def test_future_multiple_features(self):
+ with import_helper.CleanImport(
+ "test.test_future_stmt.test_future_multiple_features",
+ ):
+ from test.test_future_stmt import test_future_multiple_features
+
+ def test_unknown_future_flag(self):
+ code = """
+ from __future__ import nested_scopes
+ from __future__ import rested_snopes # deliberate typo: nested => rested
+ """
+ self.assertSyntaxError(
+ code, lineno=2,
+ message='future feature rested_snopes is not defined',
+ )
- def test_future2(self):
- with import_helper.CleanImport('future_test2'):
- from test import future_test2
- self.assertEqual(future_test2.result, 6)
+ def test_future_import_not_on_top(self):
+ code = """
+ import some_module
+ from __future__ import annotations
+ """
+ self.assertSyntaxError(code, lineno=2)
- def test_future3(self):
- with import_helper.CleanImport('test_future3'):
- from test import test_future3
+ code = """
+ import __future__
+ from __future__ import annotations
+ """
+ self.assertSyntaxError(code, lineno=2)
- def test_badfuture3(self):
- with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future3
- self.check_syntax_error(cm.exception, "badsyntax_future3", 3)
+ code = """
+ from __future__ import absolute_import
+ "spam, bar, blah"
+ from __future__ import print_function
+ """
+ self.assertSyntaxError(code, lineno=3)
- def test_badfuture4(self):
- with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future4
- self.check_syntax_error(cm.exception, "badsyntax_future4", 3)
+ def test_future_import_with_extra_string(self):
+ code = """
+ '''Docstring'''
+ "this isn't a doc string"
+ from __future__ import nested_scopes
+ """
+ self.assertSyntaxError(code, lineno=3, parametrize_docstring=False)
- def test_badfuture5(self):
- with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future5
- self.check_syntax_error(cm.exception, "badsyntax_future5", 4)
+ def test_multiple_import_statements_on_same_line(self):
+ # With `\`:
+ code = """
+ from __future__ import nested_scopes; import string; from __future__ import \
+ nested_scopes
+ """
+ self.assertSyntaxError(code, offset=54)
- def test_badfuture6(self):
- with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future6
- self.check_syntax_error(cm.exception, "badsyntax_future6", 3)
+ # Without `\`:
+ code = """
+ from __future__ import nested_scopes; import string; from __future__ import nested_scopes
+ """
+ self.assertSyntaxError(code, offset=54)
- def test_badfuture7(self):
- with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future7
- self.check_syntax_error(cm.exception, "badsyntax_future7", 3, 54)
+ def test_future_import_star(self):
+ code = """
+ from __future__ import *
+ """
+ self.assertSyntaxError(code, message='future feature * is not defined')
- def test_badfuture8(self):
- with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future8
- self.check_syntax_error(cm.exception, "badsyntax_future8", 3)
+ def test_future_import_braces(self):
+ code = """
+ from __future__ import braces
+ """
+ # Congrats, you found an easter egg!
+ self.assertSyntaxError(code, message='not a chance')
- def test_badfuture9(self):
- with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future9
- self.check_syntax_error(cm.exception, "badsyntax_future9", 3)
+ code = """
+ from __future__ import nested_scopes, braces
+ """
+ self.assertSyntaxError(code, message='not a chance')
- def test_badfuture10(self):
+ def test_module_with_future_import_not_on_top(self):
with self.assertRaises(SyntaxError) as cm:
- from test import badsyntax_future10
- self.check_syntax_error(cm.exception, "badsyntax_future10", 3)
+ from test.test_future_stmt import badsyntax_future
+ self.check_syntax_error(cm.exception, "badsyntax_future", lineno=3)
def test_ensure_flags_dont_clash(self):
# bpo-39562: test that future flags and compiler flags doesn't clash
@@ -113,10 +191,6 @@ def test_parserhack(self):
else:
self.fail("syntax error didn't occur")
- def test_multiple_features(self):
- with import_helper.CleanImport("test.test_future5"):
- from test import test_future5
-
def test_unicode_literals_exec(self):
scope = {}
exec("from __future__ import unicode_literals; x = ''", {}, scope)
diff --git a/Lib/test/test___future__.py b/Lib/test/test_future_stmt/test_future_flags.py
similarity index 100%
rename from Lib/test/test___future__.py
rename to Lib/test/test_future_stmt/test_future_flags.py
diff --git a/Lib/test/test_future5.py b/Lib/test/test_future_stmt/test_future_multiple_features.py
similarity index 100%
rename from Lib/test/test_future5.py
rename to Lib/test/test_future_stmt/test_future_multiple_features.py
diff --git a/Lib/test/test_future4.py b/Lib/test/test_future_stmt/test_future_multiple_imports.py
similarity index 100%
rename from Lib/test/test_future4.py
rename to Lib/test/test_future_stmt/test_future_multiple_imports.py
diff --git a/Lib/test/test_future3.py b/Lib/test/test_future_stmt/test_future_single_import.py
similarity index 100%
rename from Lib/test/test_future3.py
rename to Lib/test/test_future_stmt/test_future_single_import.py
diff --git a/Lib/test/test_gdb.py b/Lib/test/test_gdb.py
index c05a2d387c429c..5a4394a0993c8d 100644
--- a/Lib/test/test_gdb.py
+++ b/Lib/test/test_gdb.py
@@ -4,7 +4,6 @@
# Lib/test/test_jit_gdb.py
import os
-import platform
import re
import subprocess
import sys
@@ -55,10 +54,6 @@ def get_gdb_version():
if not sysconfig.is_python_build():
raise unittest.SkipTest("test_gdb only works on source builds at the moment.")
-if 'Clang' in platform.python_compiler() and sys.platform == 'darwin':
- raise unittest.SkipTest("test_gdb doesn't work correctly when python is"
- " built with LLVM clang")
-
if ((sysconfig.get_config_var('PGO_PROF_USE_FLAG') or 'xxx') in
(sysconfig.get_config_var('PY_CORE_CFLAGS') or '')):
raise unittest.SkipTest("test_gdb is not reliable on PGO builds")
@@ -247,6 +242,17 @@ def get_stack_trace(self, source=None, script=None,
for pattern in (
'(frame information optimized out)',
'Unable to read information on python frame',
+ # gh-91960: On Python built with "clang -Og", gdb gets
+ # "frame=" for _PyEval_EvalFrameDefault() parameter
+ '(unable to read python frame information)',
+ # gh-104736: On Python built with "clang -Og" on ppc64le,
+ # "py-bt" displays a truncated or not traceback, but "where"
+ # "py-bt" displays a truncated traceback or none at all, but "where"
+ 'Backtrace stopped: frame did not save the PC',
+ # gh-104736: When "bt" command displays something like:
+ # "#1 0x0000000000000000 in ?? ()", the traceback is likely
+ # truncated or wrong.
+ ' ?? ()',
):
if pattern in out:
raise unittest.SkipTest(f"{pattern!r} found in gdb output")
diff --git a/Lib/test/test_generated_cases.py b/Lib/test/test_generated_cases.py
index 54378fced54699..b5eaf824aee706 100644
--- a/Lib/test/test_generated_cases.py
+++ b/Lib/test/test_generated_cases.py
@@ -1,9 +1,31 @@
+import contextlib
+import os
+import sys
import tempfile
import unittest
-import os
+from test import support
from test import test_tools
+
+def skip_if_different_mount_drives():
+ if sys.platform != 'win32':
+ return
+ ROOT = os.path.dirname(os.path.dirname(__file__))
+ root_drive = os.path.splitroot(ROOT)[0]
+ cwd_drive = os.path.splitroot(os.getcwd())[0]
+ if root_drive != cwd_drive:
+ # generate_cases.py uses relpath() which raises ValueError if ROOT
+ # and the current working directory have different mount drives
+ # (on Windows).
+ raise unittest.SkipTest(
+ f"the current working directory and the Python source code "
+ f"directory have different mount drives "
+ f"({cwd_drive} and {root_drive})"
+ )
+skip_if_different_mount_drives()
+
+
test_tools.skip_if_missing('cases_generator')
with test_tools.imports_under_tool('cases_generator'):
import generate_cases
@@ -12,6 +34,12 @@
from parsing import StackEffect
+def handle_stderr():
+ if support.verbose > 1:
+ return contextlib.nullcontext()
+ else:
+ return support.captured_stderr()
+
class TestEffects(unittest.TestCase):
def test_effect_sizes(self):
input_effects = [
@@ -81,11 +109,12 @@ def run_cases_test(self, input: str, expected: str):
temp_input.flush()
a = generate_cases.Generator([self.temp_input_filename])
- a.parse()
- a.analyze()
- if a.errors:
- raise RuntimeError(f"Found {a.errors} errors")
- a.write_instructions(self.temp_output_filename, False)
+ with handle_stderr():
+ a.parse()
+ a.analyze()
+ if a.errors:
+ raise RuntimeError(f"Found {a.errors} errors")
+ a.write_instructions(self.temp_output_filename, False)
with open(self.temp_output_filename) as temp_output:
lines = temp_output.readlines()
@@ -532,6 +561,36 @@ def test_macro_cond_effect(self):
"""
self.run_cases_test(input, output)
+ def test_macro_push_push(self):
+ input = """
+ op(A, (-- val1)) {
+ val1 = spam();
+ }
+ op(B, (-- val2)) {
+ val2 = spam();
+ }
+ macro(M) = A + B;
+ """
+ output = """
+ TARGET(M) {
+ PyObject *val1;
+ PyObject *val2;
+ // A
+ {
+ val1 = spam();
+ }
+ // B
+ {
+ val2 = spam();
+ }
+ STACK_GROW(2);
+ stack_pointer[-2] = val1;
+ stack_pointer[-1] = val2;
+ DISPATCH();
+ }
+ """
+ self.run_cases_test(input, output)
+
if __name__ == "__main__":
unittest.main()
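The drive check added above relies on ``os.path.splitroot()``; a small hedged illustration of the comparison it performs (using ``ntpath`` so the example behaves the same on any platform, Python 3.12+)::

   # Sketch of the drive comparison behind skip_if_different_mount_drives().
   # ntpath.splitroot() splits a Windows path into (drive, root, tail);
   # differing drive parts mean the paths live on different mounts.
   import ntpath

   def same_drive(path_a, path_b):
       return ntpath.splitroot(path_a)[0] == ntpath.splitroot(path_b)[0]

   print(same_drive(r"C:\src\cpython", r"C:\temp"))    # True
   print(same_drive(r"C:\src\cpython", r"D:\builds"))  # False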
diff --git a/Lib/test/test_getopt.py b/Lib/test/test_getopt.py
index c96a33b77fe272..c8b3442de4aa77 100644
--- a/Lib/test/test_getopt.py
+++ b/Lib/test/test_getopt.py
@@ -1,8 +1,8 @@
# test_getopt.py
# David Goodger 2000-08-19
-from test.support import verbose, run_doctest
from test.support.os_helper import EnvironmentVarGuard
+import doctest
import unittest
import getopt
@@ -134,48 +134,49 @@ def test_gnu_getopt(self):
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
- def test_libref_examples(self):
- s = """
- Examples from the Library Reference: Doc/lib/libgetopt.tex
+ def test_issue4629(self):
+ longopts, shortopts = getopt.getopt(['--help='], '', ['help='])
+ self.assertEqual(longopts, [('--help', '')])
+ longopts, shortopts = getopt.getopt(['--help=x'], '', ['help='])
+ self.assertEqual(longopts, [('--help', 'x')])
+ self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '', ['help'])
- An example using only Unix style options:
+def test_libref_examples():
+ """
+ Examples from the Library Reference: Doc/lib/libgetopt.tex
+ An example using only Unix style options:
- >>> import getopt
- >>> args = '-a -b -cfoo -d bar a1 a2'.split()
- >>> args
- ['-a', '-b', '-cfoo', '-d', 'bar', 'a1', 'a2']
- >>> optlist, args = getopt.getopt(args, 'abc:d:')
- >>> optlist
- [('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
- >>> args
- ['a1', 'a2']
- Using long option names is equally easy:
+ >>> import getopt
+ >>> args = '-a -b -cfoo -d bar a1 a2'.split()
+ >>> args
+ ['-a', '-b', '-cfoo', '-d', 'bar', 'a1', 'a2']
+ >>> optlist, args = getopt.getopt(args, 'abc:d:')
+ >>> optlist
+ [('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
+ >>> args
+ ['a1', 'a2']
+ Using long option names is equally easy:
- >>> s = '--condition=foo --testing --output-file abc.def -x a1 a2'
- >>> args = s.split()
- >>> args
- ['--condition=foo', '--testing', '--output-file', 'abc.def', '-x', 'a1', 'a2']
- >>> optlist, args = getopt.getopt(args, 'x', [
- ... 'condition=', 'output-file=', 'testing'])
- >>> optlist
- [('--condition', 'foo'), ('--testing', ''), ('--output-file', 'abc.def'), ('-x', '')]
- >>> args
- ['a1', 'a2']
- """
- import types
- m = types.ModuleType("libreftest", s)
- run_doctest(m, verbose)
+ >>> s = '--condition=foo --testing --output-file abc.def -x a1 a2'
+ >>> args = s.split()
+ >>> args
+ ['--condition=foo', '--testing', '--output-file', 'abc.def', '-x', 'a1', 'a2']
+ >>> optlist, args = getopt.getopt(args, 'x', [
+ ... 'condition=', 'output-file=', 'testing'])
+ >>> optlist
+ [('--condition', 'foo'), ('--testing', ''), ('--output-file', 'abc.def'), ('-x', '')]
+ >>> args
+ ['a1', 'a2']
+ """
+
+def load_tests(loader, tests, pattern):
+ tests.addTest(doctest.DocTestSuite())
+ return tests
- def test_issue4629(self):
- longopts, shortopts = getopt.getopt(['--help='], '', ['help='])
- self.assertEqual(longopts, [('--help', '')])
- longopts, shortopts = getopt.getopt(['--help=x'], '', ['help='])
- self.assertEqual(longopts, [('--help', 'x')])
- self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '', ['help'])
if __name__ == "__main__":
unittest.main()
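The rewritten getopt tests use the ``load_tests``/``doctest.DocTestSuite`` pattern; a minimal standalone sketch (with a hypothetical ``add`` function) of how docstring examples become unittest cases::

   import doctest
   import unittest


   def add(a, b):
       """Return the sum of two numbers.

       >>> add(2, 3)
       5
       """
       return a + b


   def load_tests(loader, tests, pattern):
       # DocTestSuite() collects the interactive examples from this module's
       # docstrings and wraps them as regular unittest test cases.
       tests.addTest(doctest.DocTestSuite())
       return tests


   if __name__ == "__main__":
       unittest.main()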
diff --git a/Lib/test/test_grammar.py b/Lib/test/test_grammar.py
index 8507a07e498532..8501006b799262 100644
--- a/Lib/test/test_grammar.py
+++ b/Lib/test/test_grammar.py
@@ -236,6 +236,10 @@ def check(test, error=False):
check(f"[{num}for x in ()]")
check(f"{num}spam", error=True)
+ # gh-88943: Invalid non-ASCII character following a numerical literal.
+ with self.assertRaisesRegex(SyntaxError, r"invalid character '⁄' \(U\+2044\)"):
+ compile(f"{num}⁄7", "", "eval")
+
with self.assertWarnsRegex(SyntaxWarning, r'invalid \w+ literal'):
compile(f"{num}is x", "", "eval")
with warnings.catch_warnings():
@@ -350,6 +354,11 @@ def test_var_annot_syntax_errors(self):
check_syntax_error(self, "x: int: str")
check_syntax_error(self, "def f():\n"
" nonlocal x: int\n")
+ check_syntax_error(self, "def f():\n"
+ " global x: int\n")
+ check_syntax_error(self, "x: int = y = 1")
+ check_syntax_error(self, "z = w: int = 1")
+ check_syntax_error(self, "x: int = y: int = 1")
# AST pass
check_syntax_error(self, "[x, 0]: int\n")
check_syntax_error(self, "f(): int\n")
@@ -363,6 +372,12 @@ def test_var_annot_syntax_errors(self):
check_syntax_error(self, "def f():\n"
" global x\n"
" x: int\n")
+ check_syntax_error(self, "def f():\n"
+ " x: int\n"
+ " nonlocal x\n")
+ check_syntax_error(self, "def f():\n"
+ " nonlocal x\n"
+ " x: int\n")
def test_var_annot_basic_semantics(self):
# execution order
diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
index fe8105ee2bb3fa..5d5832b62b2f94 100644
--- a/Lib/test/test_httplib.py
+++ b/Lib/test/test_httplib.py
@@ -21,11 +21,13 @@
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
-CERT_localhost = os.path.join(here, 'keycert.pem')
+CERT_localhost = os.path.join(here, 'certdata', 'keycert.pem')
# Self-signed cert file for 'fakehostname'
-CERT_fakehostname = os.path.join(here, 'keycert2.pem')
+CERT_fakehostname = os.path.join(here, 'certdata', 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
-CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
+CERT_selfsigned_pythontestdotnet = os.path.join(
+ here, 'certdata', 'selfsigned_pythontestdotnet.pem',
+)
# constants for testing chunked encoding
chunked_start = (
@@ -1954,6 +1956,7 @@ def test_networked_good_cert(self):
h.close()
self.assertIn('nginx', server_string)
+ @support.requires_resource('walltime')
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py
index cfd8a101dcc1c1..9fa6ecf9c08e27 100644
--- a/Lib/test/test_httpservers.py
+++ b/Lib/test/test_httpservers.py
@@ -699,11 +699,20 @@ def test_html_escape_filename(self):
"This test can't be run reliably as root (issue #13308).")
class CGIHTTPServerTestCase(BaseTestCase):
class request_handler(NoLogRequestHandler, CGIHTTPRequestHandler):
- pass
+ _test_case_self = None # populated by each setUp() method call.
+
+ def __init__(self, *args, **kwargs):
+ with self._test_case_self.assertWarnsRegex(
+ DeprecationWarning,
+ r'http\.server\.CGIHTTPRequestHandler'):
+ # This context also happens to catch and silence the
+ # threading DeprecationWarning from os.fork().
+ super().__init__(*args, **kwargs)
linesep = os.linesep.encode('ascii')
def setUp(self):
+ self.request_handler._test_case_self = self # practical, but yuck.
BaseTestCase.setUp(self)
self.cwd = os.getcwd()
self.parent_dir = tempfile.mkdtemp()
@@ -780,6 +789,7 @@ def setUp(self):
os.chdir(self.parent_dir)
def tearDown(self):
+ self.request_handler._test_case_self = None
try:
os.chdir(self.cwd)
if self._pythonexe_symlink:
diff --git a/Lib/test/test_imaplib.py b/Lib/test/test_imaplib.py
index 2b2f1f76d26db3..b97474acca370f 100644
--- a/Lib/test/test_imaplib.py
+++ b/Lib/test/test_imaplib.py
@@ -10,7 +10,7 @@
import threading
import socket
-from test.support import verbose, run_with_tz, run_with_locale, cpython_only
+from test.support import verbose, run_with_tz, run_with_locale, cpython_only, requires_resource
from test.support import hashlib_helper
from test.support import threading_helper
import unittest
@@ -23,8 +23,8 @@
support.requires_working_socket(module=True)
-CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
-CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
+CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "certdata", "keycert3.pem")
+CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "certdata", "pycacert.pem")
class TestImaplib(unittest.TestCase):
@@ -74,6 +74,7 @@ def test_that_Time2Internaldate_returns_a_result(self):
for t in self.timevalues():
imaplib.Time2Internaldate(t)
+ @socket_helper.skip_if_tcp_blackhole
def test_imap4_host_default_value(self):
# Check whether the IMAP4_PORT is truly unavailable.
with socket.socket() as s:
@@ -456,6 +457,7 @@ def test_simple_with_statement(self):
with self.imap_class(*server.server_address):
pass
+ @requires_resource('walltime')
def test_imaplib_timeout_test(self):
_, server = self._setup(SimpleIMAPHandler)
addr = server.server_address[1]
@@ -549,6 +551,7 @@ class NewIMAPSSLTests(NewIMAPTestsMixin, unittest.TestCase):
imap_class = IMAP4_SSL
server_class = SecureTCPServer
+ @requires_resource('walltime')
def test_ssl_raises(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ssl_context.verify_mode, ssl.CERT_REQUIRED)
@@ -563,6 +566,7 @@ def test_ssl_raises(self):
ssl_context=ssl_context)
client.shutdown()
+ @requires_resource('walltime')
def test_ssl_verified(self):
ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_context.load_verify_locations(CAFILE)
diff --git a/Lib/test/test_import/__init__.py b/Lib/test/test_import/__init__.py
index 33bce779f6cc01..48553f9d48b010 100644
--- a/Lib/test/test_import/__init__.py
+++ b/Lib/test/test_import/__init__.py
@@ -22,7 +22,6 @@
import types
import unittest
from unittest import mock
-import _testinternalcapi
import _imp
from test.support import os_helper
@@ -30,9 +29,10 @@
STDLIB_DIR, swap_attr, swap_item, cpython_only, is_emscripten,
is_wasi, run_in_subinterp, run_in_subinterp_with_config, Py_TRACE_REFS)
from test.support.import_helper import (
- forget, make_legacy_pyc, unlink, unload, DirsOnSysPath, CleanImport)
+ forget, make_legacy_pyc, unlink, unload, ready_to_import,
+ DirsOnSysPath, CleanImport)
from test.support.os_helper import (
- TESTFN, rmtree, temp_umask, TESTFN_UNENCODABLE, temp_dir)
+ TESTFN, rmtree, temp_umask, TESTFN_UNENCODABLE)
from test.support import script_helper
from test.support import threading_helper
from test.test_importlib.util import uncache
@@ -49,6 +49,10 @@
import _xxsubinterpreters as _interpreters
except ModuleNotFoundError:
_interpreters = None
+try:
+ import _testinternalcapi
+except ImportError:
+ _testinternalcapi = None
skip_if_dont_write_bytecode = unittest.skipIf(
@@ -125,27 +129,6 @@ def wrapper(self):
return deco
-@contextlib.contextmanager
-def _ready_to_import(name=None, source=""):
- # sets up a temporary directory and removes it
- # creates the module file
- # temporarily clears the module from sys.modules (if any)
- # reverts or removes the module when cleaning up
- name = name or "spam"
- with temp_dir() as tempdir:
- path = script_helper.make_script(tempdir, name, source)
- old_module = sys.modules.pop(name, None)
- try:
- sys.path.insert(0, tempdir)
- yield name, path
- sys.path.remove(tempdir)
- finally:
- if old_module is not None:
- sys.modules[name] = old_module
- elif name in sys.modules:
- del sys.modules[name]
-
-
if _testsinglephase is not None:
def restore__testsinglephase(*, _orig=_testsinglephase):
# We started with the module imported and want to restore
@@ -401,7 +384,7 @@ def test_from_import_missing_attr_path_is_canonical(self):
def test_from_import_star_invalid_type(self):
import re
- with _ready_to_import() as (name, path):
+ with ready_to_import() as (name, path):
with open(path, 'w', encoding='utf-8') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
@@ -410,7 +393,7 @@ def test_from_import_star_invalid_type(self):
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
- with _ready_to_import() as (name, path):
+ with ready_to_import() as (name, path):
with open(path, 'w', encoding='utf-8') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
@@ -818,7 +801,7 @@ class FilePermissionTests(unittest.TestCase):
)
def test_creation_mode(self):
mask = 0o022
- with temp_umask(mask), _ready_to_import() as (name, path):
+ with temp_umask(mask), ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
@@ -837,7 +820,7 @@ def test_creation_mode(self):
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
- with temp_umask(0o022), _ready_to_import() as (name, path):
+ with temp_umask(0o022), ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
@@ -853,7 +836,7 @@ def test_cached_mode_issue_2051(self):
@os_helper.skip_unless_working_chmod
def test_cached_readonly(self):
mode = 0o400
- with temp_umask(0o022), _ready_to_import() as (name, path):
+ with temp_umask(0o022), ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
@@ -868,7 +851,7 @@ def test_cached_readonly(self):
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
- with _ready_to_import() as (name, path):
+ with ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w', encoding='utf-8') as f:
f.write("x = 'original'\n")
diff --git a/Lib/test/test_importlib/import_/test_packages.py b/Lib/test/test_importlib/import_/test_packages.py
index eb0831f7d6d54b..0c29d6083265fa 100644
--- a/Lib/test/test_importlib/import_/test_packages.py
+++ b/Lib/test/test_importlib/import_/test_packages.py
@@ -1,7 +1,6 @@
from test.test_importlib import util
import sys
import unittest
-from test import support
from test.support import import_helper
diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py
index 78ef817906b2aa..f9bd632a01ed2e 100644
--- a/Lib/test/test_inspect.py
+++ b/Lib/test/test_inspect.py
@@ -1,6 +1,7 @@
import asyncio
import builtins
import collections
+import copy
import datetime
import functools
import importlib
@@ -32,7 +33,7 @@
from test.support import cpython_only
from test.support import MISSING_C_DOCSTRINGS, ALWAYS_EQ
-from test.support.import_helper import DirsOnSysPath
+from test.support.import_helper import DirsOnSysPath, ready_to_import
from test.support.os_helper import TESTFN
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import inspect_fodder as mod
@@ -42,8 +43,6 @@
from test import inspect_stringized_annotations
from test import inspect_stringized_annotations_2
-from test.test_import import _ready_to_import
-
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
@@ -3830,6 +3829,28 @@ def test(a_po, /, *, b, **kwargs):
P('bar', P.VAR_POSITIONAL)])),
'(foo, /, *bar)')
+ def test_signature_replace_parameters(self):
+ def test(a, b) -> 42:
+ pass
+
+ sig = inspect.signature(test)
+ parameters = sig.parameters
+ sig = sig.replace(parameters=list(parameters.values())[1:])
+ self.assertEqual(list(sig.parameters), ['b'])
+ self.assertEqual(sig.parameters['b'], parameters['b'])
+ self.assertEqual(sig.return_annotation, 42)
+ sig = sig.replace(parameters=())
+ self.assertEqual(dict(sig.parameters), {})
+
+ sig = inspect.signature(test)
+ parameters = sig.parameters
+ sig = copy.replace(sig, parameters=list(parameters.values())[1:])
+ self.assertEqual(list(sig.parameters), ['b'])
+ self.assertEqual(sig.parameters['b'], parameters['b'])
+ self.assertEqual(sig.return_annotation, 42)
+ sig = copy.replace(sig, parameters=())
+ self.assertEqual(dict(sig.parameters), {})
+
def test_signature_replace_anno(self):
def test() -> 42:
pass
@@ -3843,6 +3864,15 @@ def test() -> 42:
self.assertEqual(sig.return_annotation, 42)
self.assertEqual(sig, inspect.signature(test))
+ sig = inspect.signature(test)
+ sig = copy.replace(sig, return_annotation=None)
+ self.assertIs(sig.return_annotation, None)
+ sig = copy.replace(sig, return_annotation=sig.empty)
+ self.assertIs(sig.return_annotation, sig.empty)
+ sig = copy.replace(sig, return_annotation=42)
+ self.assertEqual(sig.return_annotation, 42)
+ self.assertEqual(sig, inspect.signature(test))
+
def test_signature_replaced(self):
def test():
pass
@@ -4187,41 +4217,66 @@ def test_signature_parameter_replace(self):
p = inspect.Parameter('foo', default=42,
kind=inspect.Parameter.KEYWORD_ONLY)
- self.assertIsNot(p, p.replace())
- self.assertEqual(p, p.replace())
+ self.assertIsNot(p.replace(), p)
+ self.assertEqual(p.replace(), p)
+ self.assertIsNot(copy.replace(p), p)
+ self.assertEqual(copy.replace(p), p)
p2 = p.replace(annotation=1)
self.assertEqual(p2.annotation, 1)
p2 = p2.replace(annotation=p2.empty)
- self.assertEqual(p, p2)
+ self.assertEqual(p2, p)
+ p3 = copy.replace(p, annotation=1)
+ self.assertEqual(p3.annotation, 1)
+ p3 = copy.replace(p3, annotation=p3.empty)
+ self.assertEqual(p3, p)
p2 = p2.replace(name='bar')
self.assertEqual(p2.name, 'bar')
self.assertNotEqual(p2, p)
+ p3 = copy.replace(p3, name='bar')
+ self.assertEqual(p3.name, 'bar')
+ self.assertNotEqual(p3, p)
with self.assertRaisesRegex(ValueError,
'name is a required attribute'):
p2 = p2.replace(name=p2.empty)
+ with self.assertRaisesRegex(ValueError,
+ 'name is a required attribute'):
+ p3 = copy.replace(p3, name=p3.empty)
p2 = p2.replace(name='foo', default=None)
self.assertIs(p2.default, None)
self.assertNotEqual(p2, p)
+ p3 = copy.replace(p3, name='foo', default=None)
+ self.assertIs(p3.default, None)
+ self.assertNotEqual(p3, p)
p2 = p2.replace(name='foo', default=p2.empty)
self.assertIs(p2.default, p2.empty)
-
+ p3 = copy.replace(p3, name='foo', default=p3.empty)
+ self.assertIs(p3.default, p3.empty)
p2 = p2.replace(default=42, kind=p2.POSITIONAL_OR_KEYWORD)
self.assertEqual(p2.kind, p2.POSITIONAL_OR_KEYWORD)
self.assertNotEqual(p2, p)
+ p3 = copy.replace(p3, default=42, kind=p3.POSITIONAL_OR_KEYWORD)
+ self.assertEqual(p3.kind, p3.POSITIONAL_OR_KEYWORD)
+ self.assertNotEqual(p3, p)
with self.assertRaisesRegex(ValueError,
"value "
"is not a valid Parameter.kind"):
p2 = p2.replace(kind=p2.empty)
+ with self.assertRaisesRegex(ValueError,
+ "value "
+ "is not a valid Parameter.kind"):
+ p3 = copy.replace(p3, kind=p3.empty)
p2 = p2.replace(kind=p2.KEYWORD_ONLY)
self.assertEqual(p2, p)
+ p3 = copy.replace(p3, kind=p3.KEYWORD_ONLY)
+ self.assertEqual(p3, p)
def test_signature_parameter_positional_only(self):
with self.assertRaisesRegex(TypeError, 'name must be a str'):
@@ -4897,7 +4952,7 @@ def assertInspectEqual(self, path, source):
def test_getsource_reload(self):
# see issue 1218234
- with _ready_to_import('reload_bug', self.src_before) as (name, path):
+ with ready_to_import('reload_bug', self.src_before) as (name, path):
module = importlib.import_module(name)
self.assertInspectEqual(path, module)
with open(path, 'w', encoding='utf-8') as src:
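The new assertions exercise ``copy.replace()`` (Python 3.13+), which delegates to an object's ``__replace__`` method; a brief hedged sketch of the behaviour being tested::

   import copy
   import inspect


   def greet(name, greeting="hello"):
       return f"{greeting}, {name}"


   sig = inspect.signature(greet)
   # copy.replace(obj, **changes) returns a modified copy via obj.__replace__().
   new_sig = copy.replace(sig, return_annotation=str)
   print(new_sig.return_annotation is str)                   # True
   print(sig.return_annotation is inspect.Signature.empty)   # original untouched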
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index 26ae40d93c84eb..022cf21a4709a2 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -1541,8 +1541,8 @@ def test_read_all(self):
self.assertEqual(b"abcdefg", bufio.read())
- @support.requires_resource('cpu')
@threading_helper.requires_working_threading()
+ @support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
@@ -1930,8 +1930,8 @@ def test_truncate_after_write(self):
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
- @support.requires_resource('cpu')
@threading_helper.requires_working_threading()
+ @support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
@@ -4468,10 +4468,12 @@ def run():
self.assertFalse(err.strip('.!'))
@threading_helper.requires_working_threading()
+ @support.requires_resource('walltime')
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
@threading_helper.requires_working_threading()
+ @support.requires_resource('walltime')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
@@ -4645,11 +4647,13 @@ def alarm_handler(sig, frame):
os.close(r)
@requires_alarm
+ @support.requires_resource('walltime')
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
@requires_alarm
+ @support.requires_resource('walltime')
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r", encoding="latin1")
@@ -4723,10 +4727,12 @@ def alarm2(sig, frame):
raise
@requires_alarm
+ @support.requires_resource('walltime')
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
@requires_alarm
+ @support.requires_resource('walltime')
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
diff --git a/Lib/test/test_listcomps.py b/Lib/test/test_listcomps.py
index bedd99b4a44fcb..12f7bbd123b30c 100644
--- a/Lib/test/test_listcomps.py
+++ b/Lib/test/test_listcomps.py
@@ -125,7 +125,7 @@ def get_output(moddict, name):
self.assertIs(type(e), raises)
else:
for k, v in (outputs or {}).items():
- self.assertEqual(get_output(newns, k), v)
+ self.assertEqual(get_output(newns, k), v, k)
def test_lambdas_with_iteration_var_as_default(self):
code = """
@@ -563,28 +563,38 @@ def test_iter_var_available_in_locals(self):
def test_comp_in_try_except(self):
template = """
- value = ["a"]
+ value = ["ab"]
+ result = snapshot = None
try:
- [{func}(value) for value in value]
+ result = [{func}(value) for value in value]
except:
- pass
+ snapshot = value
+ raise
"""
- for func in ["str", "int"]:
- code = template.format(func=func)
- raises = func != "str"
- with self.subTest(raises=raises):
- self._check_in_scopes(code, {"value": ["a"]})
+ # No exception.
+ code = template.format(func='len')
+ self._check_in_scopes(code, {"value": ["ab"], "result": [2], "snapshot": None})
+ # Handles exception.
+ code = template.format(func='int')
+ self._check_in_scopes(code, {"value": ["ab"], "result": None, "snapshot": ["ab"]},
+ raises=ValueError)
def test_comp_in_try_finally(self):
- code = """
- def f(value):
- try:
- [{func}(value) for value in value]
- finally:
- return value
- ret = f(["a"])
- """
- self._check_in_scopes(code, {"ret": ["a"]})
+ template = """
+ value = ["ab"]
+ result = snapshot = None
+ try:
+ result = [{func}(value) for value in value]
+ finally:
+ snapshot = value
+ """
+ # No exception.
+ code = template.format(func='len')
+ self._check_in_scopes(code, {"value": ["ab"], "result": [2], "snapshot": ["ab"]})
+ # Handles exception.
+ code = template.format(func='int')
+ self._check_in_scopes(code, {"value": ["ab"], "result": None, "snapshot": ["ab"]},
+ raises=ValueError)
def test_exception_in_post_comp_call(self):
code = """
@@ -596,6 +606,13 @@ def test_exception_in_post_comp_call(self):
"""
self._check_in_scopes(code, {"value": [1, None]})
+ def test_frame_locals(self):
+ code = """
+ val = [sys._getframe().f_locals for a in [0]][0]["a"]
+ """
+ import sys
+ self._check_in_scopes(code, {"val": 0}, ns={"sys": sys})
+
__test__ = {'doctests' : doctests}
diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py
index c2e8ff5d463607..375f65f9d16182 100644
--- a/Lib/test/test_logging.py
+++ b/Lib/test/test_logging.py
@@ -680,6 +680,7 @@ def test_path_objects(self):
support.is_emscripten, "Emscripten cannot fstat unlinked files."
)
@threading_helper.requires_working_threading()
+ @support.requires_resource('walltime')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
@@ -2169,7 +2170,7 @@ def test_output(self):
sslctx = None
else:
here = os.path.dirname(__file__)
- localhost_cert = os.path.join(here, "keycert.pem")
+ localhost_cert = os.path.join(here, "certdata", "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
diff --git a/Lib/test/test_math.py b/Lib/test/test_math.py
index 2bda61012164d1..d5d2197c36b254 100644
--- a/Lib/test/test_math.py
+++ b/Lib/test/test_math.py
@@ -33,8 +33,8 @@
else:
file = __file__
test_dir = os.path.dirname(file) or os.curdir
-math_testcases = os.path.join(test_dir, 'math_testcases.txt')
-test_file = os.path.join(test_dir, 'cmath_testcases.txt')
+math_testcases = os.path.join(test_dir, 'mathdata', 'math_testcases.txt')
+test_file = os.path.join(test_dir, 'mathdata', 'cmath_testcases.txt')
def to_ulps(x):
@@ -235,6 +235,10 @@ def __init__(self, value):
def __index__(self):
return self.value
+class BadDescr:
+ def __get__(self, obj, objtype=None):
+ raise ValueError
+
class MathTests(unittest.TestCase):
def ftest(self, name, got, expected, ulp_tol=5, abs_tol=0.0):
@@ -324,6 +328,7 @@ def testAtan2(self):
self.ftest('atan2(0, 1)', math.atan2(0, 1), 0)
self.ftest('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
self.ftest('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
+ self.ftest('atan2(1, -1)', math.atan2(1, -1), 3*math.pi/4)
# math.atan2(0, x)
self.ftest('atan2(0., -inf)', math.atan2(0., NINF), math.pi)
@@ -417,16 +422,22 @@ def __ceil__(self):
return 42
class TestNoCeil:
pass
+ class TestBadCeil:
+ __ceil__ = BadDescr()
self.assertEqual(math.ceil(TestCeil()), 42)
self.assertEqual(math.ceil(FloatCeil()), 42)
self.assertEqual(math.ceil(FloatLike(42.5)), 43)
self.assertRaises(TypeError, math.ceil, TestNoCeil())
+ self.assertRaises(ValueError, math.ceil, TestBadCeil())
t = TestNoCeil()
t.__ceil__ = lambda *args: args
self.assertRaises(TypeError, math.ceil, t)
self.assertRaises(TypeError, math.ceil, t, 0)
+ self.assertEqual(math.ceil(FloatLike(+1.0)), +1.0)
+ self.assertEqual(math.ceil(FloatLike(-1.0)), -1.0)
+
@requires_IEEE_754
def testCopysign(self):
self.assertEqual(math.copysign(1, 42), 1.0)
@@ -567,16 +578,22 @@ def __floor__(self):
return 42
class TestNoFloor:
pass
+ class TestBadFloor:
+ __floor__ = BadDescr()
self.assertEqual(math.floor(TestFloor()), 42)
self.assertEqual(math.floor(FloatFloor()), 42)
self.assertEqual(math.floor(FloatLike(41.9)), 41)
self.assertRaises(TypeError, math.floor, TestNoFloor())
+ self.assertRaises(ValueError, math.floor, TestBadFloor())
t = TestNoFloor()
t.__floor__ = lambda *args: args
self.assertRaises(TypeError, math.floor, t)
self.assertRaises(TypeError, math.floor, t, 0)
+ self.assertEqual(math.floor(FloatLike(+1.0)), +1.0)
+ self.assertEqual(math.floor(FloatLike(-1.0)), -1.0)
+
def testFmod(self):
self.assertRaises(TypeError, math.fmod)
self.ftest('fmod(10, 1)', math.fmod(10, 1), 0.0)
@@ -598,6 +615,7 @@ def testFmod(self):
self.assertEqual(math.fmod(-3.0, NINF), -3.0)
self.assertEqual(math.fmod(0.0, 3.0), 0.0)
self.assertEqual(math.fmod(0.0, NINF), 0.0)
+ self.assertRaises(ValueError, math.fmod, INF, INF)
def testFrexp(self):
self.assertRaises(TypeError, math.frexp)
@@ -714,6 +732,11 @@ def msum(iterable):
s = msum(vals)
self.assertEqual(msum(vals), math.fsum(vals))
+ self.assertEqual(math.fsum([1.0, math.inf]), math.inf)
+ self.assertRaises(OverflowError, math.fsum, [1e+308, 1e+308])
+ self.assertRaises(ValueError, math.fsum, [math.inf, -math.inf])
+ self.assertRaises(TypeError, math.fsum, ['spam'])
+
def testGcd(self):
gcd = math.gcd
self.assertEqual(gcd(0, 0), 0)
@@ -831,6 +854,8 @@ def testHypot(self):
scale = FLOAT_MIN / 2.0 ** exp
self.assertEqual(math.hypot(4*scale, 3*scale), 5*scale)
+ self.assertRaises(TypeError, math.hypot, *([1.0]*18), 'spam')
+
@requires_IEEE_754
@unittest.skipIf(HAVE_DOUBLE_ROUNDING,
"hypot() loses accuracy on machines with double rounding")
@@ -966,6 +991,8 @@ class T(tuple):
dist((1, 2, 3, 4), (5, 6, 7))
with self.assertRaises(ValueError): # Check dimension agree
dist((1, 2, 3), (4, 5, 6, 7))
+ with self.assertRaises(TypeError):
+ dist((1,)*17 + ("spam",), (1,)*18)
with self.assertRaises(TypeError): # Rejects invalid types
dist("abc", "xyz")
int_too_big_for_float = 10 ** (sys.float_info.max_10_exp + 5)
@@ -973,6 +1000,10 @@ class T(tuple):
dist((1, int_too_big_for_float), (2, 3))
with self.assertRaises((ValueError, OverflowError)):
dist((2, 3), (1, int_too_big_for_float))
+ with self.assertRaises(TypeError):
+ dist((1,), 2)
+ with self.assertRaises(TypeError):
+ dist([1], 2)
# Verify that the one dimensional case is equivalent to abs()
for i in range(20):
@@ -1111,6 +1142,7 @@ def test_lcm(self):
def testLdexp(self):
self.assertRaises(TypeError, math.ldexp)
+ self.assertRaises(TypeError, math.ldexp, 2.0, 1.1)
self.ftest('ldexp(0,1)', math.ldexp(0,1), 0)
self.ftest('ldexp(1,1)', math.ldexp(1,1), 2)
self.ftest('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
@@ -1153,6 +1185,7 @@ def testLog(self):
2302.5850929940457)
self.assertRaises(ValueError, math.log, -1.5)
self.assertRaises(ValueError, math.log, -10**1000)
+ self.assertRaises(ValueError, math.log, 10, -10)
self.assertRaises(ValueError, math.log, NINF)
self.assertEqual(math.log(INF), INF)
self.assertTrue(math.isnan(math.log(NAN)))
@@ -2378,6 +2411,14 @@ def __float__(self):
# argument to a float.
self.assertFalse(getattr(y, "converted", False))
+ def test_input_exceptions(self):
+ self.assertRaises(TypeError, math.exp, "spam")
+ self.assertRaises(TypeError, math.erf, "spam")
+ self.assertRaises(TypeError, math.atan2, "spam", 1.0)
+ self.assertRaises(TypeError, math.atan2, 1.0, "spam")
+ self.assertRaises(TypeError, math.atan2, 1.0)
+ self.assertRaises(TypeError, math.atan2, 1.0, 2.0, 3.0)
+
# Custom assertions.
def assertIsNaN(self, value):
@@ -2518,7 +2559,7 @@ def test_fractions(self):
def load_tests(loader, tests, pattern):
from doctest import DocFileSuite
- tests.addTest(DocFileSuite("ieee754.txt"))
+ tests.addTest(DocFileSuite(os.path.join("mathdata", "ieee754.txt")))
return tests
if __name__ == '__main__':
diff --git a/Lib/test/test_monitoring.py b/Lib/test/test_monitoring.py
index 845185be737eb2..2100d998ff0808 100644
--- a/Lib/test/test_monitoring.py
+++ b/Lib/test/test_monitoring.py
@@ -501,6 +501,22 @@ def test_two_with_disable(self):
self.assertEqual(sys.monitoring._all_events(), {})
sys.monitoring.restart_events()
+ def test_with_instruction_event(self):
+ """Test that the second tool can set events with instruction events set by the first tool."""
+ def f():
+ pass
+ code = f.__code__
+
+ try:
+ self.assertEqual(sys.monitoring._all_events(), {})
+ sys.monitoring.set_local_events(TEST_TOOL, code, E.INSTRUCTION | E.LINE)
+ sys.monitoring.set_local_events(TEST_TOOL2, code, E.LINE)
+ finally:
+ sys.monitoring.set_events(TEST_TOOL, 0)
+ sys.monitoring.set_events(TEST_TOOL2, 0)
+ self.assertEqual(sys.monitoring._all_events(), {})
+
+
class LineMonitoringTest(MonitoringTestBase, unittest.TestCase):
def test_lines_single(self):
@@ -1152,6 +1168,23 @@ def func1():
('instruction', 'func1', 14),
('line', 'get_events', 11)])
+ def test_turn_off_only_instruction(self):
+ """
+ LINE events should be recorded after INSTRUCTION event is turned off
+ """
+ events = []
+ def line(*args):
+ events.append("line")
+ sys.monitoring.set_events(TEST_TOOL, 0)
+ sys.monitoring.register_callback(TEST_TOOL, E.LINE, line)
+ sys.monitoring.register_callback(TEST_TOOL, E.INSTRUCTION, lambda *args: None)
+ sys.monitoring.set_events(TEST_TOOL, E.LINE | E.INSTRUCTION)
+ sys.monitoring.set_events(TEST_TOOL, E.LINE)
+ events = []
+ a = 0
+ sys.monitoring.set_events(TEST_TOOL, 0)
+ self.assertGreater(len(events), 0)
+
class TestInstallIncrementallly(MonitoringTestBase, unittest.TestCase):
def check_events(self, func, must_include, tool=TEST_TOOL, recorders=(ExceptionRecorder,)):
@@ -1218,9 +1251,11 @@ def test_instruction_then_line(self):
self.check_events(self.func2,
recorders = recorders, must_include = self.MUST_INCLUDE_CI)
+LOCAL_RECORDERS = CallRecorder, LineRecorder, CReturnRecorder, CRaiseRecorder
+
class TestLocalEvents(MonitoringTestBase, unittest.TestCase):
- def check_events(self, func, expected, tool=TEST_TOOL, recorders=(ExceptionRecorder,)):
+ def check_events(self, func, expected, tool=TEST_TOOL, recorders=()):
try:
self.assertEqual(sys.monitoring._all_events(), {})
event_list = []
@@ -1248,7 +1283,7 @@ def func1():
line2 = 2
line3 = 3
- self.check_events(func1, recorders = MANY_RECORDERS, expected = [
+ self.check_events(func1, recorders = LOCAL_RECORDERS, expected = [
('line', 'func1', 1),
('line', 'func1', 2),
('line', 'func1', 3)])
@@ -1260,7 +1295,7 @@ def func2():
[].append(2)
line3 = 3
- self.check_events(func2, recorders = MANY_RECORDERS, expected = [
+ self.check_events(func2, recorders = LOCAL_RECORDERS, expected = [
('line', 'func2', 1),
('line', 'func2', 2),
('call', 'append', [2]),
@@ -1277,15 +1312,17 @@ def func3():
line = 5
line = 6
- self.check_events(func3, recorders = MANY_RECORDERS, expected = [
+ self.check_events(func3, recorders = LOCAL_RECORDERS, expected = [
('line', 'func3', 1),
('line', 'func3', 2),
('line', 'func3', 3),
- ('raise', KeyError),
('line', 'func3', 4),
('line', 'func3', 5),
('line', 'func3', 6)])
+ def test_set_non_local_event(self):
+ with self.assertRaises(ValueError):
+ sys.monitoring.set_local_events(TEST_TOOL, just_call.__code__, E.RAISE)
def line_from_offset(code, offset):
for start, end, line in code.co_lines():
@@ -1344,10 +1381,10 @@ def func():
self.check_events(func, recorders = JUMP_AND_BRANCH_RECORDERS, expected = [
('branch', 'func', 2, 2),
- ('branch', 'func', 3, 6),
+ ('branch', 'func', 3, 4),
('jump', 'func', 6, 2),
('branch', 'func', 2, 2),
- ('branch', 'func', 3, 4),
+ ('branch', 'func', 3, 3),
('jump', 'func', 4, 2),
('branch', 'func', 2, 2)])
@@ -1357,13 +1394,13 @@ def func():
('line', 'func', 2),
('branch', 'func', 2, 2),
('line', 'func', 3),
- ('branch', 'func', 3, 6),
+ ('branch', 'func', 3, 4),
('line', 'func', 6),
('jump', 'func', 6, 2),
('line', 'func', 2),
('branch', 'func', 2, 2),
('line', 'func', 3),
- ('branch', 'func', 3, 4),
+ ('branch', 'func', 3, 3),
('line', 'func', 4),
('jump', 'func', 4, 2),
('line', 'func', 2),
@@ -1396,8 +1433,8 @@ def func():
('line', 'func', 5),
('line', 'meth', 1),
('jump', 'func', 5, 5),
- ('jump', 'func', 5, '[offset=112]'),
- ('branch', 'func', '[offset=118]', '[offset=120]'),
+ ('jump', 'func', 5, '[offset=114]'),
+ ('branch', 'func', '[offset=120]', '[offset=122]'),
('line', 'get_events', 11)])
self.check_events(func, recorders = FLOW_AND_LINE_RECORDERS, expected = [
@@ -1412,8 +1449,8 @@ def func():
('line', 'meth', 1),
('return', None),
('jump', 'func', 5, 5),
- ('jump', 'func', 5, '[offset=112]'),
- ('branch', 'func', '[offset=118]', '[offset=120]'),
+ ('jump', 'func', 5, '[offset=114]'),
+ ('branch', 'func', '[offset=120]', '[offset=122]'),
('return', None),
('line', 'get_events', 11)])
@@ -1698,3 +1735,56 @@ def run():
self.assertEqual(caught, "inner")
finally:
sys.monitoring.set_events(TEST_TOOL, 0)
+
+ def test_108390(self):
+
+ class Foo:
+ def __init__(self, set_event):
+ if set_event:
+ sys.monitoring.set_events(TEST_TOOL, E.PY_RESUME)
+
+ def make_foo_optimized_then_set_event():
+ for i in range(100):
+ Foo(i == 99)
+
+ try:
+ make_foo_optimized_then_set_event()
+ finally:
+ sys.monitoring.set_events(TEST_TOOL, 0)
+
+ def test_gh108976(self):
+ sys.monitoring.use_tool_id(0, "test")
+ self.addCleanup(sys.monitoring.free_tool_id, 0)
+ sys.monitoring.set_events(0, 0)
+ sys.monitoring.register_callback(0, E.LINE, lambda *args: sys.monitoring.set_events(0, 0))
+ sys.monitoring.register_callback(0, E.INSTRUCTION, lambda *args: 0)
+ sys.monitoring.set_events(0, E.LINE | E.INSTRUCTION)
+ sys.monitoring.set_events(0, 0)
+
+
+class TestOptimizer(MonitoringTestBase, unittest.TestCase):
+
+ def setUp(self):
+ import _testinternalcapi
+ self.old_opt = _testinternalcapi.get_optimizer()
+ opt = _testinternalcapi.get_counter_optimizer()
+ _testinternalcapi.set_optimizer(opt)
+ super(TestOptimizer, self).setUp()
+
+ def tearDown(self):
+ import _testinternalcapi
+ super(TestOptimizer, self).tearDown()
+ _testinternalcapi.set_optimizer(self.old_opt)
+
+ def test_for_loop(self):
+ def test_func(x):
+ i = 0
+ while i < x:
+ i += 1
+
+ code = test_func.__code__
+ sys.monitoring.set_local_events(TEST_TOOL, code, E.PY_START)
+ self.assertEqual(sys.monitoring.get_local_events(TEST_TOOL, code), E.PY_START)
+ test_func(1000)
+ sys.monitoring.set_local_events(TEST_TOOL, code, 0)
+ self.assertEqual(sys.monitoring.get_local_events(TEST_TOOL, code), 0)
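For context on the ``sys.monitoring`` calls exercised throughout these tests, a minimal hedged sketch of claiming a tool id, registering a callback, and enabling a local event for one code object (Python 3.12+; the tool id and names are illustrative)::

   import sys

   TOOL = 2                       # any tool id in range(6) that is not in use
   E = sys.monitoring.events

   def on_start(code, instruction_offset):
       print("PY_START in", code.co_name)

   def demo():
       return sum(range(5))

   sys.monitoring.use_tool_id(TOOL, "demo-tool")
   try:
       sys.monitoring.register_callback(TOOL, E.PY_START, on_start)
       # Local events fire only for this code object, not globally.
       sys.monitoring.set_local_events(TOOL, demo.__code__, E.PY_START)
       demo()                     # triggers the callback once
   finally:
       sys.monitoring.set_local_events(TOOL, demo.__code__, 0)
       sys.monitoring.free_tool_id(TOOL)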
diff --git a/Lib/test/test_msvcrt.py b/Lib/test/test_msvcrt.py
new file mode 100644
index 00000000000000..81ec13026014e6
--- /dev/null
+++ b/Lib/test/test_msvcrt.py
@@ -0,0 +1,109 @@
+import os
+import sys
+import unittest
+
+from test.support import os_helper
+from test.support.os_helper import TESTFN, TESTFN_ASCII
+
+if sys.platform != "win32":
+ raise unittest.SkipTest("windows related tests")
+
+import _winapi
+import msvcrt;
+
+from _testconsole import write_input, flush_console_input_buffer
+
+
+class TestFileOperations(unittest.TestCase):
+ def test_locking(self):
+ with open(TESTFN, "w") as f:
+ self.addCleanup(os_helper.unlink, TESTFN)
+
+ msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, 1)
+ self.assertRaises(OSError, msvcrt.locking, f.fileno(), msvcrt.LK_NBLCK, 1)
+
+ def test_unlockfile(self):
+ with open(TESTFN, "w") as f:
+ self.addCleanup(os_helper.unlink, TESTFN)
+
+ msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, 1)
+ msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, 1)
+ msvcrt.locking(f.fileno(), msvcrt.LK_LOCK, 1)
+
+ def test_setmode(self):
+ with open(TESTFN, "w") as f:
+ self.addCleanup(os_helper.unlink, TESTFN)
+
+ msvcrt.setmode(f.fileno(), os.O_BINARY)
+ msvcrt.setmode(f.fileno(), os.O_TEXT)
+
+ def test_open_osfhandle(self):
+ h = _winapi.CreateFile(TESTFN_ASCII, _winapi.GENERIC_WRITE, 0, 0, 1, 128, 0)
+ self.addCleanup(os_helper.unlink, TESTFN_ASCII)
+
+ try:
+ fd = msvcrt.open_osfhandle(h, os.O_RDONLY)
+ h = None
+ os.close(fd)
+ finally:
+ if h:
+ _winapi.CloseHandle(h)
+
+ def test_get_osfhandle(self):
+ with open(TESTFN, "w") as f:
+ self.addCleanup(os_helper.unlink, TESTFN)
+
+ msvcrt.get_osfhandle(f.fileno())
+
+
+c = '\u5b57' # unicode CJK char (meaning 'character') for 'wide-char' tests
+c_encoded = b'\x57\x5b' # utf-16-le (which Windows uses internally) encoding of this CJK char
+
+
+class TestConsoleIO(unittest.TestCase):
+ def test_kbhit(self):
+ h = msvcrt.get_osfhandle(sys.stdin.fileno())
+ flush_console_input_buffer(h)
+ self.assertEqual(msvcrt.kbhit(), 0)
+
+ def test_getch(self):
+ msvcrt.ungetch(b'c')
+ self.assertEqual(msvcrt.getch(), b'c')
+
+ def test_getwch(self):
+ with open('CONIN$', 'rb', buffering=0) as stdin:
+ h = msvcrt.get_osfhandle(stdin.fileno())
+ flush_console_input_buffer(h)
+
+ write_input(stdin, c_encoded)
+ self.assertEqual(msvcrt.getwch(), c)
+
+ def test_getche(self):
+ msvcrt.ungetch(b'c')
+ self.assertEqual(msvcrt.getche(), b'c')
+
+ def test_getwche(self):
+ with open('CONIN$', 'rb', buffering=0) as stdin:
+ h = msvcrt.get_osfhandle(stdin.fileno())
+ flush_console_input_buffer(h)
+
+ write_input(stdin, c_encoded)
+ self.assertEqual(msvcrt.getwche(), c)
+
+ def test_putch(self):
+ msvcrt.putch(b'c')
+
+ def test_putwch(self):
+ msvcrt.putwch(c)
+
+
+class TestOther(unittest.TestCase):
+ def test_heap_min(self):
+ try:
+ msvcrt.heapmin()
+ except OSError:
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_netrc.py b/Lib/test/test_netrc.py
index 573d636de956d1..81e11a293cc4c8 100644
--- a/Lib/test/test_netrc.py
+++ b/Lib/test/test_netrc.py
@@ -1,5 +1,5 @@
import netrc, os, unittest, sys, textwrap
-from test.support import os_helper, run_unittest
+from test.support import os_helper
try:
import pwd
@@ -308,8 +308,6 @@ def test_security(self):
self.assertEqual(nrc.hosts['foo.domain.com'],
('anonymous', '', 'pass'))
-def test_main():
- run_unittest(NetrcTestCase)
if __name__ == "__main__":
- test_main()
+ unittest.main()
diff --git a/Lib/test/test_opcache.py b/Lib/test/test_opcache.py
index 692e03fbb5e084..2b2783d57be8f4 100644
--- a/Lib/test/test_opcache.py
+++ b/Lib/test/test_opcache.py
@@ -4,13 +4,17 @@
import threading
import types
import unittest
-from test.support import threading_helper
+from test.support import threading_helper, check_impl_detail
+
+# Skip this module on other interpreters: it is CPython-specific.
+if check_impl_detail(cpython=False):
+ raise unittest.SkipTest('implementation detail specific to cpython')
+
import _testinternalcapi
def disabling_optimizer(func):
def wrapper(*args, **kwargs):
- import _testinternalcapi
old_opt = _testinternalcapi.get_optimizer()
_testinternalcapi.set_optimizer(None)
try:
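
The disabling_optimizer wrapper shown above is a plain save/disable/restore pattern around the _testinternalcapi optimizer hooks. A generic sketch of the same pattern as a context manager, with illustrative names only (temporarily_disabled is not part of the patch), could look like:

    import contextlib

    @contextlib.contextmanager
    def temporarily_disabled(getter, setter, off_value=None):
        # Save the current value, install off_value, and always restore on exit.
        saved = getter()
        setter(off_value)
        try:
            yield saved
        finally:
            setter(saved)

    state = {"optimizer": "default"}
    with temporarily_disabled(lambda: state["optimizer"],
                              lambda value: state.update(optimizer=value)):
        assert state["optimizer"] is None
    assert state["optimizer"] == "default"
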
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index 99e9ed213e5615..66aece2c4b3eb9 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -737,7 +737,7 @@ def test_access_denied(self):
# denied. See issue 28075.
# os.environ['TEMP'] should be located on a volume that
# supports file ACLs.
- fname = os.path.join(os.environ['TEMP'], self.fname)
+ fname = os.path.join(os.environ['TEMP'], self.fname + "_access")
self.addCleanup(os_helper.unlink, fname)
create_file(fname, b'ABC')
# Deny the right to [S]YNCHRONIZE on the file to
@@ -913,6 +913,13 @@ def set_time(filename):
os.utime(self.fname, None)
self._test_utime_current(set_time)
+ def test_utime_nonexistent(self):
+ now = time.time()
+ filename = 'nonexistent'
+ with self.assertRaises(FileNotFoundError) as cm:
+ os.utime(filename, (now, now))
+ self.assertEqual(cm.exception.filename, filename)
+
def get_file_system(self, path):
if sys.platform == 'win32':
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
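
The new test_utime_nonexistent above checks that os.utime() reports the offending path on the raised exception. A minimal illustration of that .filename attribute, outside the test suite, would be:

    import os

    path = "definitely-missing-file"
    try:
        os.utime(path, (0, 0))
    except FileNotFoundError as exc:
        # The failing path is attached to the OSError for error reporting.
        assert exc.filename == path
        print(f"utime failed for {exc.filename!r} (errno={exc.errno})")
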
diff --git a/Lib/test/test_pdb.py b/Lib/test/test_pdb.py
index 734b5c83cdff7d..8fed1d0f7162fd 100644
--- a/Lib/test/test_pdb.py
+++ b/Lib/test/test_pdb.py
@@ -664,8 +664,10 @@ def test_pdb_alias_command():
... o.method()
>>> with PdbTestInput([ # doctest: +ELLIPSIS
+ ... 'alias pi',
... 'alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = {%1.__dict__[k]}")',
... 'alias ps pi self',
+ ... 'alias ps',
... 'pi o',
... 's',
... 'ps',
@@ -674,8 +676,12 @@ def test_pdb_alias_command():
... test_function()
> (4)test_function()
-> o.method()
+ (Pdb) alias pi
+ *** Unknown alias 'pi'
(Pdb) alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = {%1.__dict__[k]}")
(Pdb) alias ps pi self
+ (Pdb) alias ps
+ ps = pi self
(Pdb) pi o
o.attr1 = 10
o.attr2 = str
@@ -848,9 +854,7 @@ def test_post_mortem_chained():
... try:
... test_function_reraise()
... except Exception as e:
- ... # same as pdb.post_mortem(e), but with custom pdb instance.
- ... instance.reset()
- ... instance.interaction(None, e)
+ ... pdb._post_mortem(e, instance)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'exceptions',
@@ -907,11 +911,18 @@ def test_post_mortem_chained():
def test_post_mortem_cause_no_context():
"""Test post mortem traceback debugging of chained exception
+ >>> def make_exc_with_stack(type_, *content, from_=None):
+ ... try:
+ ... raise type_(*content) from from_
+ ... except Exception as out:
+ ... return out
+ ...
+
>>> def main():
... try:
... raise ValueError('Context Not Shown')
... except Exception as e1:
- ... raise ValueError("With Cause") from TypeError('The Cause')
+ ... raise ValueError("With Cause") from make_exc_with_stack(TypeError,'The Cause')
>>> def test_function():
... import pdb;
@@ -919,12 +930,11 @@ def test_post_mortem_cause_no_context():
... try:
... main()
... except Exception as e:
- ... # same as pdb.post_mortem(e), but with custom pdb instance.
- ... instance.reset()
- ... instance.interaction(None, e)
+ ... pdb._post_mortem(e, instance)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'exceptions',
+ ... 'exceptions 0',
... 'exceptions 1',
... 'up',
... 'down',
@@ -934,20 +944,23 @@ def test_post_mortem_cause_no_context():
... test_function()
... except ValueError:
... print('Ok.')
- > (5)main()
- -> raise ValueError("With Cause") from TypeError('The Cause')
+ > (5)main()
+ -> raise ValueError("With Cause") from make_exc_with_stack(TypeError,'The Cause')
(Pdb) exceptions
- 0 TypeError('The Cause')
- > 1 ValueError('With Cause')
+ 0 TypeError('The Cause')
+ > 1 ValueError('With Cause')
+ (Pdb) exceptions 0
+ > (3)make_exc_with_stack()
+ -> raise type_(*content) from from_
(Pdb) exceptions 1
- > (5)main()
- -> raise ValueError("With Cause") from TypeError('The Cause')
+ > (5)main()
+ -> raise ValueError("With Cause") from make_exc_with_stack(TypeError,'The Cause')
(Pdb) up
- > (5)test_function()
+ > (5)test_function()
-> main()
(Pdb) down
- > (5)main()
- -> raise ValueError("With Cause") from TypeError('The Cause')
+ > (5)main()
+ -> raise ValueError("With Cause") from make_exc_with_stack(TypeError,'The Cause')
(Pdb) exit"""
@@ -971,9 +984,7 @@ def test_post_mortem_context_of_the_cause():
... try:
... main()
... except Exception as e:
- ... # same as pdb.post_mortem(e), but with custom pdb instance.
- ... instance.reset()
- ... instance.interaction(None, e)
+ ... pdb._post_mortem(e, instance)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'exceptions',
@@ -1046,9 +1057,7 @@ def test_post_mortem_from_none():
... try:
... main()
... except Exception as e:
- ... # same as pdb.post_mortem(e), but with custom pdb instance.
- ... instance.reset()
- ... instance.interaction(None, e)
+ ... pdb._post_mortem(e, instance)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'exceptions',
@@ -1066,6 +1075,64 @@ def test_post_mortem_from_none():
"""
+def test_post_mortem_from_no_stack():
+ """Test post mortem traceback debugging of chained exceptions,
+
+ especially when one exception in the chain has no stack.
+
+ >>> def main():
+ ... raise Exception() from Exception()
+
+
+ >>> def test_function():
+ ... import pdb;
+ ... instance = pdb.Pdb(nosigint=True, readrc=False)
+ ... try:
+ ... main()
+ ... except Exception as e:
+ ... pdb._post_mortem(e, instance)
+
+ >>> with PdbTestInput( # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+ ... ["exceptions",
+ ... "exceptions 0",
+ ... "exit"],
+ ... ):
+ ... try:
+ ... test_function()
+ ... except ValueError:
+ ... print('Correctly reraised.')
+ > (2)main()
+ -> raise Exception() from Exception()
+ (Pdb) exceptions
+ - Exception()
+ > 1 Exception()
+ (Pdb) exceptions 0
+ *** This exception does not have a traceback, cannot jump to it
+ (Pdb) exit
+ """
+
+
+def test_post_mortem_single_no_stack():
+ """Test post mortem called when the original exception has no stack
+
+
+ >>> def test_function():
+ ... import pdb;
+ ... instance = pdb.Pdb(nosigint=True, readrc=False)
+ ... import sys
+ ... sys.last_exc = Exception()
+ ... pdb._post_mortem(sys.last_exc, instance)
+
+ >>> with PdbTestInput( # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+ ... []
+ ... ):
+ ... try:
+ ... test_function()
+ ... except ValueError as e:
+ ... print(e)
+ A valid traceback must be passed if no exception is being handled
+ """
+
def test_post_mortem_complex():
"""Test post mortem traceback debugging of chained exception
@@ -1130,9 +1197,7 @@ def test_post_mortem_complex():
... try:
... main()
... except Exception as e:
- ... # same as pdb.post_mortem(e), but with custom pdb instance.
- ... instance.reset()
- ... instance.interaction(None, e)
+ ... pdb._post_mortem(e, instance)
>>> with PdbTestInput( # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... ["exceptions",
@@ -1957,6 +2022,46 @@ def test_pdb_multiline_statement():
(Pdb) c
"""
+def test_pdb_show_attribute_and_item():
+ """Test that expressions starting with pdb command names are evaluated, not treated as commands
+
+ >>> def test_function():
+ ... n = lambda x: x
+ ... c = {"a": 1}
+ ... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
+ ... pass
+
+ >>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
+ ... 'c["a"]',
+ ... 'c.get("a")',
+ ... 'n(1)',
+ ... 'j=1',
+ ... 'j+1',
+ ... 'r"a"',
+ ... 'next(iter([1]))',
+ ... 'list((0, 1))',
+ ... 'c'
+ ... ]):
+ ... test_function()
+ > (5)test_function()
+ -> pass
+ (Pdb) c["a"]
+ 1
+ (Pdb) c.get("a")
+ 1
+ (Pdb) n(1)
+ 1
+ (Pdb) j=1
+ (Pdb) j+1
+ 2
+ (Pdb) r"a"
+ 'a'
+ (Pdb) next(iter([1]))
+ 1
+ (Pdb) list((0, 1))
+ [0, 1]
+ (Pdb) c
+ """
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
@@ -2198,6 +2303,24 @@ def test_pdb_issue_gh_101517():
(Pdb) continue
"""
+def test_pdb_issue_gh_108976():
+ """See GH-108976
+ Make sure setting f_trace_opcodes = True won't crash pdb
+ >>> def test_function():
+ ... import sys
+ ... sys._getframe().f_trace_opcodes = True
+ ... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
+ ... a = 1
+ >>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
+ ... 'continue'
+ ... ]):
+ ... test_function()
+ bdb.Bdb.dispatch: unknown debugging event: 'opcode'
+ > (5)test_function()
+ -> a = 1
+ (Pdb) continue
+ """
+
def test_pdb_ambiguous_statements():
"""See GH-104301
diff --git a/Lib/test/test_peg_generator/__init__.py b/Lib/test/test_peg_generator/__init__.py
index c23542e254c99f..b32db4426f251d 100644
--- a/Lib/test/test_peg_generator/__init__.py
+++ b/Lib/test/test_peg_generator/__init__.py
@@ -1,5 +1,4 @@
import os.path
-import unittest
from test import support
from test.support import load_package_tests
diff --git a/Lib/test/test_peg_generator/test_pegen.py b/Lib/test/test_peg_generator/test_pegen.py
index 3af2c0cf47d20a..86db767b99a228 100644
--- a/Lib/test/test_peg_generator/test_pegen.py
+++ b/Lib/test/test_peg_generator/test_pegen.py
@@ -42,6 +42,15 @@ def test_parse_grammar(self) -> None:
)
self.assertEqual(repr(rules["term"]), expected_repr)
+ def test_repeated_rules(self) -> None:
+ grammar_source = """
+ start: the_rule NEWLINE
+ the_rule: 'b' NEWLINE
+ the_rule: 'a' NEWLINE
+ """
+ with self.assertRaisesRegex(GrammarError, "Repeated rule 'the_rule'"):
+ parse_string(grammar_source, GrammarParser)
+
def test_long_rule_str(self) -> None:
grammar_source = """
start: zero | one | one zero | one one | one zero zero | one zero one | one one zero | one one one
diff --git a/Lib/test/test_pep646_syntax.py b/Lib/test/test_pep646_syntax.py
index 3ffa82dc55fa23..aac089b190bc11 100644
--- a/Lib/test/test_pep646_syntax.py
+++ b/Lib/test/test_pep646_syntax.py
@@ -1,3 +1,6 @@
+import doctest
+import unittest
+
doctests = """
Setup
@@ -317,10 +320,10 @@
__test__ = {'doctests' : doctests}
-def test_main(verbose=False):
- from test import support
- from test import test_pep646_syntax
- support.run_doctest(test_pep646_syntax, verbose)
+def load_tests(loader, tests, pattern):
+ tests.addTest(doctest.DocTestSuite())
+ return tests
+
if __name__ == "__main__":
- test_main(verbose=True)
+ unittest.main()
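
The replacement above drops the old test_main() helper in favour of unittest's load_tests protocol, which folds the module's doctests into normal test discovery. A self-contained example of the same pattern (the add() function is illustrative only):

    import doctest
    import unittest

    def add(a, b):
        """Return a + b.

        >>> add(2, 3)
        5
        """
        return a + b

    def load_tests(loader, tests, pattern):
        # unittest calls this hook during collection; adding a DocTestSuite
        # makes the docstring examples run as ordinary test cases.
        tests.addTests(doctest.DocTestSuite())
        return tests

    if __name__ == "__main__":
        unittest.main()
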
diff --git a/Lib/test/test_perf_profiler.py b/Lib/test/test_perf_profiler.py
index 5418f9f35485f8..fe8707a156e9dc 100644
--- a/Lib/test/test_perf_profiler.py
+++ b/Lib/test/test_perf_profiler.py
@@ -17,6 +17,11 @@
if not support.has_subprocess_support:
raise unittest.SkipTest("test module requires subprocess")
+if support.check_sanitizer(address=True, memory=True, ub=True):
+ # gh-109580: Skip the test because it crashes randomly if Python is
+ # built with a sanitizer (ASAN/MSAN/UBSAN).
+ raise unittest.SkipTest("test crashes randomly on ASAN/MSAN/UBSAN builds")
+
def supports_trampoline_profiling():
perf_trampoline = sysconfig.get_config_var("PY_HAVE_PERF_TRAMPOLINE")
@@ -287,7 +292,6 @@ def run_perf(cwd, *args, **env_vars):
@unittest.skipUnless(perf_command_works(), "perf command doesn't work")
@unittest.skipUnless(is_unwinding_reliable(), "Unwinding is unreliable")
-@support.skip_if_sanitizer(address=True, memory=True, ub=True)
class TestPerfProfiler(unittest.TestCase):
def setUp(self):
super().setUp()
diff --git a/Lib/test/test_poll.py b/Lib/test/test_poll.py
index 02165a0244ddf4..1847ae95db9292 100644
--- a/Lib/test/test_poll.py
+++ b/Lib/test/test_poll.py
@@ -8,7 +8,7 @@
import time
import unittest
from test.support import (
- cpython_only, requires_subprocess, requires_working_socket
+ cpython_only, requires_subprocess, requires_working_socket, requires_resource
)
from test.support import threading_helper
from test.support.os_helper import TESTFN
@@ -124,6 +124,7 @@ def fileno(self):
# select(), modified to use poll() instead.
@requires_subprocess()
+ @requires_resource('walltime')
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
diff --git a/Lib/test/test_poplib.py b/Lib/test/test_poplib.py
index fa41ba0b6e4637..869f9431b928bb 100644
--- a/Lib/test/test_poplib.py
+++ b/Lib/test/test_poplib.py
@@ -29,8 +29,8 @@
import ssl
SUPPORTS_SSL = True
- CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
- CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
+ CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "certdata", "keycert3.pem")
+ CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "certdata", "pycacert.pem")
requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported')
diff --git a/Lib/test/test_py_compile.py b/Lib/test/test_py_compile.py
index 5e0a44ad9691ec..c4e6551f605782 100644
--- a/Lib/test/test_py_compile.py
+++ b/Lib/test/test_py_compile.py
@@ -132,7 +132,9 @@ def test_exceptions_propagate(self):
os.chmod(self.directory, mode.st_mode)
def test_bad_coding(self):
- bad_coding = os.path.join(os.path.dirname(__file__), 'bad_coding2.py')
+ bad_coding = os.path.join(os.path.dirname(__file__),
+ 'tokenizedata',
+ 'bad_coding2.py')
with support.captured_stderr():
self.assertIsNone(py_compile.compile(bad_coding, doraise=False))
self.assertFalse(os.path.exists(
@@ -195,7 +197,9 @@ def test_invalidation_mode(self):
self.assertEqual(flags, 0b1)
def test_quiet(self):
- bad_coding = os.path.join(os.path.dirname(__file__), 'bad_coding2.py')
+ bad_coding = os.path.join(os.path.dirname(__file__),
+ 'tokenizedata',
+ 'bad_coding2.py')
with support.captured_stderr() as stderr:
self.assertIsNone(py_compile.compile(bad_coding, doraise=False, quiet=2))
self.assertIsNone(py_compile.compile(bad_coding, doraise=True, quiet=2))
@@ -260,14 +264,18 @@ def test_with_files(self):
self.assertTrue(os.path.exists(self.cache_path))
def test_bad_syntax(self):
- bad_syntax = os.path.join(os.path.dirname(__file__), 'badsyntax_3131.py')
+ bad_syntax = os.path.join(os.path.dirname(__file__),
+ 'tokenizedata',
+ 'badsyntax_3131.py')
rc, stdout, stderr = self.pycompilecmd_failure(bad_syntax)
self.assertEqual(rc, 1)
self.assertEqual(stdout, b'')
self.assertIn(b'SyntaxError', stderr)
def test_bad_syntax_with_quiet(self):
- bad_syntax = os.path.join(os.path.dirname(__file__), 'badsyntax_3131.py')
+ bad_syntax = os.path.join(os.path.dirname(__file__),
+ 'tokenizedata',
+ 'badsyntax_3131.py')
rc, stdout, stderr = self.pycompilecmd_failure('-q', bad_syntax)
self.assertEqual(rc, 1)
self.assertEqual(stdout, b'')
diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc.py
index 499eeb98ad6138..70c5ebd694ca88 100644
--- a/Lib/test/test_pydoc.py
+++ b/Lib/test/test_pydoc.py
@@ -24,6 +24,7 @@
from io import StringIO
from collections import namedtuple
from urllib.request import urlopen, urlcleanup
+from test import support
from test.support import import_helper
from test.support import os_helper
from test.support.script_helper import (assert_python_ok,
@@ -1236,22 +1237,56 @@ def test_bound_builtin_classmethod_o(self):
self.assertEqual(self._get_summary_line(dict.__class_getitem__),
"__class_getitem__(object, /) method of builtins.type instance")
+ @support.cpython_only
def test_module_level_callable_unrepresentable_default(self):
- self.assertEqual(self._get_summary_line(getattr),
- "getattr(...)")
+ import _testcapi
+ builtin = _testcapi.func_with_unrepresentable_signature
+ self.assertEqual(self._get_summary_line(builtin),
+ "func_with_unrepresentable_signature(a, b=)")
+ @support.cpython_only
def test_builtin_staticmethod_unrepresentable_default(self):
self.assertEqual(self._get_summary_line(str.maketrans),
"maketrans(x, y=, z=, /)")
+ import _testcapi
+ cls = _testcapi.DocStringUnrepresentableSignatureTest
+ self.assertEqual(self._get_summary_line(cls.staticmeth),
+ "staticmeth(a, b=)")
+ @support.cpython_only
def test_unbound_builtin_method_unrepresentable_default(self):
self.assertEqual(self._get_summary_line(dict.pop),
"pop(self, key, default=, /)")
+ import _testcapi
+ cls = _testcapi.DocStringUnrepresentableSignatureTest
+ self.assertEqual(self._get_summary_line(cls.meth),
+ "meth(self, /, a, b=)")
+ @support.cpython_only
def test_bound_builtin_method_unrepresentable_default(self):
self.assertEqual(self._get_summary_line({}.pop),
"pop(key, default=, /) "
"method of builtins.dict instance")
+ import _testcapi
+ obj = _testcapi.DocStringUnrepresentableSignatureTest()
+ self.assertEqual(self._get_summary_line(obj.meth),
+ "meth(a, b=) "
+ "method of _testcapi.DocStringUnrepresentableSignatureTest instance")
+
+ @support.cpython_only
+ def test_unbound_builtin_classmethod_unrepresentable_default(self):
+ import _testcapi
+ cls = _testcapi.DocStringUnrepresentableSignatureTest
+ descr = cls.__dict__['classmeth']
+ self.assertEqual(self._get_summary_line(descr),
+ "classmeth(type, /, a, b=)")
+
+ @support.cpython_only
+ def test_bound_builtin_classmethod_unrepresentable_default(self):
+ import _testcapi
+ cls = _testcapi.DocStringUnrepresentableSignatureTest
+ self.assertEqual(self._get_summary_line(cls.classmeth),
+ "classmeth(a, b=) method of builtins.type instance")
def test_overridden_text_signature(self):
class C:
diff --git a/Lib/test/test_pyexpat.py b/Lib/test/test_pyexpat.py
index abe1ad517d2246..a542abaf1f35aa 100644
--- a/Lib/test/test_pyexpat.py
+++ b/Lib/test/test_pyexpat.py
@@ -1,18 +1,19 @@
# XXX TypeErrors on calling handlers, or on bad return values from a
# handler, are obscure and unhelpful.
-from io import BytesIO
import os
-import platform
import sys
import sysconfig
import unittest
import traceback
+from io import BytesIO
+from test import support
+from test.support import os_helper
from xml.parsers import expat
from xml.parsers.expat import errors
-from test.support import sortdict, is_emscripten, is_wasi
+from test.support import sortdict
class SetAttributeTest(unittest.TestCase):
@@ -439,37 +440,59 @@ def test7(self):
# Test handling of exception from callback:
class HandlerExceptionTest(unittest.TestCase):
def StartElementHandler(self, name, attrs):
- raise RuntimeError(name)
+ raise RuntimeError(f'StartElementHandler: <{name}>')
def check_traceback_entry(self, entry, filename, funcname):
- self.assertEqual(os.path.basename(entry[0]), filename)
- self.assertEqual(entry[2], funcname)
+ self.assertEqual(os.path.basename(entry.filename), filename)
+ self.assertEqual(entry.name, funcname)
+ @support.cpython_only
def test_exception(self):
+ # gh-66652: test _PyTraceback_Add() used by pyexpat.c to inject frames
+
+ # Change the current directory to the Python source code directory
+ # if it is available.
+ src_dir = sysconfig.get_config_var('abs_builddir')
+ if src_dir:
+ have_source = os.path.isdir(src_dir)
+ else:
+ have_source = False
+ if have_source:
+ with os_helper.change_cwd(src_dir):
+ self._test_exception(have_source)
+ else:
+ self._test_exception(have_source)
+
+ def _test_exception(self, have_source):
+ # Use a path relative to the current directory, which should be the
+ # Python source code directory (if it is available).
+ PYEXPAT_C = os.path.join('Modules', 'pyexpat.c')
+
parser = expat.ParserCreate()
parser.StartElementHandler = self.StartElementHandler
try:
parser.Parse(b"", True)
- self.fail()
- except RuntimeError as e:
- self.assertEqual(e.args[0], 'a',
- "Expected RuntimeError for element 'a', but" + \
- " found %r" % e.args[0])
- # Check that the traceback contains the relevant line in pyexpat.c
- entries = traceback.extract_tb(e.__traceback__)
- self.assertEqual(len(entries), 3)
- self.check_traceback_entry(entries[0],
- "test_pyexpat.py", "test_exception")
- self.check_traceback_entry(entries[1],
- "pyexpat.c", "StartElement")
- self.check_traceback_entry(entries[2],
- "test_pyexpat.py", "StartElementHandler")
- if (sysconfig.is_python_build()
- and not (sys.platform == 'win32' and platform.machine() == 'ARM')
- and not is_emscripten
- and not is_wasi
- ):
- self.assertIn('call_with_frame("StartElement"', entries[1][3])
+
+ self.fail("the parser did not raise RuntimeError")
+ except RuntimeError as exc:
+ self.assertEqual(exc.args[0], 'StartElementHandler: ', exc)
+ entries = traceback.extract_tb(exc.__traceback__)
+
+ self.assertEqual(len(entries), 3, entries)
+ self.check_traceback_entry(entries[0],
+ "test_pyexpat.py", "_test_exception")
+ self.check_traceback_entry(entries[1],
+ os.path.basename(PYEXPAT_C),
+ "StartElement")
+ self.check_traceback_entry(entries[2],
+ "test_pyexpat.py", "StartElementHandler")
+
+ # Check that the traceback contains the relevant line in
+ # Modules/pyexpat.c. Skip the test if Modules/pyexpat.c is not
+ # available.
+ if have_source and os.path.exists(PYEXPAT_C):
+ self.assertIn('call_with_frame("StartElement"',
+ entries[1].line)
# Test Current* members:
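
The rewritten pyexpat test above switches from indexing traceback tuples (entry[0], entry[2]) to the named attributes of traceback.FrameSummary. A short reminder of what traceback.extract_tb() returns:

    import traceback

    def inner():
        raise RuntimeError("boom")

    def outer():
        inner()

    try:
        outer()
    except RuntimeError as exc:
        for entry in traceback.extract_tb(exc.__traceback__):
            # Each entry is a traceback.FrameSummary with named fields.
            print(entry.filename, entry.lineno, entry.name, entry.line)
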
diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py
index 0c1400c8105037..408e667fffa0f0 100644
--- a/Lib/test/test_regrtest.py
+++ b/Lib/test/test_regrtest.py
@@ -5,22 +5,27 @@
"""
import contextlib
+import dataclasses
import glob
import io
import locale
import os.path
import platform
+import random
import re
+import shlex
import subprocess
import sys
import sysconfig
import tempfile
import textwrap
import unittest
-from test import libregrtest
from test import support
-from test.support import os_helper
-from test.libregrtest import utils, setup
+from test.support import os_helper, TestStats, without_optimizer
+from test.libregrtest import cmdline
+from test.libregrtest import utils
+from test.libregrtest import setup
+from test.libregrtest.utils import normalize_test_name
if not support.has_subprocess_support:
raise unittest.SkipTest("test module requires subprocess")
@@ -32,6 +37,7 @@
EXITCODE_BAD_TEST = 2
EXITCODE_ENV_CHANGED = 3
EXITCODE_NO_TESTS_RAN = 4
+EXITCODE_RERUN_FAIL = 5
EXITCODE_INTERRUPTED = 130
TEST_INTERRUPTED = textwrap.dedent("""
@@ -49,9 +55,13 @@ class ParseArgsTestCase(unittest.TestCase):
Test regrtest's argument parsing, function _parse_args().
"""
+ @staticmethod
+ def parse_args(args):
+ return cmdline._parse_args(args)
+
def checkError(self, args, msg):
with support.captured_stderr() as err, self.assertRaises(SystemExit):
- libregrtest._parse_args(args)
+ self.parse_args(args)
self.assertIn(msg, err.getvalue())
def test_help(self):
@@ -59,83 +69,78 @@ def test_help(self):
with self.subTest(opt=opt):
with support.captured_stdout() as out, \
self.assertRaises(SystemExit):
- libregrtest._parse_args([opt])
+ self.parse_args([opt])
self.assertIn('Run Python regression tests.', out.getvalue())
def test_timeout(self):
- ns = libregrtest._parse_args(['--timeout', '4.2'])
+ ns = self.parse_args(['--timeout', '4.2'])
self.assertEqual(ns.timeout, 4.2)
self.checkError(['--timeout'], 'expected one argument')
self.checkError(['--timeout', 'foo'], 'invalid float value')
def test_wait(self):
- ns = libregrtest._parse_args(['--wait'])
+ ns = self.parse_args(['--wait'])
self.assertTrue(ns.wait)
- def test_worker_args(self):
- ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
- self.assertEqual(ns.worker_args, '[[], {}]')
- self.checkError(['--worker-args'], 'expected one argument')
-
def test_start(self):
for opt in '-S', '--start':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, 'foo'])
+ ns = self.parse_args([opt, 'foo'])
self.assertEqual(ns.start, 'foo')
self.checkError([opt], 'expected one argument')
def test_verbose(self):
- ns = libregrtest._parse_args(['-v'])
+ ns = self.parse_args(['-v'])
self.assertEqual(ns.verbose, 1)
- ns = libregrtest._parse_args(['-vvv'])
+ ns = self.parse_args(['-vvv'])
self.assertEqual(ns.verbose, 3)
- ns = libregrtest._parse_args(['--verbose'])
+ ns = self.parse_args(['--verbose'])
self.assertEqual(ns.verbose, 1)
- ns = libregrtest._parse_args(['--verbose'] * 3)
+ ns = self.parse_args(['--verbose'] * 3)
self.assertEqual(ns.verbose, 3)
- ns = libregrtest._parse_args([])
+ ns = self.parse_args([])
self.assertEqual(ns.verbose, 0)
- def test_verbose2(self):
- for opt in '-w', '--verbose2':
+ def test_rerun(self):
+ for opt in '-w', '--rerun', '--verbose2':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
- self.assertTrue(ns.verbose2)
+ ns = self.parse_args([opt])
+ self.assertTrue(ns.rerun)
def test_verbose3(self):
for opt in '-W', '--verbose3':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.verbose3)
def test_quiet(self):
for opt in '-q', '--quiet':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
def test_slowest(self):
for opt in '-o', '--slowest':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.print_slow)
def test_header(self):
- ns = libregrtest._parse_args(['--header'])
+ ns = self.parse_args(['--header'])
self.assertTrue(ns.header)
- ns = libregrtest._parse_args(['--verbose'])
+ ns = self.parse_args(['--verbose'])
self.assertTrue(ns.header)
def test_randomize(self):
for opt in '-r', '--randomize':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.randomize)
def test_randseed(self):
- ns = libregrtest._parse_args(['--randseed', '12345'])
+ ns = self.parse_args(['--randseed', '12345'])
self.assertEqual(ns.random_seed, 12345)
self.assertTrue(ns.randomize)
self.checkError(['--randseed'], 'expected one argument')
@@ -144,7 +149,7 @@ def test_randseed(self):
def test_fromfile(self):
for opt in '-f', '--fromfile':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, 'foo'])
+ ns = self.parse_args([opt, 'foo'])
self.assertEqual(ns.fromfile, 'foo')
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo', '-s'], "don't go together")
@@ -152,20 +157,20 @@ def test_fromfile(self):
def test_exclude(self):
for opt in '-x', '--exclude':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.exclude)
def test_single(self):
for opt in '-s', '--single':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.single)
self.checkError([opt, '-f', 'foo'], "don't go together")
def test_ignore(self):
for opt in '-i', '--ignore':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, 'pattern'])
+ ns = self.parse_args([opt, 'pattern'])
self.assertEqual(ns.ignore_tests, ['pattern'])
self.checkError([opt], 'expected one argument')
@@ -175,7 +180,7 @@ def test_ignore(self):
print('matchfile2', file=fp)
filename = os.path.abspath(os_helper.TESTFN)
- ns = libregrtest._parse_args(['-m', 'match',
+ ns = self.parse_args(['-m', 'match',
'--ignorefile', filename])
self.assertEqual(ns.ignore_tests,
['matchfile1', 'matchfile2'])
@@ -183,11 +188,11 @@ def test_ignore(self):
def test_match(self):
for opt in '-m', '--match':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, 'pattern'])
+ ns = self.parse_args([opt, 'pattern'])
self.assertEqual(ns.match_tests, ['pattern'])
self.checkError([opt], 'expected one argument')
- ns = libregrtest._parse_args(['-m', 'pattern1',
+ ns = self.parse_args(['-m', 'pattern1',
'-m', 'pattern2'])
self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])
@@ -197,7 +202,7 @@ def test_match(self):
print('matchfile2', file=fp)
filename = os.path.abspath(os_helper.TESTFN)
- ns = libregrtest._parse_args(['-m', 'match',
+ ns = self.parse_args(['-m', 'match',
'--matchfile', filename])
self.assertEqual(ns.match_tests,
['match', 'matchfile1', 'matchfile2'])
@@ -205,65 +210,65 @@ def test_match(self):
def test_failfast(self):
for opt in '-G', '--failfast':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, '-v'])
+ ns = self.parse_args([opt, '-v'])
self.assertTrue(ns.failfast)
- ns = libregrtest._parse_args([opt, '-W'])
+ ns = self.parse_args([opt, '-W'])
self.assertTrue(ns.failfast)
self.checkError([opt], '-G/--failfast needs either -v or -W')
def test_use(self):
for opt in '-u', '--use':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, 'gui,network'])
+ ns = self.parse_args([opt, 'gui,network'])
self.assertEqual(ns.use_resources, ['gui', 'network'])
- ns = libregrtest._parse_args([opt, 'gui,none,network'])
+ ns = self.parse_args([opt, 'gui,none,network'])
self.assertEqual(ns.use_resources, ['network'])
- expected = list(libregrtest.ALL_RESOURCES)
+ expected = list(cmdline.ALL_RESOURCES)
expected.remove('gui')
- ns = libregrtest._parse_args([opt, 'all,-gui'])
+ ns = self.parse_args([opt, 'all,-gui'])
self.assertEqual(ns.use_resources, expected)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid resource')
# all + a resource not part of "all"
- ns = libregrtest._parse_args([opt, 'all,tzdata'])
+ ns = self.parse_args([opt, 'all,tzdata'])
self.assertEqual(ns.use_resources,
- list(libregrtest.ALL_RESOURCES) + ['tzdata'])
+ list(cmdline.ALL_RESOURCES) + ['tzdata'])
# test another resource which is not part of "all"
- ns = libregrtest._parse_args([opt, 'extralargefile'])
+ ns = self.parse_args([opt, 'extralargefile'])
self.assertEqual(ns.use_resources, ['extralargefile'])
def test_memlimit(self):
for opt in '-M', '--memlimit':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, '4G'])
+ ns = self.parse_args([opt, '4G'])
self.assertEqual(ns.memlimit, '4G')
self.checkError([opt], 'expected one argument')
def test_testdir(self):
- ns = libregrtest._parse_args(['--testdir', 'foo'])
+ ns = self.parse_args(['--testdir', 'foo'])
self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
self.checkError(['--testdir'], 'expected one argument')
def test_runleaks(self):
for opt in '-L', '--runleaks':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.runleaks)
def test_huntrleaks(self):
for opt in '-R', '--huntrleaks':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, ':'])
+ ns = self.parse_args([opt, ':'])
self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
- ns = libregrtest._parse_args([opt, '6:'])
+ ns = self.parse_args([opt, '6:'])
self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
- ns = libregrtest._parse_args([opt, ':3'])
+ ns = self.parse_args([opt, ':3'])
self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
- ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
+ ns = self.parse_args([opt, '6:3:leaks.log'])
self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
self.checkError([opt], 'expected one argument')
self.checkError([opt, '6'],
@@ -274,7 +279,7 @@ def test_huntrleaks(self):
def test_multiprocess(self):
for opt in '-j', '--multiprocess':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, '2'])
+ ns = self.parse_args([opt, '2'])
self.assertEqual(ns.use_mp, 2)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid int value')
@@ -284,13 +289,13 @@ def test_multiprocess(self):
def test_coverage(self):
for opt in '-T', '--coverage':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.trace)
def test_coverdir(self):
for opt in '-D', '--coverdir':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, 'foo'])
+ ns = self.parse_args([opt, 'foo'])
self.assertEqual(ns.coverdir,
os.path.join(os_helper.SAVEDCWD, 'foo'))
self.checkError([opt], 'expected one argument')
@@ -298,13 +303,13 @@ def test_coverdir(self):
def test_nocoverdir(self):
for opt in '-N', '--nocoverdir':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertIsNone(ns.coverdir)
def test_threshold(self):
for opt in '-t', '--threshold':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt, '1000'])
+ ns = self.parse_args([opt, '1000'])
self.assertEqual(ns.threshold, 1000)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid int value')
@@ -313,7 +318,7 @@ def test_nowindows(self):
for opt in '-n', '--nowindows':
with self.subTest(opt=opt):
with contextlib.redirect_stderr(io.StringIO()) as stderr:
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.nowindows)
err = stderr.getvalue()
self.assertIn('the --nowindows (-n) option is deprecated', err)
@@ -321,39 +326,39 @@ def test_nowindows(self):
def test_forever(self):
for opt in '-F', '--forever':
with self.subTest(opt=opt):
- ns = libregrtest._parse_args([opt])
+ ns = self.parse_args([opt])
self.assertTrue(ns.forever)
def test_unrecognized_argument(self):
self.checkError(['--xxx'], 'usage:')
def test_long_option__partial(self):
- ns = libregrtest._parse_args(['--qui'])
+ ns = self.parse_args(['--qui'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
def test_two_options(self):
- ns = libregrtest._parse_args(['--quiet', '--exclude'])
+ ns = self.parse_args(['--quiet', '--exclude'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
self.assertTrue(ns.exclude)
def test_option_with_empty_string_value(self):
- ns = libregrtest._parse_args(['--start', ''])
+ ns = self.parse_args(['--start', ''])
self.assertEqual(ns.start, '')
def test_arg(self):
- ns = libregrtest._parse_args(['foo'])
+ ns = self.parse_args(['foo'])
self.assertEqual(ns.args, ['foo'])
def test_option_and_arg(self):
- ns = libregrtest._parse_args(['--quiet', 'foo'])
+ ns = self.parse_args(['--quiet', 'foo'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
self.assertEqual(ns.args, ['foo'])
def test_arg_option_arg(self):
- ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
+ ns = self.parse_args(['test_unaryop', '-v', 'test_binop'])
self.assertEqual(ns.verbose, 1)
self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])
@@ -362,6 +367,13 @@ def test_unknown_option(self):
'unrecognized arguments: --unknown-option')
+@dataclasses.dataclass(slots=True)
+class Rerun:
+ name: str
+ match: str | None
+ success: bool
+
+
class BaseTestCase(unittest.TestCase):
TEST_UNIQUE_ID = 1
TESTNAME_PREFIX = 'test_regrtest_'
@@ -409,8 +421,12 @@ def regex_search(self, regex, output):
self.fail("%r not found in %r" % (regex, output))
return match
- def check_line(self, output, regex):
- regex = re.compile(r'^' + regex, re.MULTILINE)
+ def check_line(self, output, pattern, full=False, regex=True):
+ if not regex:
+ pattern = re.escape(pattern)
+ if full:
+ pattern += '\n'
+ regex = re.compile(r'^' + pattern, re.MULTILINE)
self.assertRegex(output, regex)
def parse_executed_tests(self, output):
@@ -419,29 +435,47 @@ def parse_executed_tests(self, output):
parser = re.finditer(regex, output, re.MULTILINE)
return list(match.group(1) for match in parser)
- def check_executed_tests(self, output, tests, skipped=(), failed=(),
+ def check_executed_tests(self, output, tests, *, stats,
+ skipped=(), failed=(),
env_changed=(), omitted=(),
- rerun={}, no_test_ran=(),
- randomize=False, interrupted=False,
- fail_env_changed=False):
+ rerun=None, run_no_tests=(),
+ resource_denied=(),
+ randomize=False, parallel=False, interrupted=False,
+ fail_env_changed=False,
+ forever=False, filtered=False):
if isinstance(tests, str):
tests = [tests]
if isinstance(skipped, str):
skipped = [skipped]
+ if isinstance(resource_denied, str):
+ resource_denied = [resource_denied]
if isinstance(failed, str):
failed = [failed]
if isinstance(env_changed, str):
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
- if isinstance(no_test_ran, str):
- no_test_ran = [no_test_ran]
+ if isinstance(run_no_tests, str):
+ run_no_tests = [run_no_tests]
+ if isinstance(stats, int):
+ stats = TestStats(stats)
+ if parallel:
+ randomize = True
+
+ rerun_failed = []
+ if rerun is not None:
+ failed = [rerun.name]
+ if not rerun.success:
+ rerun_failed.append(rerun.name)
executed = self.parse_executed_tests(output)
+ total_tests = list(tests)
+ if rerun is not None:
+ total_tests.append(rerun.name)
if randomize:
- self.assertEqual(set(executed), set(tests), output)
+ self.assertEqual(set(executed), set(total_tests), output)
else:
- self.assertEqual(executed, tests, output)
+ self.assertEqual(executed, total_tests, output)
def plural(count):
return 's' if count != 1 else ''
@@ -457,6 +491,10 @@ def list_regex(line_format, tests):
regex = list_regex('%s test%s skipped', skipped)
self.check_line(output, regex)
+ if resource_denied:
+ regex = list_regex(r'%s test%s skipped \(resource denied\)', resource_denied)
+ self.check_line(output, regex)
+
if failed:
regex = list_regex('%s test%s failed', failed)
self.check_line(output, regex)
@@ -470,53 +508,95 @@ def list_regex(line_format, tests):
regex = list_regex('%s test%s omitted', omitted)
self.check_line(output, regex)
- if rerun:
- regex = list_regex('%s re-run test%s', rerun.keys())
+ if rerun is not None:
+ regex = list_regex('%s re-run test%s', [rerun.name])
self.check_line(output, regex)
- regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
+ regex = LOG_PREFIX + r"Re-running 1 failed tests in verbose mode"
+ self.check_line(output, regex)
+ regex = fr"Re-running {rerun.name} in verbose mode"
+ if rerun.match:
+ regex = fr"{regex} \(matching: {rerun.match}\)"
self.check_line(output, regex)
- for name, match in rerun.items():
- regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
- self.check_line(output, regex)
- if no_test_ran:
- regex = list_regex('%s test%s run no tests', no_test_ran)
+ if run_no_tests:
+ regex = list_regex('%s test%s run no tests', run_no_tests)
self.check_line(output, regex)
- good = (len(tests) - len(skipped) - len(failed)
- - len(omitted) - len(env_changed) - len(no_test_ran))
+ good = (len(tests) - len(skipped) - len(resource_denied) - len(failed)
+ - len(omitted) - len(env_changed) - len(run_no_tests))
if good:
- regex = r'%s test%s OK\.$' % (good, plural(good))
- if not skipped and not failed and good > 1:
+ regex = r'%s test%s OK\.' % (good, plural(good))
+ if not skipped and not failed and (rerun is None or rerun.success) and good > 1:
regex = 'All %s' % regex
- self.check_line(output, regex)
+ self.check_line(output, regex, full=True)
if interrupted:
self.check_line(output, 'Test suite interrupted by signal SIGINT.')
- result = []
+ # Total tests
+ text = f'run={stats.tests_run:,}'
+ if filtered:
+ text = fr'{text} \(filtered\)'
+ parts = [text]
+ if stats.failures:
+ parts.append(f'failures={stats.failures:,}')
+ if stats.skipped:
+ parts.append(f'skipped={stats.skipped:,}')
+ line = fr'Total tests: {" ".join(parts)}'
+ self.check_line(output, line, full=True)
+
+ # Total test files
+ run = len(total_tests) - len(resource_denied)
+ if rerun is not None:
+ total_failed = len(rerun_failed)
+ total_rerun = 1
+ else:
+ total_failed = len(failed)
+ total_rerun = 0
+ if interrupted:
+ run = 0
+ text = f'run={run}'
+ if not forever:
+ text = f'{text}/{len(tests)}'
+ if filtered:
+ text = fr'{text} \(filtered\)'
+ report = [text]
+ for name, ntest in (
+ ('failed', total_failed),
+ ('env_changed', len(env_changed)),
+ ('skipped', len(skipped)),
+ ('resource_denied', len(resource_denied)),
+ ('rerun', total_rerun),
+ ('run_no_tests', len(run_no_tests)),
+ ):
+ if ntest:
+ report.append(f'{name}={ntest}')
+ line = fr'Total test files: {" ".join(report)}'
+ self.check_line(output, line, full=True)
+
+ # Result
+ state = []
if failed:
- result.append('FAILURE')
+ state.append('FAILURE')
elif fail_env_changed and env_changed:
- result.append('ENV CHANGED')
+ state.append('ENV CHANGED')
if interrupted:
- result.append('INTERRUPTED')
- if not any((good, result, failed, interrupted, skipped,
+ state.append('INTERRUPTED')
+ if not any((good, failed, interrupted, skipped,
env_changed, fail_env_changed)):
- result.append("NO TESTS RAN")
- elif not result:
- result.append('SUCCESS')
- result = ', '.join(result)
- if rerun:
- self.check_line(output, 'Tests result: FAILURE')
- result = 'FAILURE then %s' % result
-
- self.check_line(output, 'Tests result: %s' % result)
+ state.append("NO TESTS RAN")
+ elif not state:
+ state.append('SUCCESS')
+ state = ', '.join(state)
+ if rerun is not None:
+ new_state = 'SUCCESS' if rerun.success else 'FAILURE'
+ state = 'FAILURE then ' + new_state
+ self.check_line(output, f'Result: {state}', full=True)
def parse_random_seed(self, output):
match = self.regex_search(r'Using random seed ([0-9]+)', output)
randseed = int(match.group(1))
- self.assertTrue(0 <= randseed <= 10000000, randseed)
+ self.assertTrue(0 <= randseed <= 100_000_000, randseed)
return randseed
def run_command(self, args, input=None, exitcode=0, **kw):
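
The reworked check_executed_tests() above rebuilds the expected "Total tests: ..." summary line from a TestStats value. A standalone rendering of that derivation, using a stand-in dataclass with the same three fields the hunk relies on (tests_run, failures, skipped):

    from dataclasses import dataclass

    @dataclass
    class Stats:  # stand-in for test.support.TestStats
        tests_run: int = 0
        failures: int = 0
        skipped: int = 0

    def total_tests_line(stats, filtered=False):
        parts = [f"run={stats.tests_run:,}"]
        if filtered:
            parts[0] += " (filtered)"
        if stats.failures:
            parts.append(f"failures={stats.failures:,}")
        if stats.skipped:
            parts.append(f"skipped={stats.skipped:,}")
        return "Total tests: " + " ".join(parts)

    print(total_tests_line(Stats(3, 1)))          # Total tests: run=3 failures=1
    print(total_tests_line(Stats(1, skipped=1)))  # Total tests: run=1 skipped=1
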
@@ -530,13 +610,13 @@ def run_command(self, args, input=None, exitcode=0, **kw):
stdout=subprocess.PIPE,
**kw)
if proc.returncode != exitcode:
- msg = ("Command %s failed with exit code %s\n"
+ msg = ("Command %s failed with exit code %s, but exit code %s expected!\n"
"\n"
"stdout:\n"
"---\n"
"%s\n"
"---\n"
- % (str(args), proc.returncode, proc.stdout))
+ % (str(args), proc.returncode, exitcode, proc.stdout))
if proc.stderr:
msg += ("\n"
"stderr:\n"
@@ -604,7 +684,8 @@ def setUp(self):
def check_output(self, output):
self.parse_random_seed(output)
- self.check_executed_tests(output, self.tests, randomize=True)
+ self.check_executed_tests(output, self.tests,
+ randomize=True, stats=len(self.tests))
def run_tests(self, args):
output = self.run_python(args)
@@ -704,6 +785,40 @@ def run_tests(self, *testargs, **kw):
cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
return self.run_python(cmdargs, **kw)
+ def test_success(self):
+ code = textwrap.dedent("""
+ import unittest
+
+ class PassingTests(unittest.TestCase):
+ def test_test1(self):
+ pass
+
+ def test_test2(self):
+ pass
+
+ def test_test3(self):
+ pass
+ """)
+ tests = [self.create_test(f'ok{i}', code=code) for i in range(1, 6)]
+
+ output = self.run_tests(*tests)
+ self.check_executed_tests(output, tests,
+ stats=3 * len(tests))
+
+ def test_skip(self):
+ code = textwrap.dedent("""
+ import unittest
+ raise unittest.SkipTest("nope")
+ """)
+ test_ok = self.create_test('ok')
+ test_skip = self.create_test('skip', code=code)
+ tests = [test_ok, test_skip]
+
+ output = self.run_tests(*tests)
+ self.check_executed_tests(output, tests,
+ skipped=[test_skip],
+ stats=1)
+
def test_failing_test(self):
# test a failing test
code = textwrap.dedent("""
@@ -718,7 +833,8 @@ def test_failing(self):
tests = [test_ok, test_failing]
output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST)
- self.check_executed_tests(output, tests, failed=test_failing)
+ self.check_executed_tests(output, tests, failed=test_failing,
+ stats=TestStats(2, 1))
def test_resources(self):
# test -u command line option
@@ -737,17 +853,19 @@ def test_pass(self):
# -u all: 2 resources enabled
output = self.run_tests('-u', 'all', *test_names)
- self.check_executed_tests(output, test_names)
+ self.check_executed_tests(output, test_names, stats=2)
# -u audio: 1 resource enabled
output = self.run_tests('-uaudio', *test_names)
self.check_executed_tests(output, test_names,
- skipped=tests['network'])
+ resource_denied=tests['network'],
+ stats=1)
# no option: 0 resources enabled
- output = self.run_tests(*test_names)
+ output = self.run_tests(*test_names, exitcode=EXITCODE_NO_TESTS_RAN)
self.check_executed_tests(output, test_names,
- skipped=test_names)
+ resource_denied=test_names,
+ stats=0)
def test_random(self):
# test -r and --randseed command line option
@@ -795,7 +913,8 @@ def test_fromfile(self):
previous = name
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ stats = len(tests)
+ self.check_executed_tests(output, tests, stats=stats)
# test format '[2/7] test_opcodes'
with open(filename, "w") as fp:
@@ -803,7 +922,7 @@ def test_fromfile(self):
print("[%s/%s] %s" % (index, len(tests), name), file=fp)
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=stats)
# test format 'test_opcodes'
with open(filename, "w") as fp:
@@ -811,7 +930,7 @@ def test_fromfile(self):
print(name, file=fp)
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=stats)
# test format 'Lib/test/test_opcodes.py'
with open(filename, "w") as fp:
@@ -819,20 +938,20 @@ def test_fromfile(self):
print('Lib/test/%s.py' % name, file=fp)
output = self.run_tests('--fromfile', filename)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=stats)
def test_interrupted(self):
code = TEST_INTERRUPTED
test = self.create_test('sigint', code=code)
output = self.run_tests(test, exitcode=EXITCODE_INTERRUPTED)
self.check_executed_tests(output, test, omitted=test,
- interrupted=True)
+ interrupted=True, stats=0)
def test_slowest(self):
# test --slowest
tests = [self.create_test() for index in range(3)]
output = self.run_tests("--slowest", *tests)
- self.check_executed_tests(output, tests)
+ self.check_executed_tests(output, tests, stats=len(tests))
regex = ('10 slowest tests:\n'
'(?:- %s: .*\n){%s}'
% (self.TESTNAME_REGEX, len(tests)))
@@ -851,7 +970,8 @@ def test_slowest_interrupted(self):
args = ("--slowest", test)
output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED)
self.check_executed_tests(output, test,
- omitted=test, interrupted=True)
+ omitted=test, interrupted=True,
+ stats=0)
regex = ('10 slowest tests:\n')
self.check_line(output, regex)
@@ -860,7 +980,7 @@ def test_coverage(self):
# test --coverage
test = self.create_test('coverage')
output = self.run_tests("--coverage", test)
- self.check_executed_tests(output, [test])
+ self.check_executed_tests(output, [test], stats=1)
regex = (r'lines +cov% +module +\(path\)\n'
r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
self.check_line(output, regex)
@@ -889,20 +1009,38 @@ def test_run(self):
builtins.__dict__['RUN'] = 1
""")
test = self.create_test('forever', code=code)
- output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
- self.check_executed_tests(output, [test]*3, failed=test)
- def check_leak(self, code, what):
+ # --forever
+ output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, [test]*3, failed=test,
+ stats=TestStats(3, 1),
+ forever=True)
+
+ # --forever --rerun
+ output = self.run_tests('--forever', '--rerun', test, exitcode=0)
+ self.check_executed_tests(output, [test]*3,
+ rerun=Rerun(test,
+ match='test_run',
+ success=True),
+ stats=TestStats(4, 1),
+ forever=True)
+
+ @without_optimizer
+ def check_leak(self, code, what, *, run_workers=False):
test = self.create_test('huntrleaks', code=code)
filename = 'reflog.txt'
self.addCleanup(os_helper.unlink, filename)
- output = self.run_tests('--huntrleaks', '6:3:', test,
+ cmd = ['--huntrleaks', '3:3:']
+ if run_workers:
+ cmd.append('-j1')
+ cmd.append(test)
+ output = self.run_tests(*cmd,
exitcode=EXITCODE_BAD_TEST,
stderr=subprocess.STDOUT)
- self.check_executed_tests(output, [test], failed=test)
+ self.check_executed_tests(output, [test], failed=test, stats=1)
- line = 'beginning 9 repetitions\n123456789\n.........\n'
+ line = 'beginning 6 repetitions\n123456\n......\n'
self.check_line(output, re.escape(line))
line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
@@ -913,7 +1051,7 @@ def check_leak(self, code, what):
self.assertIn(line2, reflog)
@unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
- def test_huntrleaks(self):
+ def check_huntrleaks(self, *, run_workers: bool):
# test --huntrleaks
code = textwrap.dedent("""
import unittest
@@ -924,7 +1062,13 @@ class RefLeakTest(unittest.TestCase):
def test_leak(self):
GLOBAL_LIST.append(object())
""")
- self.check_leak(code, 'references')
+ self.check_leak(code, 'references', run_workers=run_workers)
+
+ def test_huntrleaks(self):
+ self.check_huntrleaks(run_workers=False)
+
+ def test_huntrleaks_mp(self):
+ self.check_huntrleaks(run_workers=True)
@unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
def test_huntrleaks_fd_leak(self):
@@ -982,7 +1126,7 @@ def test_crashed(self):
tests = [crash_test]
output = self.run_tests("-j2", *tests, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, tests, failed=crash_test,
- randomize=True)
+ parallel=True, stats=0)
def parse_methods(self, output):
regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
@@ -1002,8 +1146,6 @@ def test_method3(self):
def test_method4(self):
pass
""")
- all_methods = ['test_method1', 'test_method2',
- 'test_method3', 'test_method4']
testname = self.create_test(code=code)
# only run a subset
@@ -1077,13 +1219,14 @@ def test_env_changed(self):
# don't fail by default
output = self.run_tests(testname)
- self.check_executed_tests(output, [testname], env_changed=testname)
+ self.check_executed_tests(output, [testname],
+ env_changed=testname, stats=1)
# fail with --fail-env-changed
output = self.run_tests("--fail-env-changed", testname,
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname], env_changed=testname,
- fail_env_changed=True)
+ fail_env_changed=True, stats=1)
def test_rerun_fail(self):
# FAILURE then FAILURE
@@ -1100,30 +1243,55 @@ def test_fail_always(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
- failed=testname, rerun={testname: "test_fail_always"})
+ rerun=Rerun(testname,
+ "test_fail_always",
+ success=False),
+ stats=TestStats(3, 2))
def test_rerun_success(self):
# FAILURE then SUCCESS
- code = textwrap.dedent("""
- import builtins
+ marker_filename = os.path.abspath("regrtest_marker_filename")
+ self.addCleanup(os_helper.unlink, marker_filename)
+ self.assertFalse(os.path.exists(marker_filename))
+
+ code = textwrap.dedent(f"""
+ import os.path
import unittest
+ marker_filename = {marker_filename!r}
+
class Tests(unittest.TestCase):
def test_succeed(self):
return
def test_fail_once(self):
- if not hasattr(builtins, '_test_failed'):
- builtins._test_failed = True
+ if not os.path.exists(marker_filename):
+ open(marker_filename, "w").close()
self.fail("bug")
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=0)
+ # FAILURE then SUCCESS => exit code 0
+ output = self.run_tests("--rerun", testname, exitcode=0)
+ self.check_executed_tests(output, [testname],
+ rerun=Rerun(testname,
+ match="test_fail_once",
+ success=True),
+ stats=TestStats(3, 1))
+ os_helper.unlink(marker_filename)
+
+ # with --fail-rerun, exit code EXITCODE_RERUN_FAIL
+ # on "FAILURE then SUCCESS" state.
+ output = self.run_tests("--rerun", "--fail-rerun", testname,
+ exitcode=EXITCODE_RERUN_FAIL)
self.check_executed_tests(output, [testname],
- rerun={testname: "test_fail_once"})
+ rerun=Rerun(testname,
+ match="test_fail_once",
+ success=True),
+ stats=TestStats(3, 1))
+ os_helper.unlink(marker_filename)
def test_rerun_setup_class_hook_failure(self):
# FAILURE then FAILURE
@@ -1140,10 +1308,13 @@ def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "ExampleTests"})
+ rerun=Rerun(testname,
+ match="ExampleTests",
+ success=False),
+ stats=0)
def test_rerun_teardown_class_hook_failure(self):
# FAILURE then FAILURE
@@ -1160,10 +1331,13 @@ def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "ExampleTests"})
+ rerun=Rerun(testname,
+ match="ExampleTests",
+ success=False),
+ stats=2)
def test_rerun_setup_module_hook_failure(self):
# FAILURE then FAILURE
@@ -1179,10 +1353,13 @@ def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: testname})
+ rerun=Rerun(testname,
+ match=None,
+ success=False),
+ stats=0)
def test_rerun_teardown_module_hook_failure(self):
# FAILURE then FAILURE
@@ -1198,10 +1375,13 @@ def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
- self.check_executed_tests(output, testname,
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
+ self.check_executed_tests(output, [testname],
failed=[testname],
- rerun={testname: testname})
+ rerun=Rerun(testname,
+ match=None,
+ success=False),
+ stats=2)
def test_rerun_setup_hook_failure(self):
# FAILURE then FAILURE
@@ -1217,10 +1397,13 @@ def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "test_success"})
+ rerun=Rerun(testname,
+ match="test_success",
+ success=False),
+ stats=2)
def test_rerun_teardown_hook_failure(self):
# FAILURE then FAILURE
@@ -1236,10 +1419,13 @@ def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "test_success"})
+ rerun=Rerun(testname,
+ match="test_success",
+ success=False),
+ stats=2)
def test_rerun_async_setup_hook_failure(self):
# FAILURE then FAILURE
@@ -1255,10 +1441,12 @@ async def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
- failed=[testname],
- rerun={testname: "test_success"})
+ rerun=Rerun(testname,
+ match="test_success",
+ success=False),
+ stats=2)
def test_rerun_async_teardown_hook_failure(self):
# FAILURE then FAILURE
@@ -1274,10 +1462,13 @@ async def test_success(self):
""")
testname = self.create_test(code=code)
- output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
+ output = self.run_tests("--rerun", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
- rerun={testname: "test_success"})
+ rerun=Rerun(testname,
+ match="test_success",
+ success=False),
+ stats=2)
def test_no_tests_ran(self):
code = textwrap.dedent("""
@@ -1291,7 +1482,9 @@ def test_bug(self):
output = self.run_tests(testname, "-m", "nosuchtest",
exitcode=EXITCODE_NO_TESTS_RAN)
- self.check_executed_tests(output, [testname], no_test_ran=testname)
+ self.check_executed_tests(output, [testname],
+ run_no_tests=testname,
+ stats=0, filtered=True)
def test_no_tests_ran_skip(self):
code = textwrap.dedent("""
@@ -1304,7 +1497,8 @@ def test_skipped(self):
testname = self.create_test(code=code)
output = self.run_tests(testname)
- self.check_executed_tests(output, [testname])
+ self.check_executed_tests(output, [testname],
+ stats=TestStats(1, skipped=1))
def test_no_tests_ran_multiple_tests_nonexistent(self):
code = textwrap.dedent("""
@@ -1320,7 +1514,8 @@ def test_bug(self):
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
exitcode=EXITCODE_NO_TESTS_RAN)
self.check_executed_tests(output, [testname, testname2],
- no_test_ran=[testname, testname2])
+ run_no_tests=[testname, testname2],
+ stats=0, filtered=True)
def test_no_test_ran_some_test_exist_some_not(self):
code = textwrap.dedent("""
@@ -1343,7 +1538,8 @@ def test_other_bug(self):
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
"-m", "test_other_bug", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
- no_test_ran=[testname])
+ run_no_tests=[testname],
+ stats=1, filtered=True)
@support.cpython_only
def test_uncollectable(self):
@@ -1370,7 +1566,8 @@ def test_garbage(self):
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
def test_multiprocessing_timeout(self):
code = textwrap.dedent(r"""
@@ -1396,7 +1593,7 @@ def test_sleep(self):
output = self.run_tests("-j2", "--timeout=1.0", testname,
exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
- failed=testname)
+ failed=testname, stats=0)
self.assertRegex(output,
re.compile('%s timed out' % testname, re.MULTILINE))
@@ -1430,7 +1627,8 @@ def test_unraisable_exc(self):
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
self.assertIn("Warning -- Unraisable exception", output)
self.assertIn("Exception: weakref callback bug", output)
@@ -1462,7 +1660,8 @@ def test_threading_excepthook(self):
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
self.assertIn("Warning -- Uncaught thread exception", output)
self.assertIn("Exception: bug in thread", output)
@@ -1503,7 +1702,8 @@ def test_print_warning(self):
output = self.run_tests(*cmd, exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
- fail_env_changed=True)
+ fail_env_changed=True,
+ stats=1)
self.assertRegex(output, regex)
def test_unicode_guard_env(self):
@@ -1550,15 +1750,15 @@ def test_leak_tmp_file(self):
self.check_executed_tests(output, testnames,
env_changed=testnames,
fail_env_changed=True,
- randomize=True)
+ parallel=True,
+ stats=len(testnames))
for testname in testnames:
self.assertIn(f"Warning -- {testname} leaked temporary "
f"files (1): mytmpfile",
output)
- def test_mp_decode_error(self):
- # gh-101634: If a worker stdout cannot be decoded, report a failed test
- # and a non-zero exit code.
+ def test_worker_decode_error(self):
+ # gh-109425: Use "backslashreplace" error handler to decode stdout.
if sys.platform == 'win32':
encoding = locale.getencoding()
else:
@@ -1566,30 +1766,143 @@ def test_mp_decode_error(self):
if encoding is None:
encoding = sys.__stdout__.encoding
if encoding is None:
- self.skipTest(f"cannot get regrtest worker encoding")
-
- nonascii = b"byte:\xa0\xa9\xff\n"
+ self.skipTest("cannot get regrtest worker encoding")
+
+ nonascii = bytes(ch for ch in range(128, 256))
+ corrupted_output = b"nonascii:%s\n" % (nonascii,)
+ # gh-108989: On Windows, assertion errors are written in UTF-16: when
+ # decoded, each letter is followed by a NUL character.
+ assertion_failed = 'Assertion failed: tstate_is_alive(tstate)\n'
+ corrupted_output += assertion_failed.encode('utf-16-le')
try:
- nonascii.decode(encoding)
+ corrupted_output.decode(encoding)
except UnicodeDecodeError:
pass
else:
- self.skipTest(f"{encoding} can decode non-ASCII bytes {nonascii!a}")
+ self.skipTest(f"{encoding} can decode non-ASCII bytes")
+
+ expected_line = corrupted_output.decode(encoding, 'backslashreplace')
code = textwrap.dedent(fr"""
import sys
+ import unittest
+
+ class Tests(unittest.TestCase):
+ def test_pass(self):
+ pass
+
# bytes which cannot be decoded from UTF-8
- nonascii = {nonascii!a}
- sys.stdout.buffer.write(nonascii)
+ corrupted_output = {corrupted_output!a}
+ sys.stdout.buffer.write(corrupted_output)
sys.stdout.buffer.flush()
""")
testname = self.create_test(code=code)
+ output = self.run_tests("--fail-env-changed", "-v", "-j1", testname)
+ self.check_executed_tests(output, [testname],
+ parallel=True,
+ stats=1)
+ self.check_line(output, expected_line, regex=False)
+
+ def test_doctest(self):
+ code = textwrap.dedent(r'''
+ import doctest
+ import sys
+ from test import support
+
+ def my_function():
+ """
+ Pass:
+
+ >>> 1 + 1
+ 2
+
+ Failure:
+
+ >>> 2 + 3
+ 23
+ >>> 1 + 1
+ 11
+
+ Skipped test (ignored):
+
+ >>> id(1.0) # doctest: +SKIP
+ 7948648
+ """
+
+ def load_tests(loader, tests, pattern):
+ tests.addTest(doctest.DocTestSuite())
+ return tests
+ ''')
+ testname = self.create_test(code=code)
+
output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
failed=[testname],
- randomize=True)
+ parallel=True,
+ stats=TestStats(1, 1, 0))
+
+ def _check_random_seed(self, run_workers: bool):
+ # gh-109276: When -r/--randomize is used, random.seed() is called
+ # with the same random seed before running each test file.
+ code = textwrap.dedent(r'''
+ import random
+ import unittest
+
+ class RandomSeedTest(unittest.TestCase):
+ def test_randint(self):
+ numbers = [random.randint(0, 1000) for _ in range(10)]
+ print(f"Random numbers: {numbers}")
+ ''')
+ tests = [self.create_test(name=f'test_random{i}', code=code)
+ for i in range(1, 3+1)]
+
+ random_seed = 856_656_202
+ cmd = ["--randomize", f"--randseed={random_seed}"]
+ if run_workers:
+ # run as many worker processes as there are tests
+ cmd.append(f'-j{len(tests)}')
+ cmd.extend(tests)
+ output = self.run_tests(*cmd)
+
+ random.seed(random_seed)
+ # Assume that nothing consumes entropy between libregrtest's
+ # setup_tests(), which calls random.seed(), and RandomSeedTest calling
+ # random.randint().
+ numbers = [random.randint(0, 1000) for _ in range(10)]
+ expected = f"Random numbers: {numbers}"
+
+ regex = r'^Random numbers: .*$'
+ matches = re.findall(regex, output, flags=re.MULTILINE)
+ self.assertEqual(matches, [expected] * len(tests))
+
+ def test_random_seed(self):
+ self._check_random_seed(run_workers=False)
+
+ def test_random_seed_workers(self):
+ self._check_random_seed(run_workers=True)
+
+ def test_python_command(self):
+ code = textwrap.dedent(r"""
+ import sys
+ import unittest
+
+ class WorkerTests(unittest.TestCase):
+ def test_dev_mode(self):
+ self.assertTrue(sys.flags.dev_mode)
+ """)
+ tests = [self.create_test(code=code) for _ in range(3)]
+
+ # Custom Python command: "python -X dev"
+ python_cmd = [sys.executable, '-X', 'dev']
+ # test.libregrtest.cmdline uses shlex.split() to parse the Python
+ # command line string
+ python_cmd = shlex.join(python_cmd)
+
+ output = self.run_tests("--python", python_cmd, "-j0", *tests)
+ self.check_executed_tests(output, tests,
+ stats=len(tests), parallel=True)
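The check_executed_tests() assertions above pass Rerun(...) records and TestStats counters. As a minimal sketch only — an assumption inferred from the call patterns in these tests, not the verified definitions inside test.libregrtest — record types consistent with that usage could look like:

    import dataclasses
    from collections import namedtuple

    # Hypothetical shapes, inferred only from how the assertions above call them.
    Rerun = namedtuple('Rerun', ['name', 'match', 'success'])

    @dataclasses.dataclass
    class TestStats:
        tests_run: int = 0
        failures: int = 0
        skipped: int = 0

    # Consistent with the calls seen above:
    # Rerun(testname, match="test_success", success=False)
    # TestStats(1, skipped=1) and TestStats(1, 1, 0)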
class TestUtils(unittest.TestCase):
@@ -1615,6 +1928,17 @@ def test_format_duration(self):
self.assertEqual(utils.format_duration(3 * 3600 + 1),
'3 hour 1 sec')
+ def test_normalize_test_name(self):
+ normalize = normalize_test_name
+ self.assertEqual(normalize('test_access (test.test_os.FileTests.test_access)'),
+ 'test_access')
+ self.assertEqual(normalize('setUpClass (test.test_os.ChownFileTests)', is_error=True),
+ 'ChownFileTests')
+ self.assertEqual(normalize('test_success (test.test_bug.ExampleTests.test_success)', is_error=True),
+ 'test_success')
+ self.assertIsNone(normalize('setUpModule (test.test_x)', is_error=True))
+ self.assertIsNone(normalize('tearDownModule (test.test_module)', is_error=True))
+
if __name__ == '__main__':
unittest.main()
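The new test_normalize_test_name pins down the expected behaviour of the helper. A rough sketch of a parser that satisfies those assertions (illustrative only, not the actual libregrtest helper; the function name here is hypothetical):

    import re

    def normalize_test_name_sketch(test_full_name, *, is_error=False):
        # unittest reports tests as "short_name (package.module.Class.short_name)".
        match = re.match(r'(\w+) \(([^)]+)\)', test_full_name)
        if match is None:
            return test_full_name
        short_name, qualified = match.groups()
        if is_error and short_name in ('setUpModule', 'tearDownModule'):
            return None                      # module-level hook: no test to name
        if is_error and short_name in ('setUpClass', 'tearDownClass'):
            return qualified.split('.')[-1]  # report the class name instead
        return short_name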
diff --git a/Lib/test/test_signal.py b/Lib/test/test_signal.py
index 25afd6aabe0751..2a1a1ee22f43da 100644
--- a/Lib/test/test_signal.py
+++ b/Lib/test/test_signal.py
@@ -745,6 +745,7 @@ def test_siginterrupt_on(self):
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
+ @support.requires_resource('walltime')
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
diff --git a/Lib/test/test_site.py b/Lib/test/test_site.py
index 9e701fd847acdf..e8ec3b35881fec 100644
--- a/Lib/test/test_site.py
+++ b/Lib/test/test_site.py
@@ -465,10 +465,10 @@ def test_sitecustomize_executed(self):
else:
self.fail("sitecustomize not imported automatically")
- @test.support.requires_resource('network')
- @test.support.system_must_validate_cert
@unittest.skipUnless(hasattr(urllib.request, "HTTPSHandler"),
'need SSL support to download license')
+ @test.support.requires_resource('network')
+ @test.support.system_must_validate_cert
def test_license_exists_at_url(self):
# This test is a bit fragile since it depends on the format of the
# string displayed by license in the absence of a LICENSE file.
@@ -576,7 +576,7 @@ def _create_underpth_exe(self, lines, exe_pth=True):
_pth_file = os.path.splitext(exe_file)[0] + '._pth'
else:
_pth_file = os.path.splitext(dll_file)[0] + '._pth'
- with open(_pth_file, 'w') as f:
+ with open(_pth_file, 'w', encoding='utf8') as f:
for line in lines:
print(line, file=f)
return exe_file
@@ -613,7 +613,7 @@ def test_underpth_basic(self):
os.path.dirname(exe_file),
pth_lines)
- output = subprocess.check_output([exe_file, '-c',
+ output = subprocess.check_output([exe_file, '-X', 'utf8', '-c',
'import sys; print("\\n".join(sys.path) if sys.flags.no_site else "")'
], encoding='utf-8', errors='surrogateescape')
actual_sys_path = output.rstrip().split('\n')
diff --git a/Lib/test/test_smtpnet.py b/Lib/test/test_smtpnet.py
index 72f51cd8d81f59..2e0dc1aa276f35 100644
--- a/Lib/test/test_smtpnet.py
+++ b/Lib/test/test_smtpnet.py
@@ -61,6 +61,7 @@ def test_connect_default_port(self):
server.ehlo()
server.quit()
+ @support.requires_resource('walltime')
def test_connect_using_sslcontext(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py
index 0eaf64257c3b81..99c4c5cbc4902d 100644
--- a/Lib/test/test_socket.py
+++ b/Lib/test/test_socket.py
@@ -5288,6 +5288,7 @@ def mocked_socket_module(self):
finally:
socket.socket = old_socket
+ @socket_helper.skip_if_tcp_blackhole
def test_connect(self):
port = socket_helper.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -5296,6 +5297,7 @@ def test_connect(self):
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
+ @socket_helper.skip_if_tcp_blackhole
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
@@ -6472,12 +6474,16 @@ def test_sha256(self):
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
- expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
+ # gh-109396: In FIPS mode, Linux 6.5 requires a key
+ # of at least 112 bits. Use a key of 152 bits.
+ key = b"Python loves AF_ALG"
+ data = b"what do ya want for nothing?"
+ expected = bytes.fromhex("193dbb43c6297b47ea6277ec0ce67119a3f3aa66")
with self.create_alg('hash', 'hmac(sha1)') as algo:
- algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
+ algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
- op.sendall(b"what do ya want for nothing?")
+ op.sendall(data)
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
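The updated HMAC-SHA1 expected value above can be cross-checked in pure Python (plain hmac/hashlib here, not the AF_ALG socket interface), which is convenient when a FIPS policy forces a longer key:

    import hashlib
    import hmac

    key = b"Python loves AF_ALG"             # 19 bytes = 152 bits, above the 112-bit minimum
    data = b"what do ya want for nothing?"
    digest = hmac.new(key, data, hashlib.sha1).hexdigest()
    # Should equal the hex constant used in test_hmac_sha1 above.
    print(digest)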
diff --git a/Lib/test/test_source_encoding.py b/Lib/test/test_source_encoding.py
index 72c2b47779e005..27871378f1c79e 100644
--- a/Lib/test/test_source_encoding.py
+++ b/Lib/test/test_source_encoding.py
@@ -68,6 +68,7 @@ def test_issue7820(self):
def test_20731(self):
sub = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
+ 'tokenizedata',
'coding20731.py')],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
@@ -100,10 +101,10 @@ def test_bad_coding2(self):
self.verify_bad_module(module_name)
def verify_bad_module(self, module_name):
- self.assertRaises(SyntaxError, __import__, 'test.' + module_name)
+ self.assertRaises(SyntaxError, __import__, 'test.tokenizedata.' + module_name)
path = os.path.dirname(__file__)
- filename = os.path.join(path, module_name + '.py')
+ filename = os.path.join(path, 'tokenizedata', module_name + '.py')
with open(filename, "rb") as fp:
bytes = fp.read()
self.assertRaises(SyntaxError, compile, bytes, filename, 'exec')
diff --git a/Lib/test/test_sqlite3/test_dump.py b/Lib/test/test_sqlite3/test_dump.py
index 3107e1b165d950..14a18c1ad37102 100644
--- a/Lib/test/test_sqlite3/test_dump.py
+++ b/Lib/test/test_sqlite3/test_dump.py
@@ -1,7 +1,6 @@
# Author: Paul Kippes
import unittest
-import sqlite3 as sqlite
from .util import memory_database
from .util import MemoryDatabaseMixin
diff --git a/Lib/test/test_sqlite3/test_userfunctions.py b/Lib/test/test_sqlite3/test_userfunctions.py
index 09019498fd5682..c6c3db159add64 100644
--- a/Lib/test/test_sqlite3/test_userfunctions.py
+++ b/Lib/test/test_sqlite3/test_userfunctions.py
@@ -29,7 +29,7 @@
from test.support import bigmemtest, gc_collect
from .util import cx_limit, memory_database
-from .util import with_tracebacks, check_tracebacks
+from .util import with_tracebacks
def func_returntext():
diff --git a/Lib/test/test_sqlite3/util.py b/Lib/test/test_sqlite3/util.py
index 505406c437b632..5599823838beea 100644
--- a/Lib/test/test_sqlite3/util.py
+++ b/Lib/test/test_sqlite3/util.py
@@ -4,7 +4,6 @@
import re
import sqlite3
import test.support
-import unittest
# Helper for temporary memory databases
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index 4e49dc5640d3f5..06304dcb4ec7b8 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -60,10 +60,10 @@
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
- return os.path.join(os.path.dirname(__file__), *name)
+ return os.path.join(os.path.dirname(__file__), "certdata", *name)
# The custom key and certificate files used in test_ssl are generated
-# using Lib/test/make_ssl_certs.py.
+# using Lib/test/certdata/make_ssl_certs.py.
# Other certificates are simply fetched from the internet servers they
# are meant to authenticate.
@@ -641,7 +641,7 @@ def test_openssl111_deprecations(self):
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
- certfile)
+ "certdata", certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
@@ -2182,6 +2182,7 @@ def test_timeout_connect_ex(self):
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
+ @support.requires_resource('walltime')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
@@ -2740,6 +2741,7 @@ def try_protocol_combo(server_protocol, client_protocol, expect_success,
class ThreadedTests(unittest.TestCase):
+ @support.requires_resource('walltime')
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
@@ -3307,12 +3309,12 @@ def test_socketserver(self):
# try to connect
if support.verbose:
sys.stdout.write('\n')
- with open(CERTFILE, 'rb') as f:
+ # Get this test file itself:
+ with open(__file__, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
- url = 'https://localhost:%d/%s' % (
- server.port, os.path.split(CERTFILE)[1])
+ url = f'https://localhost:{server.port}/test_ssl.py'
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
diff --git a/Lib/test/test_stable_abi_ctypes.py b/Lib/test/test_stable_abi_ctypes.py
index 1f3cf612c18f6d..94f817f8e1d159 100644
--- a/Lib/test/test_stable_abi_ctypes.py
+++ b/Lib/test/test_stable_abi_ctypes.py
@@ -405,6 +405,8 @@ def test_windows_feature_macros(self):
"PyMapping_GetOptionalItemString",
"PyMapping_HasKey",
"PyMapping_HasKeyString",
+ "PyMapping_HasKeyStringWithError",
+ "PyMapping_HasKeyWithError",
"PyMapping_Items",
"PyMapping_Keys",
"PyMapping_Length",
@@ -542,6 +544,8 @@ def test_windows_feature_macros(self):
"PyObject_GetTypeData",
"PyObject_HasAttr",
"PyObject_HasAttrString",
+ "PyObject_HasAttrStringWithError",
+ "PyObject_HasAttrWithError",
"PyObject_Hash",
"PyObject_HashNotImplemented",
"PyObject_Init",
diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py
index 23a3973305303d..f9b0ac2ad7b116 100644
--- a/Lib/test/test_statistics.py
+++ b/Lib/test/test_statistics.py
@@ -698,14 +698,6 @@ def test_check_all(self):
'missing name "%s" in __all__' % name)
-class DocTests(unittest.TestCase):
- @unittest.skipIf(sys.flags.optimize >= 2,
- "Docstrings are omitted with -OO and above")
- def test_doc_tests(self):
- failed, tried = doctest.testmod(statistics, optionflags=doctest.ELLIPSIS)
- self.assertGreater(tried, 0)
- self.assertEqual(failed, 0)
-
class StatisticsErrorTest(unittest.TestCase):
def test_has_exception(self):
errmsg = (
@@ -3145,6 +3137,7 @@ def tearDown(self):
def load_tests(loader, tests, ignore):
"""Used for doctest/unittest integration."""
tests.addTests(doctest.DocTestSuite())
+ tests.addTests(doctest.DocTestSuite(statistics))
return tests
diff --git a/Lib/test/test_str.py b/Lib/test/test_str.py
index 3ae2f45ef6bddc..814ef111c5bec8 100644
--- a/Lib/test/test_str.py
+++ b/Lib/test/test_str.py
@@ -812,16 +812,6 @@ def test_isidentifier(self):
self.assertFalse("©".isidentifier())
self.assertFalse("0".isidentifier())
- @support.cpython_only
- @support.requires_legacy_unicode_capi()
- @unittest.skipIf(_testcapi is None, 'need _testcapi module')
- def test_isidentifier_legacy(self):
- u = '𝖀𝖓𝖎𝖈𝖔𝖉𝖊'
- self.assertTrue(u.isidentifier())
- with warnings_helper.check_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- self.assertTrue(_testcapi.unicode_legacy_string(u).isidentifier())
-
def test_isprintable(self):
self.assertTrue("".isprintable())
self.assertTrue(" ".isprintable())
@@ -2489,26 +2479,6 @@ def test_getnewargs(self):
self.assertEqual(args[0], text)
self.assertEqual(len(args), 1)
- @support.cpython_only
- @support.requires_legacy_unicode_capi()
- @unittest.skipIf(_testcapi is None, 'need _testcapi module')
- def test_resize(self):
- for length in range(1, 100, 7):
- # generate a fresh string (refcount=1)
- text = 'a' * length + 'b'
-
- # fill wstr internal field
- with self.assertWarns(DeprecationWarning):
- abc = _testcapi.getargs_u(text)
- self.assertEqual(abc, text)
-
- # resize text: wstr field must be cleared and then recomputed
- text += 'c'
- with self.assertWarns(DeprecationWarning):
- abcdef = _testcapi.getargs_u(text)
- self.assertNotEqual(abc, abcdef)
- self.assertEqual(abcdef, text)
-
def test_compare(self):
# Issue #17615
N = 10
diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py
index 0b9e9e16f55d7e..d95ef72b0da47a 100644
--- a/Lib/test/test_subprocess.py
+++ b/Lib/test/test_subprocess.py
@@ -269,6 +269,7 @@ def test_check_output_stdin_with_input_arg(self):
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
+ @support.requires_resource('walltime')
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
@@ -1643,6 +1644,7 @@ def test_check_output_stdin_with_input_arg(self):
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
+ @support.requires_resource('walltime')
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
diff --git a/Lib/test/test_support.py b/Lib/test/test_support.py
index 86d26b7e8df4d0..5b57c5fd54a68d 100644
--- a/Lib/test/test_support.py
+++ b/Lib/test/test_support.py
@@ -685,6 +685,121 @@ def test_has_strftime_extensions(self):
else:
self.assertTrue(support.has_strftime_extensions)
+ def test_get_recursion_depth(self):
+ # test support.get_recursion_depth()
+ code = textwrap.dedent("""
+ from test import support
+ import sys
+
+ def check(cond):
+ if not cond:
+ raise AssertionError("test failed")
+
+ # depth 1
+ check(support.get_recursion_depth() == 1)
+
+ # depth 2
+ def test_func():
+ check(support.get_recursion_depth() == 2)
+ test_func()
+
+ def test_recursive(depth, limit):
+ if depth >= limit:
+ # don't call get_recursion_depth() at this depth:
+ # it can raise RecursionError
+ return
+ get_depth = support.get_recursion_depth()
+ print(f"test_recursive: {depth}/{limit}: "
+ f"get_recursion_depth() says {get_depth}")
+ check(get_depth == depth)
+ test_recursive(depth + 1, limit)
+
+ # depth up to 25
+ with support.infinite_recursion(max_depth=25):
+ limit = sys.getrecursionlimit()
+ print(f"test with sys.getrecursionlimit()={limit}")
+ test_recursive(2, limit)
+
+ # depth up to 500
+ with support.infinite_recursion(max_depth=500):
+ limit = sys.getrecursionlimit()
+ print(f"test with sys.getrecursionlimit()={limit}")
+ test_recursive(2, limit)
+ """)
+ script_helper.assert_python_ok("-c", code)
+
+ def test_recursion(self):
+ # Test infinite_recursion() and get_recursion_available() functions.
+ def recursive_function(depth):
+ if depth:
+ recursive_function(depth - 1)
+
+ for max_depth in (5, 25, 250):
+ with support.infinite_recursion(max_depth):
+ available = support.get_recursion_available()
+
+ # Recursion up to 'available' additional frames should be OK.
+ recursive_function(available)
+
+ # Recursion up to 'available+1' additional frames must raise
+ # RecursionError. Avoid self.assertRaises(RecursionError) which
+ # can consume more than 3 frames and so raises RecursionError.
+ try:
+ recursive_function(available + 1)
+ except RecursionError:
+ pass
+ else:
+ self.fail("RecursionError was not raised")
+
+ # Test the bare minimum: max_depth=3
+ with support.infinite_recursion(3):
+ try:
+ recursive_function(3)
+ except RecursionError:
+ pass
+ else:
+ self.fail("RecursionError was not raised")
+
+ def test_parse_memlimit(self):
+ parse = support._parse_memlimit
+ KiB = 1024
+ MiB = KiB * 1024
+ GiB = MiB * 1024
+ TiB = GiB * 1024
+ self.assertEqual(parse('0k'), 0)
+ self.assertEqual(parse('3k'), 3 * KiB)
+ self.assertEqual(parse('2.4m'), int(2.4 * MiB))
+ self.assertEqual(parse('4g'), int(4 * GiB))
+ self.assertEqual(parse('1t'), TiB)
+
+ for limit in ('', '3', '3.5.10k', '10x'):
+ with self.subTest(limit=limit):
+ with self.assertRaises(ValueError):
+ parse(limit)
+
+ def test_set_memlimit(self):
+ _4GiB = 4 * 1024 ** 3
+ TiB = 1024 ** 4
+ old_max_memuse = support.max_memuse
+ old_real_max_memuse = support.real_max_memuse
+ try:
+ if sys.maxsize > 2**32:
+ support.set_memlimit('4g')
+ self.assertEqual(support.max_memuse, _4GiB)
+ self.assertEqual(support.real_max_memuse, _4GiB)
+
+ big = 2**100 // TiB
+ support.set_memlimit(f'{big}t')
+ self.assertEqual(support.max_memuse, sys.maxsize)
+ self.assertEqual(support.real_max_memuse, big * TiB)
+ else:
+ support.set_memlimit('4g')
+ self.assertEqual(support.max_memuse, sys.maxsize)
+ self.assertEqual(support.real_max_memuse, _4GiB)
+ finally:
+ support.max_memuse = old_max_memuse
+ support.real_max_memuse = old_real_max_memuse
+
# XXX -follows a list of untested API
# make_legacy_pyc
# is_resource_enabled
@@ -696,7 +811,6 @@ def test_has_strftime_extensions(self):
# EnvironmentVarGuard
# transient_internet
# run_with_locale
- # set_memlimit
# bigmemtest
# precisionbigmemtest
# bigaddrspacetest
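test_parse_memlimit above fixes the accepted grammar for memory limits. A minimal sketch of a parser consistent with those assertions (an assumption for illustration, not the actual test.support._parse_memlimit):

    import re

    _SIZES = {'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3, 't': 1024 ** 4}

    def parse_memlimit_sketch(limit: str) -> int:
        # A decimal number with a mandatory k/m/g/t suffix; anything else is invalid.
        match = re.fullmatch(r'(\d+(?:\.\d+)?)([kmgt])', limit.lower())
        if match is None:
            raise ValueError(f'Invalid memory limit: {limit!r}')
        return int(float(match.group(1)) * _SIZES[match.group(2)])

For example, parse_memlimit_sketch('2.4m') yields int(2.4 * 1024 ** 2), matching the test's expectation, while '', '3', '3.5.10k' and '10x' all raise ValueError.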
diff --git a/Lib/test/test_symtable.py b/Lib/test/test_symtable.py
index 25714aecda3a15..36cb7b3f242e4c 100644
--- a/Lib/test/test_symtable.py
+++ b/Lib/test/test_symtable.py
@@ -251,6 +251,10 @@ def test_symtable_repr(self):
self.assertEqual(str(self.top), "<SymbolTable for module ?>")
self.assertEqual(str(self.spam), "<Function SymbolTable for spam in ?>")
+ def test_symtable_entry_repr(self):
+ expected = f"<symtable entry top({self.top.get_id()}), line {self.top.get_lineno()}>"
+ self.assertEqual(repr(self.top._table), expected)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py
index d8b684c8a008f0..f4948ceec66226 100644
--- a/Lib/test/test_sys.py
+++ b/Lib/test/test_sys.py
@@ -279,20 +279,29 @@ def test_switchinterval(self):
finally:
sys.setswitchinterval(orig)
- def test_recursionlimit(self):
+ def test_getrecursionlimit(self):
+ limit = sys.getrecursionlimit()
+ self.assertIsInstance(limit, int)
+ self.assertGreater(limit, 1)
+
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
- oldlimit = sys.getrecursionlimit()
- self.assertRaises(TypeError, sys.setrecursionlimit)
- self.assertRaises(ValueError, sys.setrecursionlimit, -42)
- sys.setrecursionlimit(10000)
- self.assertEqual(sys.getrecursionlimit(), 10000)
- sys.setrecursionlimit(oldlimit)
+
+ def test_setrecursionlimit(self):
+ old_limit = sys.getrecursionlimit()
+ try:
+ sys.setrecursionlimit(10_005)
+ self.assertEqual(sys.getrecursionlimit(), 10_005)
+
+ self.assertRaises(TypeError, sys.setrecursionlimit)
+ self.assertRaises(ValueError, sys.setrecursionlimit, -42)
+ finally:
+ sys.setrecursionlimit(old_limit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
- oldlimit = sys.getrecursionlimit()
+ old_limit = sys.getrecursionlimit()
def f():
f()
try:
@@ -311,35 +320,31 @@ def f():
with self.assertRaises(RecursionError):
f()
finally:
- sys.setrecursionlimit(oldlimit)
+ sys.setrecursionlimit(old_limit)
@test.support.cpython_only
- def test_setrecursionlimit_recursion_depth(self):
+ def test_setrecursionlimit_to_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
- from _testinternalcapi import get_recursion_depth
-
- def set_recursion_limit_at_depth(depth, limit):
- recursion_depth = get_recursion_depth()
- if recursion_depth >= depth:
- with self.assertRaises(RecursionError) as cm:
- sys.setrecursionlimit(limit)
- self.assertRegex(str(cm.exception),
- "cannot set the recursion limit to [0-9]+ "
- "at the recursion depth [0-9]+: "
- "the limit is too low")
- else:
- set_recursion_limit_at_depth(depth, limit)
-
- oldlimit = sys.getrecursionlimit()
+ old_limit = sys.getrecursionlimit()
try:
- sys.setrecursionlimit(1000)
-
- for limit in (10, 25, 50, 75, 100, 150, 200):
- set_recursion_limit_at_depth(limit, limit)
+ depth = support.get_recursion_depth()
+ with self.subTest(limit=sys.getrecursionlimit(), depth=depth):
+ # depth + 1 is OK
+ sys.setrecursionlimit(depth + 1)
+
+ # reset the limit to be able to call self.assertRaises()
+ # context manager
+ sys.setrecursionlimit(old_limit)
+ with self.assertRaises(RecursionError) as cm:
+ sys.setrecursionlimit(depth)
+ self.assertRegex(str(cm.exception),
+ "cannot set the recursion limit to [0-9]+ "
+ "at the recursion depth [0-9]+: "
+ "the limit is too low")
finally:
- sys.setrecursionlimit(oldlimit)
+ sys.setrecursionlimit(old_limit)
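The rewritten test relies on support.get_recursion_depth() to find the current depth before lowering the limit. As a standalone illustration (an approximation, not the support implementation), the same depth can be obtained by walking the frame stack:

    import sys

    def frame_stack_depth() -> int:
        # Count frames from the current one back to the top of the stack.
        depth = 0
        frame = sys._getframe()
        while frame is not None:
            depth += 1
            frame = frame.f_back
        return depth

    # Setting a recursion limit at or below this depth is what the test
    # expects to fail with RecursionError.
    print(frame_stack_depth())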
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
@@ -1195,15 +1200,24 @@ class MyType:
get_objects = sys.getobjects(3, MyType)
self.assertEqual(len(get_objects), 3)
+ @unittest.skipUnless(hasattr(sys, '_stats_on'), 'need Py_STATS build')
+ def test_pystats(self):
+ # Call the functions, just check that they don't crash
+ # Cannot save/restore state.
+ sys._stats_on()
+ sys._stats_off()
+ sys._stats_clear()
+ sys._stats_dump()
+
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
- import _testcapi
+ import _testinternalcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
- _testcapi.write_unraisable_exc(exc, err_msg, obj)
+ _testinternalcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
diff --git a/Lib/test/test_sys_setprofile.py b/Lib/test/test_sys_setprofile.py
index 49e076c77d167a..34c70d6c8de0c4 100644
--- a/Lib/test/test_sys_setprofile.py
+++ b/Lib/test/test_sys_setprofile.py
@@ -439,7 +439,6 @@ def __del__(self):
sys.setprofile(foo)
self.assertEqual(sys.getprofile(), bar)
-
def test_same_object(self):
def foo(*args):
...
@@ -448,6 +447,18 @@ def foo(*args):
del foo
sys.setprofile(sys.getprofile())
+ def test_profile_after_trace_opcodes(self):
+ def f():
+ ...
+
+ sys._getframe().f_trace_opcodes = True
+ prev_trace = sys.gettrace()
+ sys.settrace(lambda *args: None)
+ f()
+ sys.settrace(prev_trace)
+ sys.setprofile(lambda *args: None)
+ f()
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_sys_settrace.py b/Lib/test/test_sys_settrace.py
index 7d38addaee413e..f02169602e4925 100644
--- a/Lib/test/test_sys_settrace.py
+++ b/Lib/test/test_sys_settrace.py
@@ -8,6 +8,7 @@
from functools import wraps
import asyncio
from test.support import import_helper
+import contextlib
support.requires_working_socket(module=True)
@@ -40,6 +41,20 @@ async def asynciter(iterable):
for x in iterable:
yield x
+def clean_asynciter(test):
+ @wraps(test)
+ async def wrapper(*args, **kwargs):
+ cleanups = []
+ def wrapped_asynciter(iterable):
+ it = asynciter(iterable)
+ cleanups.append(it.aclose)
+ return it
+ try:
+ return await test(*args, **kwargs, asynciter=wrapped_asynciter)
+ finally:
+ while cleanups:
+ await cleanups.pop()()
+ return wrapper
# A very basic example. If this fails, we're in deep trouble.
def basic():
@@ -302,6 +317,13 @@ def generator_example():
[(5, 'line'), (5, 'return')])
+def lineno_matches_lasti(frame):
+ last_line = None
+ for start, end, line in frame.f_code.co_lines():
+ if start <= frame.f_lasti < end:
+ last_line = line
+ return last_line == frame.f_lineno
+
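The lineno_matches_lasti() helper above leans on code.co_lines(), which yields (start_offset, end_offset, line) triples. A standalone sketch of the same lookup, for illustration only:

    import sys

    def line_for_offset(code, offset):
        # Return the source line owning the bytecode instruction at `offset`,
        # or None if the offset has no line information.
        for start, end, line in code.co_lines():
            if start <= offset < end:
                return line
        return None

    frame = sys._getframe()
    print(line_for_offset(frame.f_code, frame.f_lasti), frame.f_lineno)  # same line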
class Tracer:
def __init__(self, trace_line_events=None, trace_opcode_events=None):
self.trace_line_events = trace_line_events
@@ -315,6 +337,7 @@ def _reconfigure_frame(self, frame):
frame.f_trace_opcodes = self.trace_opcode_events
def trace(self, frame, event, arg):
+ assert lineno_matches_lasti(frame)
self._reconfigure_frame(frame)
self.events.append((frame.f_lineno, event))
return self.trace
@@ -906,6 +929,35 @@ def func():
(6, 'line'),
(6, 'return')])
+ def test_finally_with_conditional(self):
+
+ # See gh-105658
+ condition = True
+ def func():
+ try:
+ try:
+ raise Exception
+ finally:
+ if condition:
+ result = 1
+ result = 2
+ except:
+ result = 3
+ return result
+
+ self.run_and_compare(func,
+ [(0, 'call'),
+ (1, 'line'),
+ (2, 'line'),
+ (3, 'line'),
+ (3, 'exception'),
+ (5, 'line'),
+ (6, 'line'),
+ (8, 'line'),
+ (9, 'line'),
+ (10, 'line'),
+ (10, 'return')])
+
def test_break_to_continue1(self):
def func():
@@ -1653,7 +1705,6 @@ def error_once(frame, event, arg):
except Exception as ex:
count = 0
tb = ex.__traceback__
- print(tb)
while tb:
if tb.tb_frame.f_code.co_name == "test_settrace_error":
count += 1
@@ -1876,6 +1927,7 @@ def __init__(self, function, jumpFrom, jumpTo, event='line',
def trace(self, frame, event, arg):
if self.done:
return
+ assert lineno_matches_lasti(frame)
# frame.f_code.co_firstlineno is the first line of the decorator when
# 'function' is decorated and the decorator may be written using
# multiple physical lines when it is too long. Use the first line
@@ -1922,6 +1974,8 @@ def no_jump_without_trace_function():
class JumpTestCase(unittest.TestCase):
+ unbound_locals = r"assigning None to [0-9]+ unbound local"
+
def setUp(self):
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(None)
@@ -1933,33 +1987,47 @@ def compare_jump_output(self, expected, received):
"Received: " + repr(received))
def run_test(self, func, jumpFrom, jumpTo, expected, error=None,
- event='line', decorated=False):
- tracer = JumpTracer(func, jumpFrom, jumpTo, event, decorated)
+ event='line', decorated=False, warning=None):
+ wrapped = func
+ while hasattr(wrapped, '__wrapped__'):
+ wrapped = wrapped.__wrapped__
+
+ tracer = JumpTracer(wrapped, jumpFrom, jumpTo, event, decorated)
sys.settrace(tracer.trace)
output = []
- if error is None:
+
+ with contextlib.ExitStack() as stack:
+ if error is not None:
+ stack.enter_context(self.assertRaisesRegex(*error))
+ if warning is not None:
+ stack.enter_context(self.assertWarnsRegex(*warning))
func(output)
- else:
- with self.assertRaisesRegex(*error):
- func(output)
+
sys.settrace(None)
self.compare_jump_output(expected, output)
def run_async_test(self, func, jumpFrom, jumpTo, expected, error=None,
- event='line', decorated=False):
- tracer = JumpTracer(func, jumpFrom, jumpTo, event, decorated)
+ event='line', decorated=False, warning=None):
+ wrapped = func
+ while hasattr(wrapped, '__wrapped__'):
+ wrapped = wrapped.__wrapped__
+
+ tracer = JumpTracer(wrapped, jumpFrom, jumpTo, event, decorated)
sys.settrace(tracer.trace)
output = []
- if error is None:
+
+ with contextlib.ExitStack() as stack:
+ if error is not None:
+ stack.enter_context(self.assertRaisesRegex(*error))
+ if warning is not None:
+ stack.enter_context(self.assertWarnsRegex(*warning))
asyncio.run(func(output))
- else:
- with self.assertRaisesRegex(*error):
- asyncio.run(func(output))
+
sys.settrace(None)
asyncio.set_event_loop_policy(None)
self.compare_jump_output(expected, output)
- def jump_test(jumpFrom, jumpTo, expected, error=None, event='line'):
+ def jump_test(jumpFrom, jumpTo, expected, error=None, event='line', warning=None):
"""Decorator that creates a test that makes a jump
from one place to another in the following code.
"""
@@ -1967,11 +2035,11 @@ def decorator(func):
@wraps(func)
def test(self):
self.run_test(func, jumpFrom, jumpTo, expected,
- error=error, event=event, decorated=True)
+ error=error, event=event, decorated=True, warning=warning)
return test
return decorator
- def async_jump_test(jumpFrom, jumpTo, expected, error=None, event='line'):
+ def async_jump_test(jumpFrom, jumpTo, expected, error=None, event='line', warning=None):
"""Decorator that creates a test that makes a jump
from one place to another in the following asynchronous code.
"""
@@ -1979,7 +2047,7 @@ def decorator(func):
@wraps(func)
def test(self):
self.run_async_test(func, jumpFrom, jumpTo, expected,
- error=error, event=event, decorated=True)
+ error=error, event=event, decorated=True, warning=warning)
return test
return decorator
@@ -1996,7 +2064,7 @@ def test_jump_simple_backwards(output):
output.append(1)
output.append(2)
- @jump_test(3, 5, [2, 5])
+ @jump_test(3, 5, [2, 5], warning=(RuntimeWarning, unbound_locals))
def test_jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
@@ -2015,7 +2083,8 @@ def test_jump_out_of_block_backwards(output):
output.append(7)
@async_jump_test(4, 5, [3, 5])
- async def test_jump_out_of_async_for_block_forwards(output):
+ @clean_asynciter
+ async def test_jump_out_of_async_for_block_forwards(output, asynciter):
for i in [1]:
async for i in asynciter([1, 2]):
output.append(3)
@@ -2023,7 +2092,8 @@ async def test_jump_out_of_async_for_block_forwards(output):
output.append(5)
@async_jump_test(5, 2, [2, 4, 2, 4, 5, 6])
- async def test_jump_out_of_async_for_block_backwards(output):
+ @clean_asynciter
+ async def test_jump_out_of_async_for_block_backwards(output, asynciter):
for i in [1]:
output.append(2)
async for i in asynciter([1]):
@@ -2082,7 +2152,7 @@ def test_jump_in_nested_finally_3(output):
output.append(11)
output.append(12)
- @jump_test(5, 11, [2, 4], (ValueError, 'exception'))
+ @jump_test(5, 11, [2, 4], (ValueError, 'comes after the current code block'))
def test_no_jump_over_return_try_finally_in_finally_block(output):
try:
output.append(2)
@@ -2210,7 +2280,7 @@ def test_jump_within_except_block(output):
output.append(6)
output.append(7)
- @jump_test(6, 1, [1, 5, 1, 5])
+ @jump_test(6, 1, [1, 5, 1, 5], warning=(RuntimeWarning, unbound_locals))
def test_jump_over_try_except(output):
output.append(1)
try:
@@ -2306,7 +2376,7 @@ def test_jump_out_of_complex_nested_blocks(output):
output.append(11)
output.append(12)
- @jump_test(3, 5, [1, 2, 5])
+ @jump_test(3, 5, [1, 2, 5], warning=(RuntimeWarning, unbound_locals))
def test_jump_out_of_with_assignment(output):
output.append(1)
with tracecontext(output, 2) \
@@ -2314,7 +2384,7 @@ def test_jump_out_of_with_assignment(output):
output.append(4)
output.append(5)
- @async_jump_test(3, 5, [1, 2, 5])
+ @async_jump_test(3, 5, [1, 2, 5], warning=(RuntimeWarning, unbound_locals))
async def test_jump_out_of_async_with_assignment(output):
output.append(1)
async with asynctracecontext(output, 2) \
@@ -2350,7 +2420,7 @@ def test_jump_over_break_in_try_finally_block(output):
break
output.append(13)
- @jump_test(1, 7, [7, 8])
+ @jump_test(1, 7, [7, 8], warning=(RuntimeWarning, unbound_locals))
def test_jump_over_for_block_before_else(output):
output.append(1)
if not output: # always false
@@ -2361,7 +2431,7 @@ def test_jump_over_for_block_before_else(output):
output.append(7)
output.append(8)
- @async_jump_test(1, 7, [7, 8])
+ @async_jump_test(1, 7, [7, 8], warning=(RuntimeWarning, unbound_locals))
async def test_jump_over_async_for_block_before_else(output):
output.append(1)
if not output: # always false
@@ -2436,6 +2506,7 @@ def test_no_jump_backwards_into_for_block(output):
output.append(2)
output.append(3)
+
@async_jump_test(3, 2, [2, 2], (ValueError, "can't jump into the body of a for loop"))
async def test_no_jump_backwards_into_async_for_block(output):
async for i in asynciter([1, 2]):
@@ -2501,7 +2572,7 @@ def test_jump_backwards_into_try_except_block(output):
output.append(6)
# 'except' with a variable creates an implicit finally block
- @jump_test(5, 7, [4, 7, 8])
+ @jump_test(5, 7, [4, 7, 8], warning=(RuntimeWarning, unbound_locals))
def test_jump_between_except_blocks_2(output):
try:
1/0
@@ -2664,7 +2735,7 @@ def test_large_function(self):
output.append(x) # line 1007
return""" % ('\n' * 1000,), d)
f = d['f']
- self.run_test(f, 2, 1007, [0])
+ self.run_test(f, 2, 1007, [0], warning=(RuntimeWarning, self.unbound_locals))
def test_jump_to_firstlineno(self):
# This tests that PDB can jump back to the first line in a
@@ -2714,7 +2785,7 @@ def gen():
next(gen())
output.append(5)
- @jump_test(2, 3, [1, 3])
+ @jump_test(2, 3, [1, 3], warning=(RuntimeWarning, unbound_locals))
def test_jump_forward_over_listcomp(output):
output.append(1)
x = [i for i in range(10)]
@@ -2722,13 +2793,13 @@ def test_jump_forward_over_listcomp(output):
# checking for segfaults.
# See https://github.com/python/cpython/issues/92311
- @jump_test(3, 1, [])
+ @jump_test(3, 1, [], warning=(RuntimeWarning, unbound_locals))
def test_jump_backward_over_listcomp(output):
a = 1
x = [i for i in range(10)]
c = 3
- @jump_test(8, 2, [2, 7, 2])
+ @jump_test(8, 2, [2, 7, 2], warning=(RuntimeWarning, unbound_locals))
def test_jump_backward_over_listcomp_v2(output):
flag = False
output.append(2)
@@ -2739,19 +2810,19 @@ def test_jump_backward_over_listcomp_v2(output):
output.append(7)
output.append(8)
- @async_jump_test(2, 3, [1, 3])
+ @async_jump_test(2, 3, [1, 3], warning=(RuntimeWarning, unbound_locals))
async def test_jump_forward_over_async_listcomp(output):
output.append(1)
x = [i async for i in asynciter(range(10))]
output.append(3)
- @async_jump_test(3, 1, [])
+ @async_jump_test(3, 1, [], warning=(RuntimeWarning, unbound_locals))
async def test_jump_backward_over_async_listcomp(output):
a = 1
x = [i async for i in asynciter(range(10))]
c = 3
- @async_jump_test(8, 2, [2, 7, 2])
+ @async_jump_test(8, 2, [2, 7, 2], warning=(RuntimeWarning, unbound_locals))
async def test_jump_backward_over_async_listcomp_v2(output):
flag = False
output.append(2)
@@ -2820,13 +2891,13 @@ def test_jump_with_null_on_stack_load_attr(output):
)
output.append(15)
- @jump_test(2, 3, [1, 3])
+ @jump_test(2, 3, [1, 3], warning=(RuntimeWarning, unbound_locals))
def test_jump_extended_args_unpack_ex_simple(output):
output.append(1)
_, *_, _ = output.append(2) or "Spam"
output.append(3)
- @jump_test(3, 4, [1, 4, 4, 5])
+ @jump_test(3, 4, [1, 4, 4, 5], warning=(RuntimeWarning, unbound_locals))
def test_jump_extended_args_unpack_ex_tricky(output):
output.append(1)
(
@@ -2848,9 +2919,9 @@ def test_jump_extended_args_for_iter(self):
namespace = {}
exec("\n".join(source), namespace)
f = namespace["f"]
- self.run_test(f, 2, 100_000, [1, 100_000])
+ self.run_test(f, 2, 100_000, [1, 100_000], warning=(RuntimeWarning, self.unbound_locals))
- @jump_test(2, 3, [1, 3])
+ @jump_test(2, 3, [1, 3], warning=(RuntimeWarning, unbound_locals))
def test_jump_or_pop(output):
output.append(1)
_ = output.append(2) and "Spam"
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index 0d6ca4315cfda4..cc26da05daeafc 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -1,3 +1,4 @@
+import errno
import sys
import os
import io
@@ -2587,16 +2588,17 @@ def tarfilecmd_failure(self, *args):
return script_helper.assert_python_failure('-m', 'tarfile', *args)
def make_simple_tarfile(self, tar_name):
- files = [support.findfile('tokenize_tests.txt'),
+ files = [support.findfile('tokenize_tests.txt',
+ subdir='tokenizedata'),
support.findfile('tokenize_tests-no-coding-cookie-'
- 'and-utf8-bom-sig-only.txt')]
+ 'and-utf8-bom-sig-only.txt',
+ subdir='tokenizedata')]
self.addCleanup(os_helper.unlink, tar_name)
with tarfile.open(tar_name, 'w') as tf:
for tardata in files:
tf.add(tardata, arcname=os.path.basename(tardata))
def make_evil_tarfile(self, tar_name):
- files = [support.findfile('tokenize_tests.txt')]
self.addCleanup(os_helper.unlink, tar_name)
with tarfile.open(tar_name, 'w') as tf:
benign = tarfile.TarInfo('benign')
@@ -2677,9 +2679,11 @@ def test_list_command_invalid_file(self):
self.assertEqual(rc, 1)
def test_create_command(self):
- files = [support.findfile('tokenize_tests.txt'),
+ files = [support.findfile('tokenize_tests.txt',
+ subdir='tokenizedata'),
support.findfile('tokenize_tests-no-coding-cookie-'
- 'and-utf8-bom-sig-only.txt')]
+ 'and-utf8-bom-sig-only.txt',
+ subdir='tokenizedata')]
for opt in '-c', '--create':
try:
out = self.tarfilecmd(opt, tmpname, *files)
@@ -2690,9 +2694,11 @@ def test_create_command(self):
os_helper.unlink(tmpname)
def test_create_command_verbose(self):
- files = [support.findfile('tokenize_tests.txt'),
+ files = [support.findfile('tokenize_tests.txt',
+ subdir='tokenizedata'),
support.findfile('tokenize_tests-no-coding-cookie-'
- 'and-utf8-bom-sig-only.txt')]
+ 'and-utf8-bom-sig-only.txt',
+ subdir='tokenizedata')]
for opt in '-v', '--verbose':
try:
out = self.tarfilecmd(opt, '-c', tmpname, *files,
@@ -2704,7 +2710,7 @@ def test_create_command_verbose(self):
os_helper.unlink(tmpname)
def test_create_command_dotless_filename(self):
- files = [support.findfile('tokenize_tests.txt')]
+ files = [support.findfile('tokenize_tests.txt', subdir='tokenizedata')]
try:
out = self.tarfilecmd('-c', dotlessname, *files)
self.assertEqual(out, b'')
@@ -2715,7 +2721,7 @@ def test_create_command_dotless_filename(self):
def test_create_command_dot_started_filename(self):
tar_name = os.path.join(TEMPDIR, ".testtar")
- files = [support.findfile('tokenize_tests.txt')]
+ files = [support.findfile('tokenize_tests.txt', subdir='tokenizedata')]
try:
out = self.tarfilecmd('-c', tar_name, *files)
self.assertEqual(out, b'')
@@ -2725,9 +2731,11 @@ def test_create_command_dot_started_filename(self):
os_helper.unlink(tar_name)
def test_create_command_compressed(self):
- files = [support.findfile('tokenize_tests.txt'),
+ files = [support.findfile('tokenize_tests.txt',
+ subdir='tokenizedata'),
support.findfile('tokenize_tests-no-coding-cookie-'
- 'and-utf8-bom-sig-only.txt')]
+ 'and-utf8-bom-sig-only.txt',
+ subdir='tokenizedata')]
for filetype in (GzipTest, Bz2Test, LzmaTest):
if not filetype.open:
continue
@@ -3808,34 +3816,55 @@ def test_modes(self):
arc.add('read_group_only', mode='?---r-----')
arc.add('no_bits', mode='?---------')
arc.add('dir/', mode='?---rwsrwt')
+ arc.add('dir_all_bits/', mode='?rwsrwsrwt')
- # On some systems, setting the sticky bit is a no-op.
- # Check if that's the case.
+ # On some systems, setting the uid, gid, and/or sticky bit is a no-op.
+ # Check which bits we can set, so we can compare tarfile machinery to
+ # a simple chmod.
tmp_filename = os.path.join(TEMPDIR, "tmp.file")
with open(tmp_filename, 'w'):
pass
- os.chmod(tmp_filename, os.stat(tmp_filename).st_mode | stat.S_ISVTX)
- have_sticky_files = (os.stat(tmp_filename).st_mode & stat.S_ISVTX)
- os.unlink(tmp_filename)
+ try:
+ new_mode = (os.stat(tmp_filename).st_mode
+ | stat.S_ISVTX | stat.S_ISGID | stat.S_ISUID)
+ try:
+ os.chmod(tmp_filename, new_mode)
+ except OSError as exc:
+ if exc.errno == getattr(errno, "EFTYPE", 0):
+ # gh-108948: On FreeBSD, regular users cannot set
+ # the sticky bit.
+ self.skipTest("chmod() failed with EFTYPE: "
+ "regular users cannot set sticky bit")
+ else:
+ raise
+
+ got_mode = os.stat(tmp_filename).st_mode
+ _t_file = 't' if (got_mode & stat.S_ISVTX) else 'x'
+ _suid_file = 's' if (got_mode & stat.S_ISUID) else 'x'
+ _sgid_file = 's' if (got_mode & stat.S_ISGID) else 'x'
+ finally:
+ os.unlink(tmp_filename)
os.mkdir(tmp_filename)
- os.chmod(tmp_filename, os.stat(tmp_filename).st_mode | stat.S_ISVTX)
- have_sticky_dirs = (os.stat(tmp_filename).st_mode & stat.S_ISVTX)
+ new_mode = (os.stat(tmp_filename).st_mode
+ | stat.S_ISVTX | stat.S_ISGID | stat.S_ISUID)
+ os.chmod(tmp_filename, new_mode)
+ got_mode = os.stat(tmp_filename).st_mode
+ _t_dir = 't' if (got_mode & stat.S_ISVTX) else 'x'
+ _suid_dir = 's' if (got_mode & stat.S_ISUID) else 'x'
+ _sgid_dir = 's' if (got_mode & stat.S_ISGID) else 'x'
os.rmdir(tmp_filename)
with self.check_context(arc.open(), 'fully_trusted'):
- if have_sticky_files:
- self.expect_file('all_bits', mode='?rwsrwsrwt')
- else:
- self.expect_file('all_bits', mode='?rwsrwsrwx')
+ self.expect_file('all_bits',
+ mode=f'?rw{_suid_file}rw{_sgid_file}rw{_t_file}')
self.expect_file('perm_bits', mode='?rwxrwxrwx')
self.expect_file('exec_group_other', mode='?rw-rwxrwx')
self.expect_file('read_group_only', mode='?---r-----')
self.expect_file('no_bits', mode='?---------')
- if have_sticky_dirs:
- self.expect_file('dir/', mode='?---rwsrwt')
- else:
- self.expect_file('dir/', mode='?---rwsrwx')
+ self.expect_file('dir/', mode=f'?---rw{_sgid_dir}rw{_t_dir}')
+ self.expect_file('dir_all_bits/',
+ mode=f'?rw{_suid_dir}rw{_sgid_dir}rw{_t_dir}')
with self.check_context(arc.open(), 'tar'):
self.expect_file('all_bits', mode='?rwxr-xr-x')
@@ -3844,6 +3873,7 @@ def test_modes(self):
self.expect_file('read_group_only', mode='?---r-----')
self.expect_file('no_bits', mode='?---------')
self.expect_file('dir/', mode='?---r-xr-x')
+ self.expect_file('dir_all_bits/', mode='?rwxr-xr-x')
with self.check_context(arc.open(), 'data'):
normal_dir_mode = stat.filemode(stat.S_IMODE(
@@ -3854,6 +3884,7 @@ def test_modes(self):
self.expect_file('read_group_only', mode='?rw-r-----')
self.expect_file('no_bits', mode='?rw-------')
self.expect_file('dir/', mode=normal_dir_mode)
+ self.expect_file('dir_all_bits/', mode=normal_dir_mode)
def test_pipe(self):
# Test handling of a special file
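The f-string mode patterns built above follow the standard symbolic notation in which the setuid/setgid/sticky bits replace the corresponding execute character ('s'/'t'); stat.filemode() renders the same notation, as this small illustration shows:

    import stat

    mode = stat.S_IFREG | 0o777 | stat.S_ISUID | stat.S_ISGID | stat.S_ISVTX
    print(stat.filemode(mode))                   # '-rwsrwsrwt'
    print(stat.filemode(stat.S_IFDIR | 0o1775))  # 'drwxrwxr-t'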
diff --git a/Lib/test/test_tempfile.py b/Lib/test/test_tempfile.py
index db08fb1c7f2a42..1673507e2f7c91 100644
--- a/Lib/test/test_tempfile.py
+++ b/Lib/test/test_tempfile.py
@@ -1834,9 +1834,25 @@ def test_modes(self):
d.cleanup()
self.assertFalse(os.path.exists(d.name))
- @unittest.skipUnless(hasattr(os, 'chflags'), 'requires os.lchflags')
+ @unittest.skipUnless(hasattr(os, 'chflags'), 'requires os.chflags')
def test_flags(self):
flags = stat.UF_IMMUTABLE | stat.UF_NOUNLINK
+
+ # skip the test if these flags are not supported (e.g. FreeBSD 13)
+ filename = os_helper.TESTFN
+ try:
+ open(filename, "w").close()
+ try:
+ os.chflags(filename, flags)
+ except OSError as exc:
+ # "OSError: [Errno 45] Operation not supported"
+ self.skipTest(f"chflags() doesn't support "
+ f"UF_IMMUTABLE|UF_NOUNLINK: {exc}")
+ else:
+ os.chflags(filename, 0)
+ finally:
+ os_helper.unlink(filename)
+
d = self.do_create(recurse=3, dirs=2, files=2)
with d:
# Change files and directories flags recursively.
diff --git a/Lib/test/test_timeout.py b/Lib/test/test_timeout.py
index 30e843a423a777..35ff56f1a5ee09 100644
--- a/Lib/test/test_timeout.py
+++ b/Lib/test/test_timeout.py
@@ -148,13 +148,12 @@ def setUp(self):
def tearDown(self):
self.sock.close()
- @unittest.skipIf(True, 'need to replace these hosts; see bpo-35518')
def testConnectTimeout(self):
# Testing connect timeout is tricky: we need to have IP connectivity
# to a host that silently drops our packets. We can't simulate this
# from Python because it's a function of the underlying TCP/IP stack.
- # So, the following Snakebite host has been defined:
- blackhole = resolve_address('blackhole.snakebite.net', 56666)
+ # So, the following port on the pythontest.net host has been defined:
+ blackhole = resolve_address('pythontest.net', 56666)
# Blackhole has been configured to silently drop any incoming packets.
# No RSTs (for TCP) or ICMP UNREACH (for UDP/ICMP) will be sent back
@@ -166,7 +165,7 @@ def testConnectTimeout(self):
# to firewalling or general network configuration. In order to improve
# our confidence in testing the blackhole, a corresponding 'whitehole'
# has also been set up using one port higher:
- whitehole = resolve_address('whitehole.snakebite.net', 56667)
+ whitehole = resolve_address('pythontest.net', 56667)
# This address has been configured to immediately drop any incoming
# packets as well, but it does it respectfully with regards to the
@@ -180,20 +179,15 @@ def testConnectTimeout(self):
# timeframe).
# For the records, the whitehole/blackhole configuration has been set
- # up using the 'pf' firewall (available on BSDs), using the following:
+ # up using the 'iptables' firewall, using the following rules:
#
- # ext_if="bge0"
- #
- # blackhole_ip="35.8.247.6"
- # whitehole_ip="35.8.247.6"
- # blackhole_port="56666"
- # whitehole_port="56667"
- #
- # block return in log quick on $ext_if proto { tcp udp } \
- # from any to $whitehole_ip port $whitehole_port
- # block drop in log quick on $ext_if proto { tcp udp } \
- # from any to $blackhole_ip port $blackhole_port
+ # -A INPUT -p tcp --destination-port 56666 -j DROP
+ # -A INPUT -p udp --destination-port 56666 -j DROP
+ # -A INPUT -p tcp --destination-port 56667 -j REJECT
+ # -A INPUT -p udp --destination-port 56667 -j REJECT
#
+ # See https://github.com/python/psf-salt/blob/main/pillar/base/firewall/snakebite.sls
+ # for the current configuration.
skip = True
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
diff --git a/Lib/test/test_tkinter/support.py b/Lib/test/test_tkinter/support.py
index 10e64bf40a4afa..a37705f0ae6feb 100644
--- a/Lib/test/test_tkinter/support.py
+++ b/Lib/test/test_tkinter/support.py
@@ -1,6 +1,5 @@
import functools
import tkinter
-import unittest
class AbstractTkTest:
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 7863e27fccd972..94fb6d933de114 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1200,7 +1200,7 @@ class TestTokenizerAdheresToPep0263(TestCase):
"""
def _testFile(self, filename):
- path = os.path.join(os.path.dirname(__file__), filename)
+ path = os.path.join(os.path.dirname(__file__), 'tokenizedata', filename)
with open(path, 'rb') as f:
TestRoundtrip.check_roundtrip(self, f)
@@ -1794,7 +1794,7 @@ def test_roundtrip(self):
self.check_roundtrip("if x == 1 : \n"
" print(x)\n")
- fn = support.findfile("tokenize_tests.txt")
+ fn = support.findfile("tokenize_tests.txt", subdir="tokenizedata")
with open(fn, 'rb') as f:
self.check_roundtrip(f)
self.check_roundtrip("if x == 1:\n"
@@ -1849,8 +1849,7 @@ def test_random_files(self):
# pass the '-ucpu' option to process the full directory.
import glob, random
- fn = support.findfile("tokenize_tests.txt")
- tempdir = os.path.dirname(fn) or os.curdir
+ tempdir = os.path.dirname(__file__) or os.curdir
testfiles = glob.glob(os.path.join(glob.escape(tempdir), "test*.py"))
# Tokenize is broken on test_pep3131.py because regular expressions are
@@ -1860,7 +1859,7 @@ def test_random_files(self):
testfiles.remove(os.path.join(tempdir, "test_unicode_identifiers.py"))
- # TODO: Remove this once we can unparse PEP 701 syntax
+ # TODO: Remove this once we can untokenize PEP 701 syntax
testfiles.remove(os.path.join(tempdir, "test_fstring.py"))
for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'):
diff --git a/Lib/test/test_tomllib/test_misc.py b/Lib/test/test_tomllib/test_misc.py
index a477a219fd9ebd..9e677a337a2835 100644
--- a/Lib/test/test_tomllib/test_misc.py
+++ b/Lib/test/test_tomllib/test_misc.py
@@ -9,6 +9,7 @@
import sys
import tempfile
import unittest
+from test import support
from . import tomllib
@@ -92,13 +93,23 @@ def test_deepcopy(self):
self.assertEqual(obj_copy, expected_obj)
def test_inline_array_recursion_limit(self):
- # 465 with default recursion limit
- nest_count = int(sys.getrecursionlimit() * 0.465)
- recursive_array_toml = "arr = " + nest_count * "[" + nest_count * "]"
- tomllib.loads(recursive_array_toml)
+ with support.infinite_recursion(max_depth=100):
+ available = support.get_recursion_available()
+ nest_count = (available // 2) - 2
+ # Add details if the test fails
+ with self.subTest(limit=sys.getrecursionlimit(),
+ available=available,
+ nest_count=nest_count):
+ recursive_array_toml = "arr = " + nest_count * "[" + nest_count * "]"
+ tomllib.loads(recursive_array_toml)
def test_inline_table_recursion_limit(self):
- # 310 with default recursion limit
- nest_count = int(sys.getrecursionlimit() * 0.31)
- recursive_table_toml = nest_count * "key = {" + nest_count * "}"
- tomllib.loads(recursive_table_toml)
+ with support.infinite_recursion(max_depth=100):
+ available = support.get_recursion_available()
+ nest_count = (available // 3) - 1
+ # Add details if the test fails
+ with self.subTest(limit=sys.getrecursionlimit(),
+ available=available,
+ nest_count=nest_count):
+ recursive_table_toml = nest_count * "key = {" + nest_count * "}"
+ tomllib.loads(recursive_table_toml)
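The rewritten tests derive the nesting depth from the recursion headroom reported by support.get_recursion_available() instead of a hard-coded fraction of sys.getrecursionlimit(); the divisors suggest, as an observation from these numbers rather than a documented contract, roughly two stack frames per nested array and three per nested inline table. Building the inputs themselves is plain string repetition:

    import tomllib

    nest_count = 40  # example value; the tests compute this from recursion headroom
    recursive_array_toml = "arr = " + "[" * nest_count + "]" * nest_count
    recursive_table_toml = "key = {" * nest_count + "}" * nest_count
    tomllib.loads(recursive_array_toml)   # deeply nested empty arrays
    tomllib.loads(recursive_table_toml)   # deeply nested inline tables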
diff --git a/Lib/test/test_tools/test_freeze.py b/Lib/test/test_tools/test_freeze.py
index 922e74b441457a..671ec2961e7f8f 100644
--- a/Lib/test/test_tools/test_freeze.py
+++ b/Lib/test/test_tools/test_freeze.py
@@ -15,6 +15,10 @@
@support.requires_zlib()
@unittest.skipIf(sys.platform.startswith('win'), 'not supported on Windows')
@support.skip_if_buildbot('not all buildbots have enough space')
+# gh-103053: Skip test if Python is built with Profile Guided Optimization
+# (PGO), since the test is just too slow in this case.
+@unittest.skipIf(support.check_cflags_pgo(),
+ 'test is too slow with PGO')
class TestFreeze(unittest.TestCase):
@support.requires_resource('cpu') # Building Python is slow
diff --git a/Lib/test/test_tools/test_reindent.py b/Lib/test/test_tools/test_reindent.py
index 3b0c793a38e4da..64e31c2b7703c0 100644
--- a/Lib/test/test_tools/test_reindent.py
+++ b/Lib/test/test_tools/test_reindent.py
@@ -25,7 +25,7 @@ def test_help(self):
self.assertGreater(err, b'')
def test_reindent_file_with_bad_encoding(self):
- bad_coding_path = findfile('bad_coding.py')
+ bad_coding_path = findfile('bad_coding.py', subdir='tokenizedata')
rc, out, err = assert_python_ok(self.script, '-r', bad_coding_path)
self.assertEqual(out, b'')
self.assertNotEqual(err, b'')
diff --git a/Lib/test/test_trace.py b/Lib/test/test_trace.py
index d1ef005a4314ed..c1e289bcaff9e5 100644
--- a/Lib/test/test_trace.py
+++ b/Lib/test/test_trace.py
@@ -360,9 +360,14 @@ def tearDown(self):
rmtree(TESTFN)
unlink(TESTFN)
- def _coverage(self, tracer,
- cmd='import test.support, test.test_pprint;'
- 'test.support.run_unittest(test.test_pprint.QueryTestCase)'):
+ DEFAULT_SCRIPT = '''if True:
+ import unittest
+ from test.test_pprint import QueryTestCase
+ loader = unittest.TestLoader()
+ tests = loader.loadTestsFromTestCase(QueryTestCase)
+ tests(unittest.TestResult())
+ '''
+ def _coverage(self, tracer, cmd=DEFAULT_SCRIPT):
tracer.run(cmd)
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=TESTFN)
diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py
index 316ade2171e94f..aa8405bd25d120 100644
--- a/Lib/test/test_traceback.py
+++ b/Lib/test/test_traceback.py
@@ -596,6 +596,24 @@ def f_with_binary_operator():
result_lines = self.get_exception(f_with_binary_operator)
self.assertEqual(result_lines, expected_error.splitlines())
+ def test_caret_for_binary_operators_with_spaces_and_parenthesis(self):
+ def f_with_binary_operator():
+ a = 1
+ b = ""
+ return ( a ) + b
+
+ lineno_f = f_with_binary_operator.__code__.co_firstlineno
+ expected_error = (
+ 'Traceback (most recent call last):\n'
+ f' File "{__file__}", line {self.callable_line}, in get_exception\n'
+ ' callable()\n'
+ f' File "{__file__}", line {lineno_f+3}, in f_with_binary_operator\n'
+ ' return ( a ) + b\n'
+ ' ~~~~~~~~~~^~~\n'
+ )
+ result_lines = self.get_exception(f_with_binary_operator)
+ self.assertEqual(result_lines, expected_error.splitlines())
+
def test_caret_for_subscript(self):
def f_with_subscript():
some_dict = {'x': {'y': None}}
@@ -630,6 +648,24 @@ def f_with_subscript():
result_lines = self.get_exception(f_with_subscript)
self.assertEqual(result_lines, expected_error.splitlines())
+ def test_caret_for_subscript_with_spaces_and_parenthesis(self):
+ def f_with_binary_operator():
+ a = []
+ b = c = 1
+ return b [ a ] + c
+
+ lineno_f = f_with_binary_operator.__code__.co_firstlineno
+ expected_error = (
+ 'Traceback (most recent call last):\n'
+ f' File "{__file__}", line {self.callable_line}, in get_exception\n'
+ ' callable()\n'
+ f' File "{__file__}", line {lineno_f+3}, in f_with_binary_operator\n'
+ ' return b [ a ] + c\n'
+ ' ~~~~~~^^^^^^^^^\n'
+ )
+ result_lines = self.get_exception(f_with_binary_operator)
+ self.assertEqual(result_lines, expected_error.splitlines())
+
def test_traceback_specialization_with_syntax_error(self):
bytecode = compile("1 / 0 / 1 / 2\n", TESTFN, "exec")
@@ -1563,27 +1599,28 @@ def __repr__(self):
err_msg = "b'please do not show me as numbers'"
self.assertEqual(self.get_report(e), vanilla + err_msg + '\n')
- def test_exception_with_note_with_multiple_notes(self):
- e = ValueError(42)
- vanilla = self.get_report(e)
+ def test_exception_with_multiple_notes(self):
+ for e in [ValueError(42), SyntaxError('bad syntax')]:
+ with self.subTest(e=e):
+ vanilla = self.get_report(e)
- e.add_note('Note 1')
- e.add_note('Note 2')
- e.add_note('Note 3')
+ e.add_note('Note 1')
+ e.add_note('Note 2')
+ e.add_note('Note 3')
- self.assertEqual(
- self.get_report(e),
- vanilla + 'Note 1\n' + 'Note 2\n' + 'Note 3\n')
+ self.assertEqual(
+ self.get_report(e),
+ vanilla + 'Note 1\n' + 'Note 2\n' + 'Note 3\n')
- del e.__notes__
- e.add_note('Note 4')
- del e.__notes__
- e.add_note('Note 5')
- e.add_note('Note 6')
+ del e.__notes__
+ e.add_note('Note 4')
+ del e.__notes__
+ e.add_note('Note 5')
+ e.add_note('Note 6')
- self.assertEqual(
- self.get_report(e),
- vanilla + 'Note 5\n' + 'Note 6\n')
+ self.assertEqual(
+ self.get_report(e),
+ vanilla + 'Note 5\n' + 'Note 6\n')
def test_exception_qualname(self):
class A:
diff --git a/Lib/test/test_type_cache.py b/Lib/test/test_type_cache.py
index 24f83cd3e172c7..72587ecc11b6f3 100644
--- a/Lib/test/test_type_cache.py
+++ b/Lib/test/test_type_cache.py
@@ -58,4 +58,4 @@ class C:
if __name__ == "__main__":
- support.run_unittest(TypeCacheTests)
+ unittest.main()
diff --git a/Lib/test/test_type_params.py b/Lib/test/test_type_params.py
index 0045057f181e1c..25ee188731f31f 100644
--- a/Lib/test/test_type_params.py
+++ b/Lib/test/test_type_params.py
@@ -412,6 +412,99 @@ def test_comprehension_02(self):
func, = T.__bound__
self.assertEqual(func(), 1)
+ def test_gen_exp_in_nested_class(self):
+ code = """
+ from test.test_type_params import make_base
+
+ class C[T]:
+ T = "class"
+ class Inner(make_base(T for _ in (1,)), make_base(T)):
+ pass
+ """
+ C = run_code(code)["C"]
+ T, = C.__type_params__
+ base1, base2 = C.Inner.__bases__
+ self.assertEqual(list(base1.__arg__), [T])
+ self.assertEqual(base2.__arg__, "class")
+
+ def test_gen_exp_in_nested_generic_class(self):
+ code = """
+ from test.test_type_params import make_base
+
+ class C[T]:
+ T = "class"
+ class Inner[U](make_base(T for _ in (1,)), make_base(T)):
+ pass
+ """
+ with self.assertRaisesRegex(SyntaxError,
+ "Cannot use comprehension in annotation scope within class scope"):
+ run_code(code)
+
+ def test_listcomp_in_nested_class(self):
+ code = """
+ from test.test_type_params import make_base
+
+ class C[T]:
+ T = "class"
+ class Inner(make_base([T for _ in (1,)]), make_base(T)):
+ pass
+ """
+ C = run_code(code)["C"]
+ T, = C.__type_params__
+ base1, base2 = C.Inner.__bases__
+ self.assertEqual(base1.__arg__, [T])
+ self.assertEqual(base2.__arg__, "class")
+
+ def test_listcomp_in_nested_generic_class(self):
+ code = """
+ from test.test_type_params import make_base
+
+ class C[T]:
+ T = "class"
+ class Inner[U](make_base([T for _ in (1,)]), make_base(T)):
+ pass
+ """
+ with self.assertRaisesRegex(SyntaxError,
+ "Cannot use comprehension in annotation scope within class scope"):
+ run_code(code)
+
+ def test_gen_exp_in_generic_method(self):
+ code = """
+ class C[T]:
+ T = "class"
+ def meth[U](x: (T for _ in (1,)), y: T):
+ pass
+ """
+ with self.assertRaisesRegex(SyntaxError,
+ "Cannot use comprehension in annotation scope within class scope"):
+ run_code(code)
+
+ def test_nested_scope_in_generic_alias(self):
+ code = """
+ class C[T]:
+ T = "class"
+ {}
+ """
+ error_cases = [
+ "type Alias1[T] = lambda: T",
+ "type Alias2 = lambda: T",
+ "type Alias3[T] = (T for _ in (1,))",
+ "type Alias4 = (T for _ in (1,))",
+ "type Alias5[T] = [T for _ in (1,)]",
+ "type Alias6 = [T for _ in (1,)]",
+ ]
+ for case in error_cases:
+ with self.subTest(case=case):
+ with self.assertRaisesRegex(SyntaxError,
+ r"Cannot use [a-z]+ in annotation scope within class scope"):
+ run_code(code.format(case))
+
+
+def make_base(arg):
+ class Base:
+ __arg__ = arg
+ return Base
+
def global_generic_func[T]():
pass
@@ -601,6 +694,19 @@ class Cls:
cls = ns["outer"]()
self.assertEqual(cls.Alias.__value__, "class")
+ def test_nested_free(self):
+ ns = run_code("""
+ def f():
+ T = str
+ class C:
+ T = int
+ class D[U](T):
+ x = T
+ return C
+ """)
+ C = ns["f"]()
+ self.assertIn(int, C.D.__bases__)
+ self.assertIs(C.D.x, str)
class TypeParamsManglingTest(unittest.TestCase):
def test_mangling(self):
@@ -956,3 +1062,43 @@ class NewStyle[T]:
for case in cases:
with self.subTest(case=case):
weakref.ref(case)
+
+
+class TypeParamsRuntimeTest(unittest.TestCase):
+ def test_name_error(self):
+ # gh-109118: This crashed the interpreter due to a refcounting bug
+ code = """
+ class name_2[name_5]:
+ class name_4[name_5](name_0):
+ pass
+ """
+ with self.assertRaises(NameError):
+ run_code(code)
+
+ # Crashed with a slightly different stack trace
+ code = """
+ class name_2[name_5]:
+ class name_4[name_5: name_5](name_0):
+ pass
+ """
+ with self.assertRaises(NameError):
+ run_code(code)
+
+ def test_broken_class_namespace(self):
+ code = """
+ class WeirdMapping(dict):
+ def __missing__(self, key):
+ if key == "T":
+ raise RuntimeError
+ raise KeyError(key)
+
+ class Meta(type):
+ def __prepare__(name, bases):
+ return WeirdMapping()
+
+ class MyClass[V](metaclass=Meta):
+ class Inner[U](T):
+ pass
+ """
+ with self.assertRaises(RuntimeError):
+ run_code(code)
diff --git a/Lib/test/test_types.py b/Lib/test/test_types.py
index f2efee90dc0240..c6bff79f903828 100644
--- a/Lib/test/test_types.py
+++ b/Lib/test/test_types.py
@@ -1900,6 +1900,33 @@ def test_pickle(self):
self.assertEqual(ns, ns_roundtrip, pname)
+ def test_replace(self):
+ ns = types.SimpleNamespace(x=11, y=22)
+
+ ns2 = copy.replace(ns)
+ self.assertEqual(ns2, ns)
+ self.assertIsNot(ns2, ns)
+ self.assertIs(type(ns2), types.SimpleNamespace)
+ self.assertEqual(vars(ns2), {'x': 11, 'y': 22})
+ ns2.x = 3
+ self.assertEqual(ns.x, 11)
+ ns.x = 4
+ self.assertEqual(ns2.x, 3)
+
+ self.assertEqual(vars(copy.replace(ns, x=1)), {'x': 1, 'y': 22})
+ self.assertEqual(vars(copy.replace(ns, y=2)), {'x': 4, 'y': 2})
+ self.assertEqual(vars(copy.replace(ns, x=1, y=2)), {'x': 1, 'y': 2})
+
+ def test_replace_subclass(self):
+ class Spam(types.SimpleNamespace):
+ pass
+
+ spam = Spam(ham=8, eggs=9)
+ spam2 = copy.replace(spam, ham=5)
+
+ self.assertIs(type(spam2), Spam)
+ self.assertEqual(vars(spam2), {'ham': 5, 'eggs': 9})
+
def test_fake_namespace_compare(self):
# Issue #24257: Incorrect use of PyObject_IsInstance() caused
# SystemError.
diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py
index 69f5ff913c57bb..9e891f113840be 100644
--- a/Lib/test/test_typing.py
+++ b/Lib/test/test_typing.py
@@ -7586,6 +7586,17 @@ def test_total(self):
self.assertEqual(Options.__required_keys__, frozenset())
self.assertEqual(Options.__optional_keys__, {'log_level', 'log_path'})
+ def test_total_inherits_non_total(self):
+ class TD1(TypedDict, total=False):
+ a: int
+
+ self.assertIs(TD1.__total__, False)
+
+ class TD2(TD1):
+ b: str
+
+ self.assertIs(TD2.__total__, True)
+
def test_optional_keys(self):
class Point2Dor3D(Point2D, total=False):
z: int
@@ -9362,6 +9373,10 @@ def test_all(self):
self.assertIn('SupportsComplex', a)
def test_all_exported_names(self):
+ # ensure all dynamically created objects are actualised
+ for name in typing.__all__:
+ getattr(typing, name)
+
actual_all = set(typing.__all__)
computed_all = {
k for k, v in vars(typing).items()
diff --git a/Lib/test/test_unicode_identifiers.py b/Lib/test/test_unicode_identifiers.py
index 5b9ced5d1cb837..63c6c055824b20 100644
--- a/Lib/test/test_unicode_identifiers.py
+++ b/Lib/test/test_unicode_identifiers.py
@@ -19,7 +19,7 @@ def test_non_bmp_normalized(self):
def test_invalid(self):
try:
- from test import badsyntax_3131
+ from test.tokenizedata import badsyntax_3131
except SyntaxError as err:
self.assertEqual(str(err),
"invalid character '€' (U+20AC) (badsyntax_3131.py, line 2)")
diff --git a/Lib/test/test_unicodedata.py b/Lib/test/test_unicodedata.py
index 515c3840cb3647..6adf03316ca0bb 100644
--- a/Lib/test/test_unicodedata.py
+++ b/Lib/test/test_unicodedata.py
@@ -18,7 +18,7 @@
class UnicodeMethodsTest(unittest.TestCase):
# update this, if the database changes
- expectedchecksum = 'e708c31c0d51f758adf475cb7201cf80917362be'
+ expectedchecksum = '63aa77dcb36b0e1df082ee2a6071caeda7f0955e'
@requires_resource('cpu')
def test_method_checksum(self):
@@ -71,7 +71,7 @@ class UnicodeFunctionsTest(UnicodeDatabaseTest):
# Update this if the database changes. Make sure to do a full rebuild
# (e.g. 'make distclean && make') to get the correct checksum.
- expectedchecksum = '26ff0d31c14194b4606a5b3a81ac36df3a14e331'
+ expectedchecksum = '232affd2a50ec4bd69d2482aa0291385cbdefaba'
@requires_resource('cpu')
def test_function_checksum(self):
diff --git a/Lib/test/test_unittest/test_discovery.py b/Lib/test/test_unittest/test_discovery.py
index 946fa1258ea25e..dcb72d73efceab 100644
--- a/Lib/test/test_unittest/test_discovery.py
+++ b/Lib/test/test_unittest/test_discovery.py
@@ -6,7 +6,6 @@
import pickle
from test import support
from test.support import import_helper
-import test.test_importlib.util
import unittest
import unittest.mock
@@ -572,7 +571,7 @@ def _get_module_from_name(name):
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.skipped), 1)
- self.assertEqual(result.testsRun, 1)
+ self.assertEqual(result.testsRun, 0)
self.assertEqual(import_calls, ['my_package'])
# Check picklability
@@ -826,6 +825,8 @@ def restore():
'as dotted module names')
def test_discovery_failed_discovery(self):
+ from test.test_importlib import util
+
loader = unittest.TestLoader()
package = types.ModuleType('package')
@@ -837,7 +838,7 @@ def _import(packagename, *args, **kwargs):
# Since loader.discover() can modify sys.path, restore it when done.
with import_helper.DirsOnSysPath():
# Make sure to remove 'package' from sys.modules when done.
- with test.test_importlib.util.uncache('package'):
+ with util.uncache('package'):
with self.assertRaises(TypeError) as cm:
loader.discover('package')
self.assertEqual(str(cm.exception),
diff --git a/Lib/test/test_unittest/test_loader.py b/Lib/test/test_unittest/test_loader.py
index f32450c9223d8b..83dd25ca54623f 100644
--- a/Lib/test/test_unittest/test_loader.py
+++ b/Lib/test/test_unittest/test_loader.py
@@ -82,6 +82,22 @@ def runTest(self):
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
+ # "Do not load any tests from `TestCase` class itself."
+ def test_loadTestsFromTestCase__from_TestCase(self):
+ loader = unittest.TestLoader()
+
+ suite = loader.loadTestsFromTestCase(unittest.TestCase)
+ self.assertIsInstance(suite, loader.suiteClass)
+ self.assertEqual(list(suite), [])
+
+ # "Do not load any tests from `FunctionTestCase` class."
+ def test_loadTestsFromTestCase__from_FunctionTestCase(self):
+ loader = unittest.TestLoader()
+
+ suite = loader.loadTestsFromTestCase(unittest.FunctionTestCase)
+ self.assertIsInstance(suite, loader.suiteClass)
+ self.assertEqual(list(suite), [])
+
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
@@ -103,6 +119,19 @@ def test(self):
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
+ # "This test ensures that internal `TestCase` subclasses are not loaded"
+ def test_loadTestsFromModule__TestCase_subclass_internals(self):
+ # See https://github.com/python/cpython/issues/84867
+ m = types.ModuleType('m')
+ # Simulate imported names:
+ m.TestCase = unittest.TestCase
+ m.FunctionTestCase = unittest.FunctionTestCase
+
+ loader = unittest.TestLoader()
+ suite = loader.loadTestsFromModule(m)
+ self.assertIsInstance(suite, loader.suiteClass)
+ self.assertEqual(list(suite), [])
+
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
diff --git a/Lib/test/test_unittest/test_skipping.py b/Lib/test/test_unittest/test_skipping.py
index f146dcac18ecc0..1a6af06d32b433 100644
--- a/Lib/test/test_unittest/test_skipping.py
+++ b/Lib/test/test_unittest/test_skipping.py
@@ -103,16 +103,16 @@ def test_dont_skip(self): pass
result = LoggingResult(events)
self.assertIs(suite.run(result), result)
self.assertEqual(len(result.skipped), 1)
- expected = ['startTest', 'addSkip', 'stopTest',
- 'startTest', 'addSuccess', 'stopTest']
+ expected = ['addSkip', 'stopTest', 'startTest',
+ 'addSuccess', 'stopTest']
self.assertEqual(events, expected)
- self.assertEqual(result.testsRun, 2)
+ self.assertEqual(result.testsRun, 1)
self.assertEqual(result.skipped, [(test_do_skip, "testing")])
self.assertTrue(result.wasSuccessful())
events = []
result = test_do_skip.run()
- self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
+ self.assertEqual(events, ['startTestRun', 'addSkip',
'stopTest', 'stopTestRun'])
self.assertEqual(result.skipped, [(test_do_skip, "testing")])
@@ -135,13 +135,13 @@ def test_1(self):
test = Foo("test_1")
suite = unittest.TestSuite([test])
self.assertIs(suite.run(result), result)
- self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
+ self.assertEqual(events, ['addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
events = []
result = test.run()
- self.assertEqual(events, ['startTestRun', 'startTest', 'addSkip',
+ self.assertEqual(events, ['startTestRun', 'addSkip',
'stopTest', 'stopTestRun'])
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
diff --git a/Lib/test/test_unittest/testmock/testmock.py b/Lib/test/test_unittest/testmock/testmock.py
index bb09913d70b7ca..d23eb87696f406 100644
--- a/Lib/test/test_unittest/testmock/testmock.py
+++ b/Lib/test/test_unittest/testmock/testmock.py
@@ -2270,7 +2270,7 @@ def test_misspelled_arguments(self):
class Foo():
one = 'one'
# patch, patch.object and create_autospec need to check for misspelled
- # arguments explicitly and throw a RuntimError if found.
+ # arguments explicitly and throw a RuntimeError if found.
with self.assertRaises(RuntimeError):
with patch(f'{__name__}.Something.meth', autospect=True): pass
with self.assertRaises(RuntimeError):
diff --git a/Lib/test/test_unittest/testmock/testthreadingmock.py b/Lib/test/test_unittest/testmock/testthreadingmock.py
index 94e71921d9bc03..a02b532ed447cd 100644
--- a/Lib/test/test_unittest/testmock/testthreadingmock.py
+++ b/Lib/test/test_unittest/testmock/testthreadingmock.py
@@ -3,7 +3,7 @@
import concurrent.futures
from test.support import threading_helper
-from unittest.mock import patch, ThreadingMock, call
+from unittest.mock import patch, ThreadingMock
threading_helper.requires_working_threading(module=True)
diff --git a/Lib/test/test_unpack.py b/Lib/test/test_unpack.py
index f5ca1d455b5c6f..515ec128a08a9c 100644
--- a/Lib/test/test_unpack.py
+++ b/Lib/test/test_unpack.py
@@ -162,7 +162,7 @@ def test_extended_oparg_not_ignored(self):
ns = {}
exec(code, ns)
unpack_400 = ns["unpack_400"]
- # Warm up the the function for quickening (PEP 659)
+ # Warm up the function for quickening (PEP 659)
for _ in range(30):
y = unpack_400(range(400))
self.assertEqual(y, 399)
diff --git a/Lib/test/test_unparse.py b/Lib/test/test_unparse.py
index b3efb61e83049e..bdf7b0588bee67 100644
--- a/Lib/test/test_unparse.py
+++ b/Lib/test/test_unparse.py
@@ -197,6 +197,10 @@ def test_fstrings_complicated(self):
self.check_ast_roundtrip('''f"a\\r\\nb"''')
self.check_ast_roundtrip('''f"\\u2028{'x'}"''')
+ def test_fstrings_pep701(self):
+ self.check_ast_roundtrip('f" something { my_dict["key"] } something else "')
+ self.check_ast_roundtrip('f"{f"{f"{f"{f"{f"{1+1}"}"}"}"}"}"')
+
def test_strings(self):
self.check_ast_roundtrip("u'foo'")
self.check_ast_roundtrip("r'foo'")
@@ -378,8 +382,15 @@ def test_invalid_fstring_value(self):
)
)
- def test_invalid_fstring_backslash(self):
- self.check_invalid(ast.FormattedValue(value=ast.Constant(value="\\\\")))
+ def test_fstring_backslash(self):
+ # valid since Python 3.12
+ self.assertEqual(ast.unparse(
+ ast.FormattedValue(
+ value=ast.Constant(value="\\\\"),
+ conversion=-1,
+ format_spec=None,
+ )
+ ), "{'\\\\\\\\'}")
def test_invalid_yield_from(self):
self.check_invalid(ast.YieldFrom(value=None))
@@ -502,11 +513,11 @@ def test_class_bases_and_keywords(self):
self.check_src_roundtrip("class X(*args, **kwargs):\n pass")
def test_fstrings(self):
- self.check_src_roundtrip('''f\'\'\'-{f"""*{f"+{f'.{x}.'}+"}*"""}-\'\'\'''')
- self.check_src_roundtrip('''f"\\u2028{'x'}"''')
+ self.check_src_roundtrip("f'-{f'*{f'+{f'.{x}.'}+'}*'}-'")
+ self.check_src_roundtrip("f'\\u2028{'x'}'")
self.check_src_roundtrip(r"f'{x}\n'")
- self.check_src_roundtrip('''f''\'{"""\n"""}\\n''\'''')
- self.check_src_roundtrip('''f''\'{f"""{x}\n"""}\\n''\'''')
+ self.check_src_roundtrip("f'{'\\n'}\\n'")
+ self.check_src_roundtrip("f'{f'{x}\\n'}\\n'")
def test_docstrings(self):
docstrings = (
@@ -624,6 +635,20 @@ def test_star_expr_assign_target_multiple(self):
self.check_src_roundtrip("[a, b] = [c, d] = [e, f] = g")
self.check_src_roundtrip("a, b = [c, d] = e, f = g")
+ def test_multiquote_joined_string(self):
+ self.check_ast_roundtrip("f\"'''{1}\\\"\\\"\\\"\" ")
+ self.check_ast_roundtrip("""f"'''{1}""\\"" """)
+ self.check_ast_roundtrip("""f'""\"{1}''' """)
+ self.check_ast_roundtrip("""f'""\"{1}""\\"' """)
+
+ self.check_ast_roundtrip("""f"'''{"\\n"}""\\"" """)
+ self.check_ast_roundtrip("""f'""\"{"\\n"}''' """)
+ self.check_ast_roundtrip("""f'""\"{"\\n"}""\\"' """)
+
+ self.check_ast_roundtrip("""f'''""\"''\\'{"\\n"}''' """)
+ self.check_ast_roundtrip("""f'''""\"''\\'{"\\n\\"'"}''' """)
+ self.check_ast_roundtrip("""f'''""\"''\\'{""\"\\n\\"'''""\" '''\\n'''}''' """)
+
class ManualASTCreationTestCase(unittest.TestCase):
"""Test that AST nodes created without a type_params field unparse correctly."""
diff --git a/Lib/test/test_urllib2_localnet.py b/Lib/test/test_urllib2_localnet.py
index 0dcdbac76b50f2..50c491a3cfd3d0 100644
--- a/Lib/test/test_urllib2_localnet.py
+++ b/Lib/test/test_urllib2_localnet.py
@@ -21,9 +21,9 @@
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
-CERT_localhost = os.path.join(here, 'keycert.pem')
+CERT_localhost = os.path.join(here, 'certdata', 'keycert.pem')
# Self-signed cert file for 'fakehostname'
-CERT_fakehostname = os.path.join(here, 'keycert2.pem')
+CERT_fakehostname = os.path.join(here, 'certdata', 'keycert2.pem')
# Loopback http server infrastructure
diff --git a/Lib/test/test_urllib2net.py b/Lib/test/test_urllib2net.py
index d8d882b2d33589..f0874d8d3ce463 100644
--- a/Lib/test/test_urllib2net.py
+++ b/Lib/test/test_urllib2net.py
@@ -133,6 +133,7 @@ def setUp(self):
# XXX The rest of these tests aren't very good -- they don't check much.
# They do sometimes catch some major disasters, though.
+ @support.requires_resource('walltime')
def test_ftp(self):
# Testing the same URL twice exercises the caching in CacheFTPHandler
urls = [
@@ -196,6 +197,7 @@ def test_urlwithfrag(self):
self.assertEqual(res.geturl(),
"http://www.pythontest.net/index.html#frag")
+ @support.requires_resource('walltime')
def test_redirect_url_withfrag(self):
redirect_url_with_frag = "http://www.pythontest.net/redir/with_frag/"
with socket_helper.transient_internet(redirect_url_with_frag):
@@ -334,6 +336,7 @@ def test_http_timeout(self):
FTP_HOST = 'ftp://www.pythontest.net/'
+ @support.requires_resource('walltime')
def test_ftp_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
with socket_helper.transient_internet(self.FTP_HOST, timeout=None):
@@ -352,6 +355,7 @@ def test_ftp_default_timeout(self):
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
+ @support.requires_resource('walltime')
def test_ftp_no_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
with socket_helper.transient_internet(self.FTP_HOST):
@@ -363,6 +367,7 @@ def test_ftp_no_timeout(self):
socket.setdefaulttimeout(None)
self.assertIsNone(u.fp.fp.raw._sock.gettimeout())
+ @support.requires_resource('walltime')
def test_ftp_timeout(self):
with socket_helper.transient_internet(self.FTP_HOST):
u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
diff --git a/Lib/test/test_urllibnet.py b/Lib/test/test_urllibnet.py
index 773101ce41f602..49a3b5afdebb2f 100644
--- a/Lib/test/test_urllibnet.py
+++ b/Lib/test/test_urllibnet.py
@@ -109,6 +109,7 @@ def test_getcode(self):
open_url.close()
self.assertEqual(code, 404)
+ @support.requires_resource('walltime')
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
@@ -191,6 +192,7 @@ def test_header(self):
logo = "http://www.pythontest.net/"
+ @support.requires_resource('walltime')
def test_data_header(self):
with self.urlretrieve(self.logo) as (file_location, fileheaders):
datevalue = fileheaders.get('Date')
diff --git a/Lib/test/test_utf8source.py b/Lib/test/test_utf8source.py
index 97dced8a622889..c42b6aaaab579d 100644
--- a/Lib/test/test_utf8source.py
+++ b/Lib/test/test_utf8source.py
@@ -1,5 +1,3 @@
-# This file is marked as binary in the CVS, to prevent MacCVS from recoding it.
-
import unittest
class PEP3120Test(unittest.TestCase):
@@ -16,7 +14,7 @@ def test_pep3120(self):
def test_badsyntax(self):
try:
- import test.badsyntax_pep3120
+ import test.tokenizedata.badsyntax_pep3120
except SyntaxError as msg:
msg = str(msg).lower()
self.assertTrue('utf-8' in msg)
diff --git a/Lib/test/test_venv.py b/Lib/test/test_venv.py
index aa6a8fbf8cfd17..a894bb10bd04da 100644
--- a/Lib/test/test_venv.py
+++ b/Lib/test/test_venv.py
@@ -82,6 +82,13 @@ def setUp(self):
def tearDown(self):
rmtree(self.env_dir)
+ def envpy(self, *, real_env_dir=False):
+ if real_env_dir:
+ env_dir = os.path.realpath(self.env_dir)
+ else:
+ env_dir = self.env_dir
+ return os.path.join(env_dir, self.bindir, self.exe)
+
def run_with_capture(self, func, *args, **kwargs):
with captured_stdout() as output:
with captured_stderr() as error:
@@ -138,7 +145,8 @@ def _check_output_of_default_create(self):
self.assertIn('executable = %s' %
os.path.realpath(sys.executable), data)
copies = '' if os.name=='nt' else ' --copies'
- cmd = f'command = {sys.executable} -m venv{copies} --without-pip {self.env_dir}'
+ cmd = (f'command = {sys.executable} -m venv{copies} --without-pip '
+ f'--without-scm-ignore-files {self.env_dir}')
self.assertIn(cmd, data)
fn = self.get_env_file(self.bindir, self.exe)
if not os.path.exists(fn): # diagnostics for Windows buildbot failures
@@ -148,35 +156,37 @@ def _check_output_of_default_create(self):
self.assertTrue(os.path.exists(fn), 'File %r should exist.' % fn)
def test_config_file_command_key(self):
- attrs = [
- (None, None),
- ('symlinks', '--copies'),
- ('with_pip', '--without-pip'),
- ('system_site_packages', '--system-site-packages'),
- ('clear', '--clear'),
- ('upgrade', '--upgrade'),
- ('upgrade_deps', '--upgrade-deps'),
- ('prompt', '--prompt'),
+ options = [
+ (None, None, None), # Default case.
+ ('--copies', 'symlinks', False),
+ ('--without-pip', 'with_pip', False),
+ ('--system-site-packages', 'system_site_packages', True),
+ ('--clear', 'clear', True),
+ ('--upgrade', 'upgrade', True),
+ ('--upgrade-deps', 'upgrade_deps', True),
+ ('--prompt', 'prompt', True),
+ ('--without-scm-ignore-files', 'scm_ignore_files', frozenset()),
]
- for attr, opt in attrs:
- rmtree(self.env_dir)
- if not attr:
- b = venv.EnvBuilder()
- else:
- b = venv.EnvBuilder(
- **{attr: False if attr in ('with_pip', 'symlinks') else True})
- b.upgrade_dependencies = Mock() # avoid pip command to upgrade deps
- b._setup_pip = Mock() # avoid pip setup
- self.run_with_capture(b.create, self.env_dir)
- data = self.get_text_file_contents('pyvenv.cfg')
- if not attr:
- for opt in ('--system-site-packages', '--clear', '--upgrade',
- '--upgrade-deps', '--prompt'):
- self.assertNotRegex(data, rf'command = .* {opt}')
- elif os.name=='nt' and attr=='symlinks':
- pass
- else:
- self.assertRegex(data, rf'command = .* {opt}')
+ for opt, attr, value in options:
+ with self.subTest(opt=opt, attr=attr, value=value):
+ rmtree(self.env_dir)
+ if not attr:
+ kwargs = {}
+ else:
+ kwargs = {attr: value}
+ b = venv.EnvBuilder(**kwargs)
+ b.upgrade_dependencies = Mock() # avoid pip command to upgrade deps
+ b._setup_pip = Mock() # avoid pip setup
+ self.run_with_capture(b.create, self.env_dir)
+ data = self.get_text_file_contents('pyvenv.cfg')
+ if not attr or opt.endswith('git'):
+ for opt in ('--system-site-packages', '--clear', '--upgrade',
+ '--upgrade-deps', '--prompt'):
+ self.assertNotRegex(data, rf'command = .* {opt}')
+ elif os.name=='nt' and attr=='symlinks':
+ pass
+ else:
+ self.assertRegex(data, rf'command = .* {opt}')
def test_prompt(self):
env_name = os.path.split(self.env_dir)[1]
@@ -243,8 +253,7 @@ def test_prefixes(self):
# check a venv's prefixes
rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
- envpy = os.path.join(self.env_dir, self.bindir, self.exe)
- cmd = [envpy, '-c', None]
+ cmd = [self.envpy(), '-c', None]
for prefix, expected in (
('prefix', self.env_dir),
('exec_prefix', self.env_dir),
@@ -261,8 +270,7 @@ def test_sysconfig(self):
"""
rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir, symlinks=False)
- envpy = os.path.join(self.env_dir, self.bindir, self.exe)
- cmd = [envpy, '-c', None]
+ cmd = [self.envpy(), '-c', None]
for call, expected in (
# installation scheme
('get_preferred_scheme("prefix")', 'venv'),
@@ -284,8 +292,7 @@ def test_sysconfig_symlinks(self):
"""
rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir, symlinks=True)
- envpy = os.path.join(self.env_dir, self.bindir, self.exe)
- cmd = [envpy, '-c', None]
+ cmd = [self.envpy(), '-c', None]
for call, expected in (
# installation scheme
('get_preferred_scheme("prefix")', 'venv'),
@@ -424,8 +431,7 @@ def test_executable(self):
"""
rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
- envpy = os.path.join(os.path.realpath(self.env_dir),
- self.bindir, self.exe)
+ envpy = self.envpy(real_env_dir=True)
out, err = check_output([envpy, '-c',
'import sys; print(sys.executable)'])
self.assertEqual(out.strip(), envpy.encode())
@@ -438,8 +444,7 @@ def test_executable_symlinks(self):
rmtree(self.env_dir)
builder = venv.EnvBuilder(clear=True, symlinks=True)
builder.create(self.env_dir)
- envpy = os.path.join(os.path.realpath(self.env_dir),
- self.bindir, self.exe)
+ envpy = self.envpy(real_env_dir=True)
out, err = check_output([envpy, '-c',
'import sys; print(sys.executable)'])
self.assertEqual(out.strip(), envpy.encode())
@@ -454,7 +459,6 @@ def test_unicode_in_batch_file(self):
builder = venv.EnvBuilder(clear=True)
builder.create(env_dir)
activate = os.path.join(env_dir, self.bindir, 'activate.bat')
- envpy = os.path.join(env_dir, self.bindir, self.exe)
out, err = check_output(
[activate, '&', self.exe, '-c', 'print(0)'],
encoding='oem',
@@ -473,9 +477,7 @@ def test_multiprocessing(self):
rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
- envpy = os.path.join(os.path.realpath(self.env_dir),
- self.bindir, self.exe)
- out, err = check_output([envpy, '-c',
+ out, err = check_output([self.envpy(real_env_dir=True), '-c',
'from multiprocessing import Pool; '
'pool = Pool(1); '
'print(pool.apply_async("Python".lower).get(3)); '
@@ -491,10 +493,8 @@ def test_multiprocessing_recursion(self):
rmtree(self.env_dir)
self.run_with_capture(venv.create, self.env_dir)
- envpy = os.path.join(os.path.realpath(self.env_dir),
- self.bindir, self.exe)
script = os.path.join(TEST_HOME_DIR, '_test_venv_multiprocessing.py')
- subprocess.check_call([envpy, script])
+ subprocess.check_call([self.envpy(real_env_dir=True), script])
@unittest.skipIf(os.name == 'nt', 'not relevant on Windows')
def test_deactivate_with_strict_bash_opts(self):
@@ -521,9 +521,7 @@ def test_macos_env(self):
builder = venv.EnvBuilder()
builder.create(self.env_dir)
- envpy = os.path.join(os.path.realpath(self.env_dir),
- self.bindir, self.exe)
- out, err = check_output([envpy, '-c',
+ out, err = check_output([self.envpy(real_env_dir=True), '-c',
'import os; print("__PYVENV_LAUNCHER__" in os.environ)'])
self.assertEqual(out.strip(), 'False'.encode())
@@ -585,6 +583,7 @@ def test_zippath_from_non_installed_posix(self):
"-m",
"venv",
"--without-pip",
+ "--without-scm-ignore-files",
self.env_dir]
# Our fake non-installed python is not fully functional because
# it cannot find the extensions. Set PYTHONPATH so it can run the
@@ -609,13 +608,13 @@ def test_zippath_from_non_installed_posix(self):
# prevent https://github.com/python/cpython/issues/104839
child_env["ASAN_OPTIONS"] = asan_options
subprocess.check_call(cmd, env=child_env)
- envpy = os.path.join(self.env_dir, self.bindir, self.exe)
# Now check the venv created from the non-installed python has
# correct zip path in pythonpath.
- cmd = [envpy, '-S', '-c', 'import sys; print(sys.path)']
+ cmd = [self.envpy(), '-S', '-c', 'import sys; print(sys.path)']
out, err = check_output(cmd)
self.assertTrue(zip_landmark.encode() in out)
+ @requireVenvCreate
def test_activate_shell_script_has_no_dos_newlines(self):
"""
Test that the `activate` shell script contains no CR LF.
@@ -632,13 +631,80 @@ def test_activate_shell_script_has_no_dos_newlines(self):
error_message = f"CR LF found in line {i}"
self.assertFalse(line.endswith(b'\r\n'), error_message)
+ @requireVenvCreate
+ def test_scm_ignore_files_git(self):
+ """
+ Test that a .gitignore file is created when "git" is specified.
+ The file should contain a `*\n` line.
+ """
+ self.run_with_capture(venv.create, self.env_dir,
+ scm_ignore_files={'git'})
+ file_lines = self.get_text_file_contents('.gitignore').splitlines()
+ self.assertIn('*', file_lines)
+
+ @requireVenvCreate
+ def test_create_scm_ignore_files_multiple(self):
+ """
+ Test that ``scm_ignore_files`` can work with multiple SCMs.
+ """
+ bzrignore_name = ".bzrignore"
+ contents = "# For Bazaar.\n*\n"
+
+ class BzrEnvBuilder(venv.EnvBuilder):
+ def create_bzr_ignore_file(self, context):
+                bzrignore_path = os.path.join(context.env_dir, bzrignore_name)
+                with open(bzrignore_path, 'w', encoding='utf-8') as file:
+ file.write(contents)
+
+ builder = BzrEnvBuilder(scm_ignore_files={'git', 'bzr'})
+ self.run_with_capture(builder.create, self.env_dir)
+
+ gitignore_lines = self.get_text_file_contents('.gitignore').splitlines()
+ self.assertIn('*', gitignore_lines)
+
+ bzrignore = self.get_text_file_contents(bzrignore_name)
+ self.assertEqual(bzrignore, contents)
+
+ @requireVenvCreate
+ def test_create_scm_ignore_files_empty(self):
+ """
+ Test that no default ignore files are created when ``scm_ignore_files``
+ is empty.
+ """
+ # scm_ignore_files is set to frozenset() by default.
+ self.run_with_capture(venv.create, self.env_dir)
+ with self.assertRaises(FileNotFoundError):
+ self.get_text_file_contents('.gitignore')
+
+ self.assertIn("--without-scm-ignore-files",
+ self.get_text_file_contents('pyvenv.cfg'))
+
+ @requireVenvCreate
+ def test_cli_with_scm_ignore_files(self):
+ """
+        Test that SCM ignore files are created by default when using the CLI.
+ """
+ self.run_with_capture(venv.main, ['--without-pip', self.env_dir])
+
+ gitignore_lines = self.get_text_file_contents('.gitignore').splitlines()
+ self.assertIn('*', gitignore_lines)
+
+ @requireVenvCreate
+ def test_cli_without_scm_ignore_files(self):
+ """
+ Test that ``--without-scm-ignore-files`` doesn't create SCM ignore files.
+ """
+ args = ['--without-pip', '--without-scm-ignore-files', self.env_dir]
+ self.run_with_capture(venv.main, args)
+
+ with self.assertRaises(FileNotFoundError):
+ self.get_text_file_contents('.gitignore')
+
@requireVenvCreate
class EnsurePipTest(BaseTest):
"""Test venv module installation of pip."""
def assert_pip_not_installed(self):
- envpy = os.path.join(os.path.realpath(self.env_dir),
- self.bindir, self.exe)
- out, err = check_output([envpy, '-c',
+ out, err = check_output([self.envpy(real_env_dir=True), '-c',
'try:\n import pip\nexcept ImportError:\n print("OK")'])
# We force everything to text, so unittest gives the detailed diff
# if we get unexpected results
@@ -705,9 +771,9 @@ def do_test_with_pip(self, system_site_packages):
system_site_packages=system_site_packages,
with_pip=True)
# Ensure pip is available in the virtual environment
- envpy = os.path.join(os.path.realpath(self.env_dir), self.bindir, self.exe)
# Ignore DeprecationWarning since pip code is not part of Python
- out, err = check_output([envpy, '-W', 'ignore::DeprecationWarning',
+ out, err = check_output([self.envpy(real_env_dir=True),
+ '-W', 'ignore::DeprecationWarning',
'-W', 'ignore::ImportWarning', '-I',
'-m', 'pip', '--version'])
# We force everything to text, so unittest gives the detailed diff
@@ -728,7 +794,7 @@ def do_test_with_pip(self, system_site_packages):
# It seems ensurepip._uninstall calls subprocesses which do not
# inherit the interpreter settings.
envvars["PYTHONWARNINGS"] = "ignore"
- out, err = check_output([envpy,
+ out, err = check_output([self.envpy(real_env_dir=True),
'-W', 'ignore::DeprecationWarning',
'-W', 'ignore::ImportWarning', '-I',
'-m', 'ensurepip._uninstall'])
diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py
index b9352cb865d027..6d413aa68a338d 100644
--- a/Lib/test/test_xml_etree.py
+++ b/Lib/test/test_xml_etree.py
@@ -365,6 +365,7 @@ def test_path_cache(self):
from xml.etree import ElementPath
elem = ET.XML(SAMPLE_XML)
+ ElementPath._cache.clear()
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
cache_len_10 = len(ElementPath._cache)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
@@ -3926,8 +3927,9 @@ def test_issue14818(self):
# --------------------------------------------------------------------
class NoAcceleratorTest(unittest.TestCase):
- def setUp(self):
- if not pyET:
+ @classmethod
+ def setUpClass(cls):
+ if ET is not pyET:
raise unittest.SkipTest('only for the Python version')
# Test that the C accelerator was not imported for pyET
@@ -4192,8 +4194,7 @@ def get_option(config, option_name, default=None):
# --------------------------------------------------------------------
-
-def test_main(module=None):
+def setUpModule(module=None):
# When invoked without a module, runs the Python ET tests by loading pyET.
# Otherwise, uses the given module as the ET.
global pyET
@@ -4205,63 +4206,30 @@ def test_main(module=None):
global ET
ET = module
- test_classes = [
- ModuleTest,
- ElementSlicingTest,
- BasicElementTest,
- BadElementTest,
- BadElementPathTest,
- ElementTreeTest,
- IOTest,
- ParseErrorTest,
- XIncludeTest,
- ElementTreeTypeTest,
- ElementFindTest,
- ElementIterTest,
- TreeBuilderTest,
- XMLParserTest,
- XMLPullParserTest,
- BugsTest,
- KeywordArgsTest,
- BoolTest,
- C14NTest,
- ]
-
- # These tests will only run for the pure-Python version that doesn't import
- # _elementtree. We can't use skipUnless here, because pyET is filled in only
- # after the module is loaded.
- if pyET is not ET:
- test_classes.extend([
- NoAcceleratorTest,
- ])
+ # don't interfere with subsequent tests
+ def cleanup():
+ global ET, pyET
+ ET = pyET = None
+ unittest.addModuleCleanup(cleanup)
# Provide default namespace mapping and path cache.
from xml.etree import ElementPath
nsmap = ET.register_namespace._namespace_map
# Copy the default namespace mapping
nsmap_copy = nsmap.copy()
+ unittest.addModuleCleanup(nsmap.update, nsmap_copy)
+ unittest.addModuleCleanup(nsmap.clear)
+
# Copy the path cache (should be empty)
path_cache = ElementPath._cache
+ unittest.addModuleCleanup(setattr, ElementPath, "_cache", path_cache)
ElementPath._cache = path_cache.copy()
+
# Align the Comment/PI factories.
if hasattr(ET, '_set_factories'):
old_factories = ET._set_factories(ET.Comment, ET.PI)
- else:
- old_factories = None
-
- try:
- support.run_unittest(*test_classes)
- finally:
- from xml.etree import ElementPath
- # Restore mapping and path cache
- nsmap.clear()
- nsmap.update(nsmap_copy)
- ElementPath._cache = path_cache
- if old_factories is not None:
- ET._set_factories(*old_factories)
- # don't interfere with subsequent tests
- ET = pyET = None
+ unittest.addModuleCleanup(ET._set_factories, *old_factories)
if __name__ == '__main__':
- test_main()
+ unittest.main()
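
The refactoring above replaces ``test_main()`` plus a ``try/finally`` restore block with
``setUpModule()`` and ``unittest.addModuleCleanup()``; registered callbacks run after the
module's tests complete, in reverse registration order. A minimal sketch of the pattern
with illustrative names::

    import unittest

    _state = {}

    def setUpModule():
        # Establish module-wide state and register its teardown instead of
        # restoring it by hand in a finally block.
        _state["ready"] = True
        unittest.addModuleCleanup(_state.clear)

    class DemoTest(unittest.TestCase):
        def test_state_is_ready(self):
            self.assertTrue(_state["ready"])

    if __name__ == "__main__":
        unittest.main()
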
diff --git a/Lib/test/test_xml_etree_c.py b/Lib/test/test_xml_etree_c.py
index fd27b575ec8dc9..3a0fc572f457ff 100644
--- a/Lib/test/test_xml_etree_c.py
+++ b/Lib/test/test_xml_etree_c.py
@@ -254,20 +254,25 @@ def test_element_with_children(self):
self.check_sizeof(e, self.elementsize + self.extra +
struct.calcsize('8P'))
-def test_main():
- from test import test_xml_etree
-
- # Run the tests specific to the C implementation
- support.run_unittest(
- MiscTests,
- TestAliasWorking,
- TestAcceleratorImported,
- SizeofTest,
- )
- # Run the same test suite as the Python module
- test_xml_etree.test_main(module=cET)
+def install_tests():
+ # Test classes should have __module__ referring to this module.
+ from test import test_xml_etree
+ for name, base in vars(test_xml_etree).items():
+ if isinstance(base, type) and issubclass(base, unittest.TestCase):
+ class Temp(base):
+ pass
+ Temp.__name__ = Temp.__qualname__ = name
+ Temp.__module__ = __name__
+ assert name not in globals()
+ globals()[name] = Temp
+
+install_tests()
+
+def setUpModule():
+ from test import test_xml_etree
+ test_xml_etree.setUpModule(module=cET)
if __name__ == '__main__':
- test_main()
+ unittest.main()
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
index edc741dbc60088..6c4b8384a3202e 100644
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -1037,38 +1037,47 @@ def test_path2(self):
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
+ @support.requires_resource('walltime')
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
+ @support.requires_resource('walltime')
def test_invalid_path(self):
p = xmlrpclib.ServerProxy(URL+"/invalid")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
+ @support.requires_resource('walltime')
def test_path_query_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v#frag")
self.assertEqual(p.test(), "/foo?k=v#frag")
+ @support.requires_resource('walltime')
def test_path_fragment(self):
p = xmlrpclib.ServerProxy(URL+"/foo#frag")
self.assertEqual(p.test(), "/foo#frag")
+ @support.requires_resource('walltime')
def test_path_query(self):
p = xmlrpclib.ServerProxy(URL+"/foo?k=v")
self.assertEqual(p.test(), "/foo?k=v")
+ @support.requires_resource('walltime')
def test_empty_path(self):
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.test(), "/RPC2")
+ @support.requires_resource('walltime')
def test_root_path(self):
p = xmlrpclib.ServerProxy(URL + "/")
self.assertEqual(p.test(), "/")
+ @support.requires_resource('walltime')
def test_empty_path_query(self):
p = xmlrpclib.ServerProxy(URL + "?k=v")
self.assertEqual(p.test(), "?k=v")
+ @support.requires_resource('walltime')
def test_empty_path_fragment(self):
p = xmlrpclib.ServerProxy(URL + "#frag")
self.assertEqual(p.test(), "#frag")
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index 55306c63cd4e16..9a099adc74f4b4 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -3,7 +3,6 @@
from test.support import import_helper
import binascii
import copy
-import os
import pickle
import random
import sys
diff --git a/Lib/test/tokenizedata/__init__.py b/Lib/test/tokenizedata/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/Lib/test/bad_coding.py b/Lib/test/tokenizedata/bad_coding.py
similarity index 100%
rename from Lib/test/bad_coding.py
rename to Lib/test/tokenizedata/bad_coding.py
diff --git a/Lib/test/bad_coding2.py b/Lib/test/tokenizedata/bad_coding2.py
similarity index 100%
rename from Lib/test/bad_coding2.py
rename to Lib/test/tokenizedata/bad_coding2.py
diff --git a/Lib/test/badsyntax_3131.py b/Lib/test/tokenizedata/badsyntax_3131.py
similarity index 100%
rename from Lib/test/badsyntax_3131.py
rename to Lib/test/tokenizedata/badsyntax_3131.py
diff --git a/Lib/test/badsyntax_pep3120.py b/Lib/test/tokenizedata/badsyntax_pep3120.py
similarity index 100%
rename from Lib/test/badsyntax_pep3120.py
rename to Lib/test/tokenizedata/badsyntax_pep3120.py
diff --git a/Lib/test/coding20731.py b/Lib/test/tokenizedata/coding20731.py
similarity index 100%
rename from Lib/test/coding20731.py
rename to Lib/test/tokenizedata/coding20731.py
diff --git a/Lib/test/tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt b/Lib/test/tokenizedata/tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt
similarity index 100%
rename from Lib/test/tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt
rename to Lib/test/tokenizedata/tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt
diff --git a/Lib/test/tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt b/Lib/test/tokenizedata/tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt
similarity index 100%
rename from Lib/test/tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt
rename to Lib/test/tokenizedata/tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt
diff --git a/Lib/test/tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt b/Lib/test/tokenizedata/tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt
similarity index 100%
rename from Lib/test/tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt
rename to Lib/test/tokenizedata/tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt
diff --git a/Lib/test/tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt b/Lib/test/tokenizedata/tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt
similarity index 100%
rename from Lib/test/tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt
rename to Lib/test/tokenizedata/tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt
diff --git a/Lib/test/tokenize_tests.txt b/Lib/test/tokenizedata/tokenize_tests.txt
similarity index 100%
rename from Lib/test/tokenize_tests.txt
rename to Lib/test/tokenizedata/tokenize_tests.txt
diff --git a/Lib/traceback.py b/Lib/traceback.py
index 354754b9560a19..67941ff45988c2 100644
--- a/Lib/traceback.py
+++ b/Lib/traceback.py
@@ -608,11 +608,21 @@ def _extract_caret_anchors_from_line_segment(segment):
and not operator_str[operator_offset + 1].isspace()
):
right_anchor += 1
+
+ while left_anchor < len(segment) and ((ch := segment[left_anchor]).isspace() or ch in ")#"):
+ left_anchor += 1
+ right_anchor += 1
return _Anchors(normalize(left_anchor), normalize(right_anchor))
case ast.Subscript():
- subscript_start = normalize(expr.value.end_col_offset)
- subscript_end = normalize(expr.slice.end_col_offset + 1)
- return _Anchors(subscript_start, subscript_end)
+ left_anchor = normalize(expr.value.end_col_offset)
+ right_anchor = normalize(expr.slice.end_col_offset + 1)
+ while left_anchor < len(segment) and ((ch := segment[left_anchor]).isspace() or ch != "["):
+ left_anchor += 1
+ while right_anchor < len(segment) and ((ch := segment[right_anchor]).isspace() or ch != "]"):
+ right_anchor += 1
+ if right_anchor < len(segment):
+ right_anchor += 1
+ return _Anchors(left_anchor, right_anchor)
return None
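
The widened anchors above are what the new parenthesis/whitespace cases in
``test_traceback.py`` exercise: the marker range is nudged past spaces and closing
parentheses so the caret lands on the operator (or on the subscript brackets) rather
than inside ``( a )``. A short script that should reproduce the improved markers on a
3.12+ build (see the expected output in the tests above for the exact tilde spans)::

    def f_with_binary_operator():
        a = 1
        b = ""
        return ( a ) + b

    # Raises TypeError; the traceback's caret should point at "+".
    f_with_binary_operator()
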
diff --git a/Lib/typing.py b/Lib/typing.py
index 8655b756a9fd13..639be75747dae0 100644
--- a/Lib/typing.py
+++ b/Lib/typing.py
@@ -23,10 +23,8 @@
from collections import defaultdict
import collections.abc
import copyreg
-import contextlib
import functools
import operator
-import re as stdlib_re # Avoid confusion with the typing.re namespace on <=3.11
import sys
import types
from types import WrapperDescriptorType, MethodWrapperType, MethodDescriptorType, GenericAlias
@@ -2580,8 +2578,6 @@ class Other(Leaf): # Error reported by type checker
KeysView = _alias(collections.abc.KeysView, 1)
ItemsView = _alias(collections.abc.ItemsView, 2)
ValuesView = _alias(collections.abc.ValuesView, 1)
-ContextManager = _alias(contextlib.AbstractContextManager, 1, name='ContextManager')
-AsyncContextManager = _alias(contextlib.AbstractAsyncContextManager, 1, name='AsyncContextManager')
Dict = _alias(dict, 2, inst=False, name='Dict')
DefaultDict = _alias(collections.defaultdict, 2, name='DefaultDict')
OrderedDict = _alias(collections.OrderedDict, 2)
@@ -2886,8 +2882,7 @@ def __new__(cls, name, bases, ns, total=True):
tp_dict.__annotations__ = annotations
tp_dict.__required_keys__ = frozenset(required_keys)
tp_dict.__optional_keys__ = frozenset(optional_keys)
- if not hasattr(tp_dict, '__total__'):
- tp_dict.__total__ = total
+ tp_dict.__total__ = total
return tp_dict
__call__ = dict # static method
@@ -3239,10 +3234,6 @@ def __enter__(self) -> 'TextIO':
pass
-Pattern = _alias(stdlib_re.Pattern, 1)
-Match = _alias(stdlib_re.Match, 1)
-
-
def reveal_type[T](obj: T, /) -> T:
"""Reveal the inferred type of a variable.
@@ -3427,3 +3418,21 @@ def get_protocol_members(tp: type, /) -> frozenset[str]:
if not is_protocol(tp):
raise TypeError(f'{tp!r} is not a Protocol')
return frozenset(tp.__protocol_attrs__)
+
+
+def __getattr__(attr):
+ """Improve the import time of the typing module.
+
+ Soft-deprecated objects which are costly to create
+ are only created on-demand here.
+ """
+ if attr in {"Pattern", "Match"}:
+ import re
+ obj = _alias(getattr(re, attr), 1)
+ elif attr in {"ContextManager", "AsyncContextManager"}:
+ import contextlib
+ obj = _alias(getattr(contextlib, f"Abstract{attr}"), 1, name=attr)
+ else:
+ raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
+ globals()[attr] = obj
+ return obj
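
The module-level ``__getattr__`` added above relies on the PEP 562 hook: the first
access to a deferred name builds the alias, stores it in ``globals()``, and returns it,
so later lookups bypass the hook entirely. That is also why ``test_all_exported_names``
now touches every name in ``typing.__all__`` before inspecting the module namespace. A
minimal sketch of the same lazy-and-cache pattern, with a hypothetical expensive object
standing in for the ``re``/``contextlib`` aliases::

    # lazymod.py -- illustrative module using a PEP 562 __getattr__ hook.
    def _build_expensive():
        # Stand-in for an import or construction that is costly at import time.
        return object()

    def __getattr__(attr):
        if attr == "EXPENSIVE":
            obj = _build_expensive()
            globals()[attr] = obj    # cache so future lookups skip __getattr__
            return obj
        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
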
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index 001b640dc43ad6..811557498bb30e 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -606,7 +606,6 @@ def run(self, result=None):
else:
stopTestRun = None
- result.startTest(self)
try:
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
@@ -617,6 +616,9 @@ def run(self, result=None):
_addSkip(result, self, skip_why)
return result
+ # Increase the number of tests only if it hasn't been skipped
+ result.startTest(self)
+
expecting_failure = (
getattr(self, "__unittest_expecting_failure__", False) or
getattr(testMethod, "__unittest_expecting_failure__", False)
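
With ``result.startTest()`` moved below the skip check, a skipped test no longer emits a
``startTest`` event and no longer counts toward ``testsRun``, which is what the revised
expectations in ``test_skipping.py`` and ``test_discovery.py`` above encode. A
self-contained check of the new counts::

    import unittest

    class Demo(unittest.TestCase):
        @unittest.skip("demonstration")
        def test_skipped(self):
            pass

        def test_passes(self):
            pass

    result = unittest.TestResult()
    unittest.TestLoader().loadTestsFromTestCase(Demo).run(result)
    print(result.testsRun)          # 1 -- the skipped test is not counted
    print(len(result.skipped))      # 1
    print(result.wasSuccessful())   # True
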
diff --git a/Lib/unittest/loader.py b/Lib/unittest/loader.py
index 678d627d7c6926..9a3e5cc4bf30e5 100644
--- a/Lib/unittest/loader.py
+++ b/Lib/unittest/loader.py
@@ -84,9 +84,13 @@ def loadTestsFromTestCase(self, testCaseClass):
raise TypeError("Test cases should not be derived from "
"TestSuite. Maybe you meant to derive from "
"TestCase?")
- testCaseNames = self.getTestCaseNames(testCaseClass)
- if not testCaseNames and hasattr(testCaseClass, 'runTest'):
- testCaseNames = ['runTest']
+ if testCaseClass in (case.TestCase, case.FunctionTestCase):
+            # Never load tests from the TestCase and FunctionTestCase bases.
+ testCaseNames = []
+ else:
+ testCaseNames = self.getTestCaseNames(testCaseClass)
+ if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+ testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
@@ -95,7 +99,11 @@ def loadTestsFromModule(self, module, *, pattern=None):
tests = []
for name in dir(module):
obj = getattr(module, name)
- if isinstance(obj, type) and issubclass(obj, case.TestCase):
+ if (
+ isinstance(obj, type)
+ and issubclass(obj, case.TestCase)
+ and obj not in (case.TestCase, case.FunctionTestCase)
+ ):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
@@ -164,7 +172,11 @@ def loadTestsFromName(self, name, module=None):
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
- elif isinstance(obj, type) and issubclass(obj, case.TestCase):
+ elif (
+ isinstance(obj, type)
+ and issubclass(obj, case.TestCase)
+ and obj not in (case.TestCase, case.FunctionTestCase)
+ ):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.FunctionType) and
isinstance(parent, type) and
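
The loader now special-cases the two base classes, so a test module that re-exports
``unittest.TestCase`` or ``unittest.FunctionTestCase`` no longer yields spurious entries.
A quick check mirroring the tests added above::

    import types
    import unittest

    loader = unittest.TestLoader()
    print(list(loader.loadTestsFromTestCase(unittest.TestCase)))   # []

    m = types.ModuleType("m")
    m.TestCase = unittest.TestCase       # simulates "from unittest import TestCase"
    print(list(loader.loadTestsFromModule(m)))                     # []
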
diff --git a/Lib/unittest/result.py b/Lib/unittest/result.py
index 3ace0a5b7bf2ef..9e56f658027f4d 100644
--- a/Lib/unittest/result.py
+++ b/Lib/unittest/result.py
@@ -97,10 +97,12 @@ def _restoreStdout(self):
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
- self._stdout_buffer.seek(0)
- self._stdout_buffer.truncate()
- self._stderr_buffer.seek(0)
- self._stderr_buffer.truncate()
+ if self._stdout_buffer is not None:
+ self._stdout_buffer.seek(0)
+ self._stdout_buffer.truncate()
+ if self._stderr_buffer is not None:
+ self._stderr_buffer.seek(0)
+ self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
diff --git a/Lib/venv/__init__.py b/Lib/venv/__init__.py
index 2173c9b13e5cf7..d960bf3bd82ac5 100644
--- a/Lib/venv/__init__.py
+++ b/Lib/venv/__init__.py
@@ -41,11 +41,13 @@ class EnvBuilder:
environment
:param prompt: Alternative terminal prefix for the environment.
:param upgrade_deps: Update the base venv modules to the latest on PyPI
+ :param scm_ignore_files: Create ignore files for the SCMs specified by the
+ iterable.
"""
def __init__(self, system_site_packages=False, clear=False,
symlinks=False, upgrade=False, with_pip=False, prompt=None,
- upgrade_deps=False):
+ upgrade_deps=False, *, scm_ignore_files=frozenset()):
self.system_site_packages = system_site_packages
self.clear = clear
self.symlinks = symlinks
@@ -56,6 +58,7 @@ def __init__(self, system_site_packages=False, clear=False,
prompt = os.path.basename(os.getcwd())
self.prompt = prompt
self.upgrade_deps = upgrade_deps
+ self.scm_ignore_files = frozenset(map(str.lower, scm_ignore_files))
def create(self, env_dir):
"""
@@ -66,6 +69,8 @@ def create(self, env_dir):
"""
env_dir = os.path.abspath(env_dir)
context = self.ensure_directories(env_dir)
+ for scm in self.scm_ignore_files:
+ getattr(self, f"create_{scm}_ignore_file")(context)
# See issue 24875. We need system_site_packages to be False
# until after pip is installed.
true_system_site_packages = self.system_site_packages
@@ -210,6 +215,8 @@ def create_configuration(self, context):
args.append('--upgrade-deps')
if self.orig_prompt is not None:
args.append(f'--prompt="{self.orig_prompt}"')
+ if not self.scm_ignore_files:
+ args.append('--without-scm-ignore-files')
args.append(context.env_dir)
args = ' '.join(args)
@@ -278,6 +285,19 @@ def symlink_or_copy(self, src, dst, relative_symlinks_ok=False):
shutil.copyfile(src, dst)
+ def create_git_ignore_file(self, context):
+ """
+ Create a .gitignore file in the environment directory.
+
+ The contents of the file cause the entire environment directory to be
+ ignored by git.
+ """
+ gitignore_path = os.path.join(context.env_dir, '.gitignore')
+ with open(gitignore_path, 'w', encoding='utf-8') as file:
+ file.write('# Created by venv; '
+ 'see https://docs.python.org/3/library/venv.html\n')
+ file.write('*\n')
+
def setup_python(self, context):
"""
Set up a Python executable in the environment.
@@ -461,11 +481,13 @@ def upgrade_dependencies(self, context):
def create(env_dir, system_site_packages=False, clear=False,
- symlinks=False, with_pip=False, prompt=None, upgrade_deps=False):
+ symlinks=False, with_pip=False, prompt=None, upgrade_deps=False,
+ *, scm_ignore_files=frozenset()):
"""Create a virtual environment in a directory."""
builder = EnvBuilder(system_site_packages=system_site_packages,
clear=clear, symlinks=symlinks, with_pip=with_pip,
- prompt=prompt, upgrade_deps=upgrade_deps)
+ prompt=prompt, upgrade_deps=upgrade_deps,
+ scm_ignore_files=scm_ignore_files)
builder.create(env_dir)
@@ -525,6 +547,11 @@ def main(args=None):
dest='upgrade_deps',
help=f'Upgrade core dependencies ({", ".join(CORE_VENV_DEPS)}) '
'to the latest version in PyPI')
+ parser.add_argument('--without-scm-ignore-files', dest='scm_ignore_files',
+ action='store_const', const=frozenset(),
+ default=frozenset(['git']),
+ help='Skips adding SCM ignore files to the environment '
+ 'directory (Git is supported by default).')
options = parser.parse_args(args)
if options.upgrade and options.clear:
raise ValueError('you cannot supply --upgrade and --clear together.')
@@ -534,7 +561,8 @@ def main(args=None):
upgrade=options.upgrade,
with_pip=options.with_pip,
prompt=options.prompt,
- upgrade_deps=options.upgrade_deps)
+ upgrade_deps=options.upgrade_deps,
+ scm_ignore_files=options.scm_ignore_files)
for d in options.dirs:
builder.create(d)
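
``EnvBuilder.create()`` dispatches on the new keyword by name, calling
``create_<scm>_ignore_file(context)`` for each entry in ``scm_ignore_files`` (the
``BzrEnvBuilder`` helper in the tests above relies on exactly this). A hedged sketch of
a hypothetical Mercurial variant; the ``.hgignore`` contents here are illustrative::

    import os
    import venv

    class HgEnvBuilder(venv.EnvBuilder):
        # Found via getattr(self, "create_hg_ignore_file") when "hg" is requested.
        def create_hg_ignore_file(self, context):
            path = os.path.join(context.env_dir, ".hgignore")
            with open(path, "w", encoding="utf-8") as f:
                f.write("syntax: glob\n*\n")

    builder = HgEnvBuilder(scm_ignore_files={"git", "hg"})
    builder.create("demo-env")    # writes both .gitignore and .hgignore
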
diff --git a/Lib/venv/__main__.py b/Lib/venv/__main__.py
index 912423e4a78198..88f55439dc210c 100644
--- a/Lib/venv/__main__.py
+++ b/Lib/venv/__main__.py
@@ -6,5 +6,5 @@
main()
rc = 0
except Exception as e:
- print('Error: %s' % e, file=sys.stderr)
+ print('Error:', e, file=sys.stderr)
sys.exit(rc)
diff --git a/Makefile.pre.in b/Makefile.pre.in
index 7b67738f4341a2..d123fa3e6f4a47 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -400,12 +400,14 @@ PYTHON_OBJS= \
Python/instrumentation.o \
Python/intrinsics.o \
Python/legacy_tracing.o \
+ Python/lock.o \
Python/marshal.o \
Python/modsupport.o \
Python/mysnprintf.o \
Python/mystrtoul.o \
Python/optimizer.o \
Python/optimizer_analysis.o \
+ Python/parking_lot.o \
Python/pathconfig.o \
Python/preconfig.o \
Python/pyarena.o \
@@ -490,6 +492,7 @@ OBJECT_OBJS= \
Objects/weakrefobject.o \
@PERF_TRAMPOLINE_OBJ@
+DEEPFREEZE_C = Python/deepfreeze/deepfreeze.c
DEEPFREEZE_OBJS = Python/deepfreeze/deepfreeze.o
##########################################################################
@@ -504,7 +507,6 @@ LIBRARY_OBJS_OMIT_FROZEN= \
LIBRARY_OBJS= \
$(LIBRARY_OBJS_OMIT_FROZEN) \
- $(DEEPFREEZE_OBJS) \
Modules/getpath.o \
Python/frozen.o
@@ -777,7 +779,6 @@ coverage-report: regen-token regen-frozen
.PHONY: clinic
clinic: check-clean-src $(srcdir)/Modules/_blake2/blake2s_impl.c
$(PYTHON_FOR_REGEN) $(srcdir)/Tools/clinic/clinic.py --make --exclude Lib/test/clinic.test.c --srcdir $(srcdir)
- $(PYTHON_FOR_REGEN) $(srcdir)/Tools/build/generate_global_objects.py
.PHONY: clinic-tests
clinic-tests: check-clean-src $(srcdir)/Lib/test/clinic.test.c
@@ -1061,7 +1062,7 @@ regen-re: $(BUILDPYTHON)
$(RUNSHARED) ./$(BUILDPYTHON) $(srcdir)/Tools/build/generate_re_casefix.py $(srcdir)/Lib/re/_casefix.py
Programs/_testembed: Programs/_testembed.o $(LINK_PYTHON_DEPS)
- $(LINKCC) $(PY_CORE_LDFLAGS) $(LINKFORSHARED) -o $@ Programs/_testembed.o $(LINK_PYTHON_OBJS) $(LIBS) $(MODLIBS) $(SYSLIBS)
+ $(LINKCC) $(PY_LDFLAGS_NOLTO) $(LINKFORSHARED) -o $@ Programs/_testembed.o $(LINK_PYTHON_OBJS) $(LIBS) $(MODLIBS) $(SYSLIBS)
############################################################################
# "Bootstrap Python" used to run deepfreeze.py
@@ -1162,7 +1163,7 @@ Programs/_freeze_module.o: Programs/_freeze_module.c Makefile
Modules/getpath_noop.o: $(srcdir)/Modules/getpath_noop.c Makefile
Programs/_freeze_module: Programs/_freeze_module.o Modules/getpath_noop.o $(LIBRARY_OBJS_OMIT_FROZEN)
- $(LINKCC) $(PY_CORE_LDFLAGS) -o $@ Programs/_freeze_module.o Modules/getpath_noop.o $(LIBRARY_OBJS_OMIT_FROZEN) $(LIBS) $(MODLIBS) $(SYSLIBS)
+ $(LINKCC) $(PY_LDFLAGS_NOLTO) -o $@ Programs/_freeze_module.o Modules/getpath_noop.o $(LIBRARY_OBJS_OMIT_FROZEN) $(LIBS) $(MODLIBS) $(SYSLIBS)
# We manually freeze getpath.py rather than through freeze_modules
Python/frozen_modules/getpath.h: Modules/getpath.py $(FREEZE_MODULE_BOOTSTRAP_DEPS)
@@ -1252,12 +1253,12 @@ regen-frozen: Tools/build/freeze_modules.py $(FROZEN_FILES_IN)
# Deepfreeze targets
.PHONY: regen-deepfreeze
-regen-deepfreeze: $(DEEPFREEZE_OBJS)
+regen-deepfreeze: $(DEEPFREEZE_C)
DEEPFREEZE_DEPS=$(srcdir)/Tools/build/deepfreeze.py Include/internal/pycore_global_strings.h $(FREEZE_MODULE_DEPS) $(FROZEN_FILES_OUT)
# BEGIN: deepfreeze modules
-Python/deepfreeze/deepfreeze.c: $(DEEPFREEZE_DEPS)
+$(DEEPFREEZE_C): $(DEEPFREEZE_DEPS)
$(PYTHON_FOR_FREEZE) $(srcdir)/Tools/build/deepfreeze.py \
Python/frozen_modules/importlib._bootstrap.h:importlib._bootstrap \
Python/frozen_modules/importlib._bootstrap_external.h:importlib._bootstrap_external \
@@ -1284,8 +1285,6 @@ Python/deepfreeze/deepfreeze.c: $(DEEPFREEZE_DEPS)
Python/frozen_modules/frozen_only.h:frozen_only \
-o Python/deepfreeze/deepfreeze.c
# END: deepfreeze modules
- @echo "Note: Deepfreeze may have added some global objects,"
- @echo " so run 'make regen-global-objects' if necessary."
# We keep this renamed target around for folks with muscle memory.
.PHONY: regen-importlib
@@ -1294,11 +1293,12 @@ regen-importlib: regen-frozen
############################################################################
# Global objects
+# Dependencies which can add and/or remove _Py_ID() identifiers:
+# - deepfreeze.c
+# - "make clinic"
.PHONY: regen-global-objects
-regen-global-objects: $(srcdir)/Tools/build/generate_global_objects.py
+regen-global-objects: $(srcdir)/Tools/build/generate_global_objects.py $(DEEPFREEZE_C) clinic
$(PYTHON_FOR_REGEN) $(srcdir)/Tools/build/generate_global_objects.py
- @echo "Note: Global objects can be added or removed by other tools (e.g. deepfreeze), "
- @echo " so be sure to re-run regen-global-objects after those tools."
############################################################################
# ABI
@@ -1320,9 +1320,10 @@ regen-limited-abi: all
############################################################################
# Regenerate all generated files
+# "clinic" is regenerated implicitly via "regen-global-objects".
.PHONY: regen-all
regen-all: regen-cases regen-typeslots \
- regen-token regen-ast regen-keyword regen-sre regen-frozen clinic \
+ regen-token regen-ast regen-keyword regen-sre regen-frozen \
regen-pegen-metaparser regen-pegen regen-test-frozenmain \
regen-test-levenshtein regen-global-objects
@echo
@@ -1724,6 +1725,7 @@ PYTHON_HEADERS= \
$(srcdir)/Include/cpython/pylifecycle.h \
$(srcdir)/Include/cpython/pymem.h \
$(srcdir)/Include/cpython/pystate.h \
+ $(srcdir)/Include/cpython/pystats.h \
$(srcdir)/Include/cpython/pythonrun.h \
$(srcdir)/Include/cpython/pythread.h \
$(srcdir)/Include/cpython/setobject.h \
@@ -1741,7 +1743,6 @@ PYTHON_HEADERS= \
$(srcdir)/Include/internal/pycore_ast_state.h \
$(srcdir)/Include/internal/pycore_atexit.h \
$(srcdir)/Include/internal/pycore_atomic.h \
- $(srcdir)/Include/internal/pycore_atomic_funcs.h \
$(srcdir)/Include/internal/pycore_bitutils.h \
$(srcdir)/Include/internal/pycore_bytes_methods.h \
$(srcdir)/Include/internal/pycore_bytesobject.h \
@@ -1779,6 +1780,8 @@ PYTHON_HEADERS= \
$(srcdir)/Include/internal/pycore_interp.h \
$(srcdir)/Include/internal/pycore_intrinsics.h \
$(srcdir)/Include/internal/pycore_list.h \
+ $(srcdir)/Include/internal/pycore_llist.h \
+ $(srcdir)/Include/internal/pycore_lock.h \
$(srcdir)/Include/internal/pycore_long.h \
$(srcdir)/Include/internal/pycore_modsupport.h \
$(srcdir)/Include/internal/pycore_moduleobject.h \
@@ -1790,6 +1793,7 @@ PYTHON_HEADERS= \
$(srcdir)/Include/internal/pycore_opcode_metadata.h \
$(srcdir)/Include/internal/pycore_opcode_utils.h \
$(srcdir)/Include/internal/pycore_optimizer.h \
+ $(srcdir)/Include/internal/pycore_parking_lot.h \
$(srcdir)/Include/internal/pycore_pathconfig.h \
$(srcdir)/Include/internal/pycore_pyarena.h \
$(srcdir)/Include/internal/pycore_pyerrors.h \
@@ -1798,12 +1802,14 @@ PYTHON_HEADERS= \
$(srcdir)/Include/internal/pycore_pymem.h \
$(srcdir)/Include/internal/pycore_pymem_init.h \
$(srcdir)/Include/internal/pycore_pystate.h \
+ $(srcdir)/Include/internal/pycore_pystats.h \
$(srcdir)/Include/internal/pycore_pythonrun.h \
$(srcdir)/Include/internal/pycore_pythread.h \
$(srcdir)/Include/internal/pycore_range.h \
$(srcdir)/Include/internal/pycore_runtime.h \
$(srcdir)/Include/internal/pycore_runtime_init_generated.h \
$(srcdir)/Include/internal/pycore_runtime_init.h \
+ $(srcdir)/Include/internal/pycore_semaphore.h \
$(srcdir)/Include/internal/pycore_setobject.h \
$(srcdir)/Include/internal/pycore_signal.h \
$(srcdir)/Include/internal/pycore_sliceobject.h \
@@ -1886,7 +1892,7 @@ buildbottest: all
-@if which pybuildbot.identify >/dev/null 2>&1; then \
pybuildbot.identify "CC='$(CC)'" "CXX='$(CXX)'"; \
fi
- $(TESTRUNNER) -j 1 -u all -W --slowest --fail-env-changed --timeout=$(TESTTIMEOUT) $(TESTOPTS)
+ $(TESTRUNNER) -j 1 -u all -W --slowest --fail-env-changed --fail-rerun --timeout=$(TESTTIMEOUT) $(TESTOPTS)
# Like testall, but run Python tests with HOSTRUNNER directly.
.PHONY: hostrunnertest
@@ -2140,7 +2146,8 @@ LIBSUBDIRS= asyncio \
TESTSUBDIRS= idlelib/idle_test \
test \
test/audiodata \
- test/capath \
+ test/certdata \
+ test/certdata/capath \
test/cjkencodings \
test/crashers \
test/data \
@@ -2149,6 +2156,7 @@ TESTSUBDIRS= idlelib/idle_test \
test/encoded_modules \
test/leakers \
test/libregrtest \
+ test/mathdata \
test/subprocessdata \
test/support \
test/support/_hypothesis_stubs \
@@ -2156,8 +2164,10 @@ TESTSUBDIRS= idlelib/idle_test \
test/test_capi \
test/test_cppext \
test/test_ctypes \
+ test/test_dataclasses \
test/test_email \
test/test_email/data \
+ test/test_future_stmt \
test/test_import \
test/test_import/data \
test/test_import/data/circular_imports \
@@ -2244,6 +2254,7 @@ TESTSUBDIRS= idlelib/idle_test \
test/test_zoneinfo \
test/test_zoneinfo/data \
test/tkinterdata \
+ test/tokenizedata \
test/tracedmodules \
test/typinganndata \
test/xmltestdata \
@@ -2594,6 +2605,7 @@ recheck:
autoconf:
(cd $(srcdir); autoreconf -ivf -Werror)
+# See https://github.com/tiran/cpython_autoconf container
.PHONY: regen-configure
regen-configure:
@if command -v podman >/dev/null; then RUNTIME="podman"; else RUNTIME="docker"; fi; \
diff --git a/Misc/ACKS b/Misc/ACKS
index e52208a41cc9f7..aaa178fc3b5d08 100644
--- a/Misc/ACKS
+++ b/Misc/ACKS
@@ -254,6 +254,7 @@ Curtis Bucher
Colm Buckley
Erik de Bueger
Jan-Hein Bührman
+Marc Bürg
Lars Buitinck
Artem Bulgakov
Dick Bulterman
@@ -503,6 +504,7 @@ Daniel Ellis
Phil Elson
David Ely
Victor van den Elzen
+Vlad Emelianov
Jeff Epler
Tom Epperly
Gökcen Eraslan
@@ -1269,7 +1271,7 @@ R. David Murray
Matti Mäki
Jörg Müller
Kaushik N
-Dong-hee Na
+Donghee Na
Dale Nagata
John Nagle
Takahiro Nakayama
@@ -1331,6 +1333,7 @@ Ethan Onstott
Ken Jin Ooi
Piet van Oostrum
Tomas Oppelstrup
+Itamar Oren
Jason Orendorff
Yan "yyyyyyyan" Orestes
Bastien Orivel
@@ -1341,7 +1344,6 @@ Michele Orrù
Tomáš Orsava
Oleg Oshmyan
Denis Osipov
-Itamar Ostricher
Denis S. Otkidach
Peter Otten
Michael Otteneder
@@ -1869,6 +1871,7 @@ Steven Troxler
Brent Tubbs
Anthony Tuininga
Erno Tukia
+Adam Turner
David Turner
Stephen Turner
Itamar Turner-Trauring
@@ -2076,7 +2079,5 @@ Jelle Zijlstra
Gennadiy Zlobin
Doug Zongker
Peter Åstrand
-Vlad Emelianov
-Andrey Doroschenko
(Entries should be added in rough alphabetical order by last names)
diff --git a/Misc/NEWS.d/3.10.0a1.rst b/Misc/NEWS.d/3.10.0a1.rst
index 79d85a40df8bbe..a9f25b482508ba 100644
--- a/Misc/NEWS.d/3.10.0a1.rst
+++ b/Misc/NEWS.d/3.10.0a1.rst
@@ -68,7 +68,7 @@ getting the ``__bases__`` attribute leads to infinite recursion.
.. section: Core and Builtins
Speed up calls to ``reversed()`` by using the :pep:`590` ``vectorcall``
-calling convention. Patch by Dong-hee Na.
+calling convention. Patch by Donghee Na.
..
@@ -88,7 +88,7 @@ convention. Patch by Dennis Sweeney.
.. section: Core and Builtins
Speed up calls to ``bool()`` by using the :pep:`590` ``vectorcall`` calling
-convention. Patch by Dong-hee Na.
+convention. Patch by Donghee Na.
..
@@ -715,7 +715,7 @@ Fix refleak in _Py_fopen_obj() when PySys_Audit() fails
.. section: Core and Builtins
Add a state to the :mod:`!nis` module (:pep:`3121`) and apply the multiphase
-initialization. Patch by Dong-hee Na.
+initialization. Patch by Donghee Na.
..
@@ -936,7 +936,7 @@ class. Patch by Pablo Galindo.
.. section: Core and Builtins
:c:func:`Py_TYPE()` is changed to the inline static function. Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -2596,7 +2596,7 @@ remove multiple items from a list".
.. section: Documentation
Fix RemovedInSphinx40Warning when building the documentation. Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -2862,7 +2862,7 @@ Make test_gdb properly run on HP-UX. Patch by Michael Osipov.
.. section: Build
Update :c:macro:`Py_UNREACHABLE` to use __builtin_unreachable() if only the
-compiler is able to use it. Patch by Dong-hee Na.
+compiler is able to use it. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.10.0a2.rst b/Misc/NEWS.d/3.10.0a2.rst
index 78b25779802d6e..78f4377656b0cc 100644
--- a/Misc/NEWS.d/3.10.0a2.rst
+++ b/Misc/NEWS.d/3.10.0a2.rst
@@ -185,7 +185,7 @@ Removed special methods ``__int__``, ``__float__``, ``__floordiv__``,
Micro optimization when compute :c:member:`~PySequenceMethods.sq_item` and
:c:member:`~PyMappingMethods.mp_subscript` of :class:`range`. Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -205,7 +205,7 @@ error message using the current locale's encoding.
.. nonce: iLoMVF
.. section: Core and Builtins
-Micro optimization for range.index if step is 1. Patch by Dong-hee Na.
+Micro optimization for range.index if step is 1. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.10.0a3.rst b/Misc/NEWS.d/3.10.0a3.rst
index 755109cbd376f4..7112819c1b4118 100644
--- a/Misc/NEWS.d/3.10.0a3.rst
+++ b/Misc/NEWS.d/3.10.0a3.rst
@@ -153,7 +153,7 @@ Allow an unparenthesized walrus in subscript indexes.
.. section: Core and Builtins
Make sure that the compiler front-end produces a well-formed control flow
-graph. Be be more aggressive in the compiler back-end, as it is now safe to
+graph. Be more aggressive in the compiler back-end, as it is now safe to
do so.
..
@@ -394,7 +394,7 @@ Removed the ``formatter`` module, which was deprecated in Python 3.4. It is
somewhat obsolete, little used, and not tested. It was originally scheduled
to be removed in Python 3.6, but such removals were delayed until after
Python 2.7 EOL. Existing users should copy whatever classes they use into
-their code. Patch by Dong-hee Na and and Terry J. Reedy.
+their code. Patch by Donghee Na and Terry J. Reedy.
..
diff --git a/Misc/NEWS.d/3.10.0a4.rst b/Misc/NEWS.d/3.10.0a4.rst
index 95f9319668db45..414823f162d85c 100644
--- a/Misc/NEWS.d/3.10.0a4.rst
+++ b/Misc/NEWS.d/3.10.0a4.rst
@@ -105,7 +105,7 @@ blocks
.. section: Core and Builtins
Make the :mod:`atexit` module state per-interpreter. It is now safe have
-more than one :mod:`atexit` module instance. Patch by Dong-hee Na and Victor
+more than one :mod:`atexit` module instance. Patch by Donghee Na and Victor
Stinner.
..
@@ -768,7 +768,7 @@ results. Patch by Ammar Askar.
.. section: Tests
Update test_nntplib to use official group name of news.aioe.org for testing.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.10.0a6.rst b/Misc/NEWS.d/3.10.0a6.rst
index 313aa689254040..c379b968c9885b 100644
--- a/Misc/NEWS.d/3.10.0a6.rst
+++ b/Misc/NEWS.d/3.10.0a6.rst
@@ -295,7 +295,7 @@ actual dictionary. This created problems for introspection tools.
.. section: Library
Added :const:`~os.O_EVTONLY`, :const:`~os.O_FSYNC`, :const:`~os.O_SYMLINK` and
-:const:`~os.O_NOFOLLOW_ANY` for macOS. Patch by Dong-hee Na.
+:const:`~os.O_NOFOLLOW_ANY` for macOS. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.10.0a7.rst b/Misc/NEWS.d/3.10.0a7.rst
index 7933f71b01c14d..3a1694f444616a 100644
--- a/Misc/NEWS.d/3.10.0a7.rst
+++ b/Misc/NEWS.d/3.10.0a7.rst
@@ -113,7 +113,7 @@ in f-strings. Patch by Pablo Galindo.
.. section: Core and Builtins
Speed up calls to ``map()`` by using the :pep:`590` ``vectorcall`` calling
-convention. Patch by Dong-hee Na.
+convention. Patch by Donghee Na.
..
@@ -240,7 +240,7 @@ of processes that don't use sigaltstack.
.. section: Core and Builtins
Speed up calls to ``filter()`` by using the :pep:`590` ``vectorcall``
-calling convention. Patch by Dong-hee Na.
+calling convention. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.10.0b1.rst b/Misc/NEWS.d/3.10.0b1.rst
index 3c71bc73b812a1..e7b6b93d0b6df3 100644
--- a/Misc/NEWS.d/3.10.0b1.rst
+++ b/Misc/NEWS.d/3.10.0b1.rst
@@ -516,7 +516,7 @@ encoding.
.. section: Library
Removed an unnecessary list comprehension before looping from
-:func:`urllib.parse.parse_qsl`. Patch by Christoph Zwerschke and Dong-hee
+:func:`urllib.parse.parse_qsl`. Patch by Christoph Zwerschke and Donghee
Na.
..
diff --git a/Misc/NEWS.d/3.11.0a1.rst b/Misc/NEWS.d/3.11.0a1.rst
index e1d0adc478029f..7c991e7667b01a 100644
--- a/Misc/NEWS.d/3.11.0a1.rst
+++ b/Misc/NEWS.d/3.11.0a1.rst
@@ -292,7 +292,7 @@ Fixed pickling of range iterators that iterated for over ``2**32`` times.
.. section: Core and Builtins
A :exc:`SyntaxError` is now raised when trying to delete :const:`__debug__`.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -415,7 +415,7 @@ type :class:`float` or :class:`complex`.
.. section: Core and Builtins
A debug variable :envvar:`PYTHONDUMPREFSFILE` is added for creating a dump
-file which is generated by :option:`--with-trace-refs`. Patch by Dong-hee
+file which is generated by :option:`--with-trace-refs`. Patch by Donghee
Na.
..
@@ -670,7 +670,7 @@ Parameter substitution of the union type with wrong types now raises
.. section: Core and Builtins
Update ``property_descr_set`` to use vectorcall if possible. Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -732,7 +732,7 @@ Collapse union of equal types. E.g. the result of ``int | int`` is now
On Windows, :func:`os.urandom`: uses BCryptGenRandom API instead of
CryptGenRandom API which is deprecated from Microsoft Windows API. Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -1657,7 +1657,7 @@ Patch by Hugo van Kemenade.
.. section: Library
Pure ASCII strings are now normalized in constant time by
-:func:`unicodedata.normalize`. Patch by Dong-hee Na.
+:func:`unicodedata.normalize`. Patch by Donghee Na.
..
@@ -1968,7 +1968,7 @@ A new function ``operator.call`` has been added, such that
:class:`!webbrowser.MacOSX` is deprecated and will be removed in Python 3.13.
It is untested and undocumented and also not used by :mod:`webbrowser` itself.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -2465,7 +2465,7 @@ generator
.. section: Library
Make the implementation consistency of :func:`~operator.indexOf` between C
-and Python versions. Patch by Dong-hee Na.
+and Python versions. Patch by Donghee Na.
..
@@ -2752,7 +2752,7 @@ of reserved filenames, including those with trailing spaces or colons.
.. section: Library
Fix :meth:`~email.message.MIMEPart.as_string` to pass unixfrom properly.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -2809,7 +2809,7 @@ behaves differently than the similar implementation in :mod:`sysconfig`.
.. section: Library
:class:`smtpd.MailmanProxy` is now removed as it is unusable without an
-external module, ``mailman``. Patch by Dong-hee Na.
+external module, ``mailman``. Patch by Donghee Na.
..
@@ -2916,7 +2916,7 @@ Support PEP 515 for Fraction's initialization from string.
.. nonce: qFBYpp
.. section: Library
-Remove deprecated functions in the :mod:`gettext`. Patch by Dong-hee Na.
+Remove deprecated functions in the :mod:`gettext`. Patch by Donghee Na.
..
@@ -4471,7 +4471,7 @@ and modify the frozen modules.
.. section: Build
Add support for building with clang thin lto via --with-lto=thin/full. Patch
-by Dong-hee Na and Brett Holman.
+by Donghee Na and Brett Holman.
..
@@ -4798,7 +4798,7 @@ Allow the Argument Clinic tool to handle ``__complex__`` special methods.
Removed the 'test2to3' demo project that demonstrated using lib2to3 to
support Python 2.x and Python 3.x from a single source in a distutils
-package. Patch by Dong-hee Na
+package. Patch by Donghee Na
..
diff --git a/Misc/NEWS.d/3.11.0a2.rst b/Misc/NEWS.d/3.11.0a2.rst
index cf26137dff19ef..503e489b658e4d 100644
--- a/Misc/NEWS.d/3.11.0a2.rst
+++ b/Misc/NEWS.d/3.11.0a2.rst
@@ -142,7 +142,7 @@ Add SipHash13 for string hash algorithm and use it by default.
.. nonce: CTUT8s
.. section: Core and Builtins
-Fix reference leak from descr_check. Patch by Dong-hee Na.
+Fix reference leak from descr_check. Patch by Donghee Na.
..
@@ -263,7 +263,7 @@ Improve the generated bytecode for class and mapping patterns.
.. section: Core and Builtins
Speed up calls to ``enumerate()`` by using the :pep:`590` ``vectorcall``
-calling convention. Patch by Dong-hee Na.
+calling convention. Patch by Donghee Na.
..
@@ -396,7 +396,7 @@ Patch by Inada Naoki.
.. section: Library
Update :class:`~typing.ForwardRef` to support ``|`` operator. Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -486,7 +486,7 @@ Patch by Joongi Kim.
.. section: Library
Empty escapechar/quotechar is not allowed when initializing
-:class:`csv.Dialect`. Patch by Vajrasky Kok and Dong-hee Na.
+:class:`csv.Dialect`. Patch by Vajrasky Kok and Donghee Na.
..
@@ -569,7 +569,7 @@ formatting options.
.. section: Library
Improve error message of :class:`csv.Dialect` when initializing. Patch by
-Vajrasky Kok and Dong-hee Na.
+Vajrasky Kok and Donghee Na.
..
diff --git a/Misc/NEWS.d/3.11.0a3.rst b/Misc/NEWS.d/3.11.0a3.rst
index 7fdc191c244849..a96a59115797ee 100644
--- a/Misc/NEWS.d/3.11.0a3.rst
+++ b/Misc/NEWS.d/3.11.0a3.rst
@@ -615,7 +615,7 @@ Launch GNOME web browsers via gio tool instead of obsolete gvfs-open
.. section: Library
On Windows, :func:`time.sleep` now uses a waitable timer which supports
-high-resolution timers. Patch by Dong-hee Na and Eryk Sun.
+high-resolution timers. Patch by Donghee Na and Eryk Sun.
..
diff --git a/Misc/NEWS.d/3.11.0a5.rst b/Misc/NEWS.d/3.11.0a5.rst
index c28078da8d8339..08d94e82ed8ccf 100644
--- a/Misc/NEWS.d/3.11.0a5.rst
+++ b/Misc/NEWS.d/3.11.0a5.rst
@@ -127,7 +127,7 @@ Aditya.
.. section: Core and Builtins
Speed up calls to :meth:`weakref.ref.__call__` by using the :pep:`590`
-``vectorcall`` calling convention. Patch by Dong-hee Na.
+``vectorcall`` calling convention. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.11.0a6.rst b/Misc/NEWS.d/3.11.0a6.rst
index fcec71c6f59da2..52055b3fafd485 100644
--- a/Misc/NEWS.d/3.11.0a6.rst
+++ b/Misc/NEWS.d/3.11.0a6.rst
@@ -382,7 +382,7 @@ involving lots of brackets. Patch by Pablo Galindo.
.. section: Core and Builtins
:mod:`ctypes` now allocates memory on the stack instead of on the heap to
-pass arguments while calling a Python callback function. Patch by Dong-hee
+pass arguments while calling a Python callback function. Patch by Donghee
Na.
..
@@ -441,7 +441,7 @@ Add a missing call to ``va_end()`` in ``Modules/_hashopenssl.c``.
.. section: Core and Builtins
Use :c:func:`PyObject_Vectorcall` while calling ctypes callback function.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -514,7 +514,7 @@ For performance, use the optimized string-searching implementations from
.. section: Library
:class:`~http.server.SimpleHTTPRequestHandler` now uses HTML5 grammar. Patch
-by Dong-hee Na.
+by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.11.0a7.rst b/Misc/NEWS.d/3.11.0a7.rst
index 94c15f1c1f5237..6e41f9cbd933b5 100644
--- a/Misc/NEWS.d/3.11.0a7.rst
+++ b/Misc/NEWS.d/3.11.0a7.rst
@@ -89,7 +89,7 @@ problem. Define :c:macro:`PY_CALL_TRAMPOLINE` to enable call trampolines.
.. section: Core and Builtins
Some Windows system error codes(>= 10000) are now mapped into the correct
-errno and may now raise a subclass of :exc:`OSError`. Patch by Dong-hee Na.
+errno and may now raise a subclass of :exc:`OSError`. Patch by Donghee Na.
..
@@ -1599,7 +1599,7 @@ Call the public :func:`sys.get_asyncgen_hooks` and
.. section: C API
Remove private functions ``_PySys_GetObjectId()`` and
-``_PySys_SetObjectId()``. Patch by Dong-hee Na.
+``_PySys_SetObjectId()``. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.11.0b1.rst b/Misc/NEWS.d/3.11.0b1.rst
index 2bcccc7dae3734..a4cdda2cafdb43 100644
--- a/Misc/NEWS.d/3.11.0b1.rst
+++ b/Misc/NEWS.d/3.11.0b1.rst
@@ -185,7 +185,7 @@ functions leave the current exception unchanged. Patch by Victor Stinner.
.. section: Core and Builtins
Fix a minor memory leak at exit: release the memory of the
-:class:`generic_alias_iterator` type. Patch by Dong-hee Na.
+:class:`generic_alias_iterator` type. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.12.0a1.rst b/Misc/NEWS.d/3.12.0a1.rst
index 5178f4055e7b8e..633738de92bef7 100644
--- a/Misc/NEWS.d/3.12.0a1.rst
+++ b/Misc/NEWS.d/3.12.0a1.rst
@@ -82,7 +82,7 @@ the test failed).
.. nonce: eOBh8M
.. section: Core and Builtins
-Suppress ImportError for invalid query for help() command. Patch by Dong-hee
+Suppress ImportError for invalid query for help() command. Patch by Donghee
Na.
..
@@ -164,7 +164,7 @@ to calculate those doing pointer arithmetic.
.. section: Core and Builtins
:func:`os.sched_yield` now release the GIL while calling sched_yield(2).
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -465,7 +465,7 @@ Remove dead code from ``CALL_FUNCTION_EX`` opcode.
.. nonce: VE8-zf
.. section: Core and Builtins
-:class:`memoryview` now supports half-floats. Patch by Dong-hee Na and
+:class:`memoryview` now supports half-floats. Patch by Donghee Na and
Antoine Pitrou.
..
@@ -857,7 +857,7 @@ code objects could be "deduplicated" during compilation.
.. section: Core and Builtins
Reduce allocation size of :class:`list` from :meth:`str.split` and
-:meth:`str.rsplit`. Patch by Dong-hee Na and Inada Naoki.
+:meth:`str.rsplit`. Patch by Donghee Na and Inada Naoki.
..
@@ -3742,7 +3742,7 @@ Fix :func:`ast.unparse` when ``ImportFrom.level`` is None
Now :func:`~dis.dis` and :func:`~dis.get_instructions` handle operand values
for instructions prefixed by ``EXTENDED_ARG_QUICK``. Patch by Sam Gross and
-Dong-hee Na.
+Donghee Na.
..
@@ -5004,7 +5004,7 @@ Patch by Illia Volochii and Adam Turner.
.. section: Build
Fix the build process of clang compiler for :program:`_bootstrap_python` if
-LTO optimization is applied. Patch by Matthias Görgens and Dong-hee Na.
+LTO optimization is applied. Patch by Matthias Görgens and Donghee Na.
..
@@ -5024,7 +5024,7 @@ LTO optimization is applied. Patch by Matthias Görgens and Dong-hee Na.
.. section: Build
CPython now uses the ThinLTO option as the default policy if the Clang
-compiler accepts the flag. Patch by Dong-hee Na.
+compiler accepts the flag. Patch by Donghee Na.
..
@@ -5350,7 +5350,7 @@ in a virtual environment.
.. nonce: FbHZuS
.. section: Windows
-Fix :file:`py.exe` launcher handling of ``-V:/`` option when
+Fix :file:`py.exe` launcher handling of :samp:`-V:{}/` option when
default preferences have been set in environment variables or configuration
files.
diff --git a/Misc/NEWS.d/3.12.0a2.rst b/Misc/NEWS.d/3.12.0a2.rst
index f781e38665a8ea..1a04ed473f329d 100644
--- a/Misc/NEWS.d/3.12.0a2.rst
+++ b/Misc/NEWS.d/3.12.0a2.rst
@@ -8,7 +8,7 @@ The IDNA codec decoder used on DNS hostnames by :mod:`socket` or
:mod:`asyncio` related name resolution functions no longer involves a
quadratic algorithm. This prevents a potential CPU denial of service if an
out-of-spec excessive length hostname involving bidirectional characters
-were decoded. Some protocols such as :mod:`urllib` http ``3xx`` redirects
+were decoded. Some protocols such as :mod:`urllib` http :samp:`3{xx}` redirects
potentially allow for an attacker to supply such a name.
Individual labels within an IDNA encoded DNS name will now raise an error
@@ -111,7 +111,7 @@ back to alternative names ("python", "python.").
.. section: Core and Builtins
Update :mod:`faulthandler` to emit an error message with the proper
-unexpected signal number. Patch by Dong-hee Na.
+unexpected signal number. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.12.0a3.rst b/Misc/NEWS.d/3.12.0a3.rst
index 3e6f8de5d911f2..ce128fd5f80c77 100644
--- a/Misc/NEWS.d/3.12.0a3.rst
+++ b/Misc/NEWS.d/3.12.0a3.rst
@@ -9,7 +9,7 @@ within a garbage request to be printed to the stderr server log.
This is done by changing the :mod:`http.server`
:class:`BaseHTTPRequestHandler` ``.log_message`` method to replace control
-characters with a ``\xHH`` hex escape before printing.
+characters with a :samp:`\\x{HH}` hex escape before printing.
..
@@ -153,7 +153,7 @@ to specialize attribute accesses on types that haven't had
.. section: Core and Builtins
Allow some features of :mod:`syslog` to the main interpreter only. Patch by
-Dong-hee Na.
+Donghee Na.
..
diff --git a/Misc/NEWS.d/3.12.0a4.rst b/Misc/NEWS.d/3.12.0a4.rst
index 8951490f41b94c..b3b39024056ccc 100644
--- a/Misc/NEWS.d/3.12.0a4.rst
+++ b/Misc/NEWS.d/3.12.0a4.rst
@@ -676,7 +676,7 @@ parameter names in the C implementation. Patch by Alex Waygood.
.. section: Library
Update :exc:`~urllib.error.HTTPError` to be initialized properly, even if
-the ``fp`` is ``None``. Patch by Dong-hee Na.
+the ``fp`` is ``None``. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.12.0a5.rst b/Misc/NEWS.d/3.12.0a5.rst
index f6f8de46cf70d9..8cf90b0e9cde46 100644
--- a/Misc/NEWS.d/3.12.0a5.rst
+++ b/Misc/NEWS.d/3.12.0a5.rst
@@ -38,7 +38,7 @@ would get out of sync, causing inconsistent behavior and crashes.
.. section: Core and Builtins
Fix wrong lineno in exception message on :keyword:`continue` or
-:keyword:`break` which are not in a loop. Patch by Dong-hee Na.
+:keyword:`break` which are not in a loop. Patch by Donghee Na.
..
@@ -48,7 +48,7 @@ Fix wrong lineno in exception message on :keyword:`continue` or
.. section: Core and Builtins
Fix :func:`~unicodedata.is_normalized` to properly handle the UCD 3.2.0
-cases. Patch by Dong-hee Na.
+cases. Patch by Donghee Na.
..
@@ -507,7 +507,7 @@ inheritance.
.. section: Build
Update BOLT configuration not to use deprecated usage of ``--split
-functions``. Patch by Dong-hee Na.
+functions``. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.12.0a6.rst b/Misc/NEWS.d/3.12.0a6.rst
index 07967028bdee70..5bd600cd8b6fc0 100644
--- a/Misc/NEWS.d/3.12.0a6.rst
+++ b/Misc/NEWS.d/3.12.0a6.rst
@@ -220,7 +220,7 @@ access of ``builtins.__dict__`` keys mutates the iter object.
.. section: Core and Builtins
Update :mod:`tracemalloc` to handle presize of object properly. Patch by
-Dong-hee Na.
+Donghee Na.
..
diff --git a/Misc/NEWS.d/3.12.0b1.rst b/Misc/NEWS.d/3.12.0b1.rst
index 652b706880fb92..0944dfd0e90ab9 100644
--- a/Misc/NEWS.d/3.12.0b1.rst
+++ b/Misc/NEWS.d/3.12.0b1.rst
@@ -213,7 +213,7 @@ attribute.
.. section: Core and Builtins
Reduce object creation while calling callback function from gc. Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -464,7 +464,7 @@ unpickled.
.. section: Core and Builtins
Migrate :meth:`~ssl.SSLContext.set_ecdh_curve` method not to use deprecated
-OpenSSL APIs. Patch by Dong-hee Na.
+OpenSSL APIs. Patch by Donghee Na.
..
@@ -2073,7 +2073,7 @@ Define ``.PHONY`` / virtual make targets consistently and properly.
.. nonce: -W9BJS
.. section: Build
-Add gcc fallback of mkfifoat/mknodat for macOS. Patch by Dong-hee Na.
+Add gcc fallback of mkfifoat/mknodat for macOS. Patch by Donghee Na.
..
@@ -2372,7 +2372,7 @@ Add a new C-API function to eagerly assign a version tag to a PyTypeObject:
.. section: C API
:c:func:`PyObject_GC_Resize` should calculate preheader size if needed.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.5.0rc1.rst b/Misc/NEWS.d/3.5.0rc1.rst
index 1fb9bc6c04da38..64e9435b252acb 100644
--- a/Misc/NEWS.d/3.5.0rc1.rst
+++ b/Misc/NEWS.d/3.5.0rc1.rst
@@ -168,7 +168,7 @@ Sanad Zaki Rizvi.
Idle editor default font. Switch from Courier to platform-sensitive
TkFixedFont. This should not affect current customized font selections. If
-there is a problem, edit $HOME/.idlerc/config-main.cfg and remove 'fontxxx'
+there is a problem, edit $HOME/.idlerc/config-main.cfg and remove ':samp:`font{xxx}`'
entries from [Editor Window]. Patch by Mark Roseman.
..
diff --git a/Misc/NEWS.d/3.5.1rc1.rst b/Misc/NEWS.d/3.5.1rc1.rst
index dc247ce2096a7d..05e1ecfaf6bc79 100644
--- a/Misc/NEWS.d/3.5.1rc1.rst
+++ b/Misc/NEWS.d/3.5.1rc1.rst
@@ -189,7 +189,7 @@ comprehensions correspond to the opening brace.
.. nonce: 0Gh-Ty
.. section: Core and Builtins
-Hide the private _Py_atomic_xxx symbols from the public Python.h header to
+Hide the private :samp:`_Py_atomic_{xxx}` symbols from the public Python.h header to
fix a compilation error with OpenMP. PyThreadState_GET() becomes an alias to
PyThreadState_Get() to avoid ABI incompatibilities.
diff --git a/Misc/NEWS.d/3.5.4.rst b/Misc/NEWS.d/3.5.4.rst
index cd0ca4872f1ab0..7839fa2709ecf2 100644
--- a/Misc/NEWS.d/3.5.4.rst
+++ b/Misc/NEWS.d/3.5.4.rst
@@ -5,4 +5,4 @@
.. section: Library
ftplib.FTP.putline() now throws ValueError on commands that contains CR or
-LF. Patch by Dong-hee Na.
+LF. Patch by Donghee Na.
diff --git a/Misc/NEWS.d/3.5.4rc1.rst b/Misc/NEWS.d/3.5.4rc1.rst
index 04a035a41e7461..d65d5d14ee78bb 100644
--- a/Misc/NEWS.d/3.5.4rc1.rst
+++ b/Misc/NEWS.d/3.5.4rc1.rst
@@ -340,7 +340,7 @@ not keep objects alive longer than expected.
.. section: Library
inspect.signature() now supports callables with variable-argument parameters
-wrapped with partialmethod. Patch by Dong-hee Na.
+wrapped with partialmethod. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.6.2rc1.rst b/Misc/NEWS.d/3.6.2rc1.rst
index cdf4c3d541c4ca..28eb88f79130c5 100644
--- a/Misc/NEWS.d/3.6.2rc1.rst
+++ b/Misc/NEWS.d/3.6.2rc1.rst
@@ -77,7 +77,7 @@ delivered to the innermost frame.
.. section: Core and Builtins
sys.getsizeof() on a code object now returns the sizes which includes the
-code struct and sizes of objects which it references. Patch by Dong-hee Na.
+code struct and sizes of objects which it references. Patch by Donghee Na.
..
@@ -163,7 +163,7 @@ no longer ignored. Patch by Mircea Cosbuc.
.. nonce: I2mDTz
.. section: Library
-Functional API of enum allows to create empty enums. Patched by Dong-hee Na
+Functional API of enum allows to create empty enums. Patched by Donghee Na
..
@@ -202,7 +202,7 @@ not keep objects alive longer than expected.
.. section: Library
inspect.signature() now supports callables with variable-argument parameters
-wrapped with partialmethod. Patch by Dong-hee Na.
+wrapped with partialmethod. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.6.3rc1.rst b/Misc/NEWS.d/3.6.3rc1.rst
index 4dc2eef5d3b61b..4b2aae9dc88441 100644
--- a/Misc/NEWS.d/3.6.3rc1.rst
+++ b/Misc/NEWS.d/3.6.3rc1.rst
@@ -506,7 +506,7 @@ Fix handling of long oids in ssl. Based on patch by Christian Heimes.
.. section: Library
ftplib.FTP.putline() now throws ValueError on commands that contains CR or
-LF. Patch by Dong-hee Na.
+LF. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.6.6rc1.rst b/Misc/NEWS.d/3.6.6rc1.rst
index 71a5c3ec595ba2..9624195c79043b 100644
--- a/Misc/NEWS.d/3.6.6rc1.rst
+++ b/Misc/NEWS.d/3.6.6rc1.rst
@@ -289,7 +289,7 @@ literals on pydoc. Patch by Andrés Delfino.
.. section: Library
Update error message when constructing invalid inspect.Parameters Patch by
-Dong-hee Na.
+Donghee Na.
..
diff --git a/Misc/NEWS.d/3.7.0a1.rst b/Misc/NEWS.d/3.7.0a1.rst
index 712558bf98d018..bee424241fd712 100644
--- a/Misc/NEWS.d/3.7.0a1.rst
+++ b/Misc/NEWS.d/3.7.0a1.rst
@@ -529,7 +529,7 @@ name are now supported.
.. section: Core and Builtins
sys.getsizeof() on a code object now returns the sizes which includes the
-code struct and sizes of objects which it references. Patch by Dong-hee Na.
+code struct and sizes of objects which it references. Patch by Donghee Na.
..
@@ -2260,7 +2260,7 @@ Update zlib to 1.2.11.
.. section: Library
ftplib.FTP.putline() now throws ValueError on commands that contains CR or
-LF. Patch by Dong-hee Na.
+LF. Patch by Donghee Na.
..
@@ -2329,7 +2329,7 @@ always return bytes.
.. nonce: I2mDTz
.. section: Library
-Functional API of enum allows to create empty enums. Patched by Dong-hee Na
+Functional API of enum allows to create empty enums. Patched by Donghee Na
..
@@ -2612,7 +2612,7 @@ Fix handling escape characters in HZ codec. Based on patch by Ma Lin.
.. section: Library
inspect.signature() now supports callables with variable-argument parameters
-wrapped with partialmethod. Patch by Dong-hee Na.
+wrapped with partialmethod. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.7.0a3.rst b/Misc/NEWS.d/3.7.0a3.rst
index 52df0e7e82b080..a968616f55be68 100644
--- a/Misc/NEWS.d/3.7.0a3.rst
+++ b/Misc/NEWS.d/3.7.0a3.rst
@@ -539,7 +539,7 @@ optional
.. section: Library
Updates 2to3 to convert from operator.isCallable(obj) to callable(obj).
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -549,7 +549,7 @@ Patch by Dong-hee Na.
.. section: Library
inspect.signature should follow :pep:`8`, if the parameter has an annotation
-and a default value. Patch by Dong-hee Na.
+and a default value. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.7.0b5.rst b/Misc/NEWS.d/3.7.0b5.rst
index 20476993b9652a..fb29109869188b 100644
--- a/Misc/NEWS.d/3.7.0b5.rst
+++ b/Misc/NEWS.d/3.7.0b5.rst
@@ -418,7 +418,7 @@ trigger a ``DeprecationWarning`` and have been marked for removal in Python
.. section: Library
Update error message when constructing invalid inspect.Parameters Patch by
-Dong-hee Na.
+Donghee Na.
..
diff --git a/Misc/NEWS.d/3.8.0a1.rst b/Misc/NEWS.d/3.8.0a1.rst
index dbbfb6e8b0d68e..57f72e95b029fc 100644
--- a/Misc/NEWS.d/3.8.0a1.rst
+++ b/Misc/NEWS.d/3.8.0a1.rst
@@ -1965,7 +1965,7 @@ result of an internal future if it's already done.
.. section: Library
Add a deprecated warning for the :meth:`threading.Thread.isAlive` method.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -4974,7 +4974,7 @@ Enum members.
.. section: Library
Update error message when constructing invalid inspect.Parameters Patch by
-Dong-hee Na.
+Donghee Na.
..
@@ -8253,7 +8253,7 @@ Explain how IDLE's Shell displays output.
Improve the doc about IDLE running user code. The section is renamed from
"IDLE -- console differences" is renamed "Running user code". It mostly
-covers the implications of using custom sys.stdxxx objects.
+covers the implications of using custom :samp:`sys.std{xxx}` objects.
..
diff --git a/Misc/NEWS.d/3.8.0a4.rst b/Misc/NEWS.d/3.8.0a4.rst
index da03d93eae3965..7e8bfa5c4364a9 100644
--- a/Misc/NEWS.d/3.8.0a4.rst
+++ b/Misc/NEWS.d/3.8.0a4.rst
@@ -1087,7 +1087,7 @@ on the ABI.
Change ``PyAPI_FUNC(type)``, ``PyAPI_DATA(type)`` and ``PyMODINIT_FUNC``
macros of ``pyport.h`` when ``Py_BUILD_CORE_MODULE`` is defined. The
``Py_BUILD_CORE_MODULE`` define must be now be used to build a C extension
-as a dynamic library accessing Python internals: export the PyInit_xxx()
+as a dynamic library accessing Python internals: export the :samp:`PyInit_{xxx}()`
function in DLL exports on Windows.
..
diff --git a/Misc/NEWS.d/3.9.0a1.rst b/Misc/NEWS.d/3.9.0a1.rst
index 5a4431b0fcf1c6..9818c17705074b 100644
--- a/Misc/NEWS.d/3.9.0a1.rst
+++ b/Misc/NEWS.d/3.9.0a1.rst
@@ -33,7 +33,7 @@ Fixes audit event for :func:`os.system` to be named ``os.system``.
.. section: Security
Escape the server title of :class:`xmlrpc.server.DocXMLRPCServer` when
-rendering the document page as HTML. (Contributed by Dong-hee Na in
+rendering the document page as HTML. (Contributed by Donghee Na in
:issue:`38243`.)
..
@@ -203,7 +203,7 @@ arguments in decorators.
.. section: Core and Builtins
Fix a segmentation fault when using reverse iterators of empty ``dict``
-objects. Patch by Dong-hee Na and Inada Naoki.
+objects. Patch by Donghee Na and Inada Naoki.
..
@@ -280,7 +280,7 @@ visited by ``tp_traverse()`` are valid.
.. section: Core and Builtins
Remove unnecessary intersection and update set operation in dictview with
-empty set. (Contributed by Dong-hee Na in :issue:`38210`.)
+empty set. (Contributed by Donghee Na in :issue:`38210`.)
..
@@ -1194,7 +1194,7 @@ Expose the Linux ``pidfd_open`` syscall as :func:`os.pidfd_open`.
.. section: Library
Added constants :const:`~fcntl.F_OFD_GETLK`, :const:`~fcntl.F_OFD_SETLK` and
-:const:`~fcntl.F_OFD_SETLKW` to the :mod:`fcntl` module. Patch by Dong-hee
+:const:`~fcntl.F_OFD_SETLKW` to the :mod:`fcntl` module. Patch by Donghee
Na.
..
@@ -1284,7 +1284,7 @@ Fixed erroneous equality comparison in statistics.NormalDist().
.. section: Library
Added :const:`~os.CLD_KILLED` and :const:`~os.CLD_STOPPED` for
-:attr:`si_code`. Patch by Dong-hee Na.
+:attr:`si_code`. Patch by Donghee Na.
..
@@ -1882,7 +1882,7 @@ avoid dynamic lookup.
.. section: Library
Update :class:`importlib.machinery.BuiltinImporter` to use
-``loader._ORIGIN`` instead of a hardcoded value. Patch by Dong-hee Na.
+``loader._ORIGIN`` instead of a hardcoded value. Patch by Donghee Na.
..
@@ -2080,7 +2080,7 @@ method which emits a deprecation warning and calls corresponding methody
.. section: Library
Update test_statistics.py to verify that the statistics module works well
-for both C and Python implementations. Patch by Dong-hee Na
+for both C and Python implementations. Patch by Donghee Na
..
@@ -2201,7 +2201,7 @@ uses more than ``SIGSTKSZ`` bytes of stack memory on some platforms.
.. nonce: AmXrik
.. section: Library
-Add C fastpath for statistics.NormalDist.inv_cdf() Patch by Dong-hee Na
+Add C fastpath for statistics.NormalDist.inv_cdf() Patch by Donghee Na
..
@@ -2210,7 +2210,7 @@ Add C fastpath for statistics.NormalDist.inv_cdf() Patch by Dong-hee Na
.. nonce: Ene6L-
.. section: Library
-Remove the deprecated method `threading.Thread.isAlive()`. Patch by Dong-hee
+Remove the deprecated method `threading.Thread.isAlive()`. Patch by Donghee
Na.
..
@@ -4089,7 +4089,7 @@ Increase code coverage for multiprocessing.shared_memory.
.. nonce: Kl1sti
.. section: Tests
-Add tests for json.dump(..., skipkeys=True). Patch by Dong-hee Na.
+Add tests for json.dump(..., skipkeys=True). Patch by Donghee Na.
..
@@ -4118,7 +4118,7 @@ Add tests for ROT-13 codec.
.. nonce: Zoe9ek
.. section: Tests
-Added tests for PyDateTime_xxx_GET_xxx() macros of the C API of the
+Added tests for :samp:`PyDateTime_{xxx}_GET_{xxx}()` macros of the C API of the
:mod:`datetime` module. Patch by Joannah Nanjekye.
..
@@ -4576,7 +4576,7 @@ distutils bdist_wininst: bdist_wininst only works on Windows.
.. nonce: j5ebdT
.. section: Build
-Many ``PyRun_XXX()`` functions like :c:func:`PyRun_String` were no longer
+Many :samp:`PyRun_{XXX}()` functions like :c:func:`PyRun_String` were no longer
exported in ``libpython38.dll`` by mistake. Export them again to fix the ABI
compatibility.
diff --git a/Misc/NEWS.d/3.9.0a3.rst b/Misc/NEWS.d/3.9.0a3.rst
index 8b7ff49668e1c0..8a94848427382b 100644
--- a/Misc/NEWS.d/3.9.0a3.rst
+++ b/Misc/NEWS.d/3.9.0a3.rst
@@ -149,7 +149,7 @@ argument - by Anthony Sottile.
.. section: Core and Builtins
Correct the error message when calling the :func:`min` or :func:`max` with
-no arguments. Patch by Dong-hee Na.
+no arguments. Patch by Donghee Na.
..
@@ -392,7 +392,7 @@ Remove ``fractions.gcd()`` function, deprecated since Python 3.5
.. section: Library
:class:`~smtplib.LMTP` constructor now has an optional *timeout* parameter.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -414,7 +414,7 @@ Taskaya.
:class:`~ftplib.FTP_TLS` and :class:`~ftplib.FTP_TLS` now raise a
:class:`ValueError` if the given timeout for their constructor is zero to
-prevent the creation of a non-blocking socket. Patch by Dong-hee Na.
+prevent the creation of a non-blocking socket. Patch by Donghee Na.
..
@@ -425,7 +425,7 @@ prevent the creation of a non-blocking socket. Patch by Dong-hee Na.
:class:`~smtplib.SMTP` and :class:`~smtplib.SMTP_SSL` now raise a
:class:`ValueError` if the given timeout for their constructor is zero to
-prevent the creation of a non-blocking socket. Patch by Dong-hee Na.
+prevent the creation of a non-blocking socket. Patch by Donghee Na.
..
@@ -456,7 +456,7 @@ resilients to inaccessible sys.path entries (importlib_metadata v1.4.0).
:class:`~!nntplib.NNTP` and :class:`~!nntplib.NNTP_SSL` now raise a
:class:`ValueError` if the given timeout for their constructor is zero to
-prevent the creation of a non-blocking socket. Patch by Dong-hee Na.
+prevent the creation of a non-blocking socket. Patch by Donghee Na.
..
@@ -488,7 +488,7 @@ towards *y*.
:class:`~poplib.POP3` and :class:`~poplib.POP3_SSL` now raise a
:class:`ValueError` if the given timeout for their constructor is zero to
-prevent the creation of a non-blocking socket. Patch by Dong-hee Na.
+prevent the creation of a non-blocking socket. Patch by Donghee Na.
..
@@ -571,7 +571,7 @@ new task spawning before exception raising.
.. section: Library
Correctly parenthesize filter-based statements that contain lambda
-expressions in mod:`!lib2to3`. Patch by Dong-hee Na.
+expressions in mod:`!lib2to3`. Patch by Donghee Na.
..
@@ -699,7 +699,7 @@ upon inheritance. Patch by Bar Harel.
:meth:`~imaplib.IMAP4.open` method now has an optional *timeout* parameter
with this change. The overridden methods of :class:`~imaplib.IMAP4_SSL` and
:class:`~imaplib.IMAP4_stream` were applied to this change. Patch by
-Dong-hee Na.
+Donghee Na.
..
diff --git a/Misc/NEWS.d/3.9.0a4.rst b/Misc/NEWS.d/3.9.0a4.rst
index 019b34c4082d10..e59435b5509acf 100644
--- a/Misc/NEWS.d/3.9.0a4.rst
+++ b/Misc/NEWS.d/3.9.0a4.rst
@@ -43,7 +43,7 @@ first item. Patch by Yonatan Goldschmidt.
.. nonce: BIIX2M
.. section: Core and Builtins
-Update clinic tool to use :c:func:`Py_IS_TYPE`. Patch by Dong-hee Na.
+Update clinic tool to use :c:func:`Py_IS_TYPE`. Patch by Donghee Na.
..
@@ -141,7 +141,7 @@ collection of deleted, pickled objects.
.. section: Core and Builtins
Fixed a possible crash in :meth:`list.__contains__` when a list is changed
-during comparing items. Patch by Dong-hee Na.
+during comparing items. Patch by Donghee Na.
..
@@ -152,7 +152,7 @@ during comparing items. Patch by Dong-hee Na.
:term:`floor division` of float operation now has a better performance. Also
the message of :exc:`ZeroDivisionError` for this operation is updated. Patch
-by Dong-hee Na.
+by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.9.0a5.rst b/Misc/NEWS.d/3.9.0a5.rst
index 19ad20ad3db042..6ff05788214723 100644
--- a/Misc/NEWS.d/3.9.0a5.rst
+++ b/Misc/NEWS.d/3.9.0a5.rst
@@ -96,7 +96,7 @@ Port itertools module to multiphase initialization (:pep:`489`).
.. section: Core and Builtins
Speed up calls to ``frozenset()`` by using the :pep:`590` ``vectorcall``
-calling convention. Patch by Dong-hee Na.
+calling convention. Patch by Donghee Na.
..
@@ -117,7 +117,7 @@ own variable.
.. section: Core and Builtins
Speed up calls to ``set()`` by using the :pep:`590` ``vectorcall`` calling
-convention. Patch by Dong-hee Na.
+convention. Patch by Donghee Na.
..
@@ -166,7 +166,7 @@ Allow executing asynchronous comprehensions on the top level when the
.. section: Core and Builtins
Speed up calls to ``tuple()`` by using the :pep:`590` ``vectorcall`` calling
-convention. Patch by Dong-hee Na.
+convention. Patch by Donghee Na.
..
@@ -571,7 +571,7 @@ Fixed :func:`ast.unparse` for extended slices containing a single element
.. nonce: yWq9NJ
.. section: Library
-Fix :mod:`json.tool` to catch :exc:`BrokenPipeError`. Patch by Dong-hee Na.
+Fix :mod:`json.tool` to catch :exc:`BrokenPipeError`. Patch by Donghee Na.
..
@@ -783,7 +783,7 @@ when the optional ``qop`` parameter is not present.
.. section: Library
HTTP status codes ``103 EARLY_HINTS`` and ``425 TOO_EARLY`` are added to
-:class:`http.HTTPStatus`. Patch by Dong-hee Na.
+:class:`http.HTTPStatus`. Patch by Donghee Na.
..
@@ -1133,7 +1133,7 @@ module. Patch by José Roberto Meza Cabrera.
.. section: C API
Add :c:func:`PyModule_AddType` helper function: add a type to a module.
-Patch by Dong-hee Na.
+Patch by Donghee Na.
..
@@ -1163,7 +1163,7 @@ Python thread state.
.. nonce: R3jaTy
.. section: C API
-Add _PyArg_NoKwnames helper function. Patch by Dong-hee Na.
+Add _PyArg_NoKwnames helper function. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/3.9.0b1.rst b/Misc/NEWS.d/3.9.0b1.rst
index 15790bc425c13f..ee87315ad334e9 100644
--- a/Misc/NEWS.d/3.9.0b1.rst
+++ b/Misc/NEWS.d/3.9.0b1.rst
@@ -490,7 +490,7 @@ The first argument of :func:`pickle.loads` is now positional-only.
.. section: Library
Update :mod:`!nntplib` to merge :class:`!nntplib.NNTP` and
-:class:`!nntplib._NNTPBase`. Patch by Dong-hee Na.
+:class:`!nntplib._NNTPBase`. Patch by Donghee Na.
..
@@ -500,7 +500,7 @@ Update :mod:`!nntplib` to merge :class:`!nntplib.NNTP` and
.. section: Library
Update :mod:`dbm.gnu` to use gdbm_count if possible when calling
-:func:`len`. Patch by Dong-hee Na.
+:func:`len`. Patch by Donghee Na.
..
@@ -592,7 +592,7 @@ subdirectories in package data, matching backport in importlib_resources
.. nonce: 5GuK2A
.. section: Library
-:meth:`imaplib.IMAP4.unselect` is added. Patch by Dong-hee Na.
+:meth:`imaplib.IMAP4.unselect` is added. Patch by Donghee Na.
..
diff --git a/Misc/NEWS.d/next/Build/2023-09-01-01-39-26.gh-issue-108740.JHExAQ.rst b/Misc/NEWS.d/next/Build/2023-09-01-01-39-26.gh-issue-108740.JHExAQ.rst
new file mode 100644
index 00000000000000..190d50387f339e
--- /dev/null
+++ b/Misc/NEWS.d/next/Build/2023-09-01-01-39-26.gh-issue-108740.JHExAQ.rst
@@ -0,0 +1,4 @@
+Fix a race condition in ``make regen-all``. The ``deepfreeze.c`` source and
+files generated by Argument Clinic are now generated or updated before
+generating "global objects". Previously, some identifiers may miss depending
+on the order in which these files were generated. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Build/2023-09-02-18-04-15.gh-issue-63760.r8hJ6q.rst b/Misc/NEWS.d/next/Build/2023-09-02-18-04-15.gh-issue-63760.r8hJ6q.rst
new file mode 100644
index 00000000000000..9a7249e923e0c7
--- /dev/null
+++ b/Misc/NEWS.d/next/Build/2023-09-02-18-04-15.gh-issue-63760.r8hJ6q.rst
@@ -0,0 +1,3 @@
+Fix Solaris build: no longer redefine the ``gethostname()`` function. Solaris
+has defined the function since 2005. Patch by Victor Stinner, original patch by
+Jakub Kulík.
diff --git a/Misc/NEWS.d/next/Build/2023-09-07-19-58-05.gh-issue-109054.5r3S3l.rst b/Misc/NEWS.d/next/Build/2023-09-07-19-58-05.gh-issue-109054.5r3S3l.rst
new file mode 100644
index 00000000000000..d86a110e0de68c
--- /dev/null
+++ b/Misc/NEWS.d/next/Build/2023-09-07-19-58-05.gh-issue-109054.5r3S3l.rst
@@ -0,0 +1,6 @@
+Fix building the ``_testcapi`` extension on Linux AArch64 which requires
+linking to libatomic when ``<pyatomic.h>`` is used: the
+``_Py_atomic_or_uint64()`` function requires libatomic
+``__atomic_fetch_or_8()`` on this platform. The configure script now checks
+if linking to libatomic is needed and generates a new LIBATOMIC variable
+used to build the _testcapi extension. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/C API/2023-08-24-20-08-02.gh-issue-108014.20DOSS.rst b/Misc/NEWS.d/next/C API/2023-08-24-20-08-02.gh-issue-108014.20DOSS.rst
index 5c1b04f3237e78..35cb153ba09076 100644
--- a/Misc/NEWS.d/next/C API/2023-08-24-20-08-02.gh-issue-108014.20DOSS.rst
+++ b/Misc/NEWS.d/next/C API/2023-08-24-20-08-02.gh-issue-108014.20DOSS.rst
@@ -1,4 +1,4 @@
Add :c:func:`PyLong_AsInt` function: similar to :c:func:`PyLong_AsLong`, but
store the result in a C :c:expr:`int` instead of a C :c:expr:`long`.
-Previously, it was known as the the private function :c:func:`!_PyLong_AsInt`
+Previously, it was known as the private function :c:func:`!_PyLong_AsInt`
(with an underscore prefix). Patch by Victor Stinner.
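A minimal sketch of how the function described in the entry above might be used
from an extension module; ``get_port()`` and its error text are invented for
illustration, and the return-``-1``-with-an-exception error convention is
assumed from the comparison with :c:func:`PyLong_AsLong` rather than taken from
the patch itself::

   #include <Python.h>

   static int
   get_port(PyObject *obj)
   {
       int port = PyLong_AsInt(obj);       /* result stored in a C int */
       if (port == -1 && PyErr_Occurred()) {
           return -1;                      /* OverflowError or TypeError set */
       }
       if (port < 0 || port > 65535) {
           PyErr_SetString(PyExc_ValueError, "port out of range");
           return -1;
       }
       return port;
   }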
diff --git a/Misc/NEWS.d/next/C API/2023-09-01-16-28-09.gh-issue-108511.gg-QDG.rst b/Misc/NEWS.d/next/C API/2023-09-01-16-28-09.gh-issue-108511.gg-QDG.rst
new file mode 100644
index 00000000000000..1e5f32905aa24d
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2023-09-01-16-28-09.gh-issue-108511.gg-QDG.rst
@@ -0,0 +1,4 @@
+Add functions :c:func:`PyObject_HasAttrWithError`,
+:c:func:`PyObject_HasAttrStringWithError`,
+:c:func:`PyMapping_HasKeyWithError` and
+:c:func:`PyMapping_HasKeyStringWithError`.
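A short hedged sketch of how one of these new checks might be called;
``has_close_method()`` is an invented helper, and the ``1``/``0``/``-1``
return convention is assumed from the ``WithError`` naming rather than
confirmed by the entry above::

   #include <Python.h>

   static int
   has_close_method(PyObject *obj)
   {
       int rc = PyObject_HasAttrStringWithError(obj, "close");
       if (rc < 0) {
           return -1;   /* the attribute lookup itself failed; exception set */
       }
       return rc;       /* 1 if the attribute exists, 0 if it does not */
   }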
diff --git a/Misc/NEWS.d/next/C API/2023-09-01-18-42-31.gh-issue-108765.IyYNDu.rst b/Misc/NEWS.d/next/C API/2023-09-01-18-42-31.gh-issue-108765.IyYNDu.rst
new file mode 100644
index 00000000000000..7b33481f225b5a
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2023-09-01-18-42-31.gh-issue-108765.IyYNDu.rst
@@ -0,0 +1,6 @@
+``Python.h`` no longer includes these standard header files: ``<time.h>``,
+``<sys/select.h>`` and ``<sys/time.h>``. If needed, they should now be included
+explicitly. For example, ``<time.h>`` provides the ``clock()`` and ``gmtime()``
+functions, ``<sys/select.h>`` provides the ``select()`` function, and
+``<sys/time.h>`` provides the ``futimes()``, ``gettimeofday()`` and
+``setitimer()`` functions. Patch by Victor Stinner.
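For extension authors the entry above amounts to an include change. A minimal,
POSIX-only sketch of what that looks like, with the headers now included
explicitly next to ``Python.h`` (``py_gettimeofday()`` is an invented example
function)::

   #include <Python.h>

   #include <time.h>         /* clock(), gmtime() */
   #include <sys/select.h>   /* select() */
   #include <sys/time.h>     /* gettimeofday(), setitimer(), futimes() */

   static PyObject *
   py_gettimeofday(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args))
   {
       struct timeval tv;
       if (gettimeofday(&tv, NULL) != 0) {
           return PyErr_SetFromErrno(PyExc_OSError);
       }
       return PyFloat_FromDouble((double)tv.tv_sec + (double)tv.tv_usec * 1e-6);
   }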
diff --git a/Misc/NEWS.d/next/C API/2023-09-02-22-35-55.gh-issue-108765.4TOdBT.rst b/Misc/NEWS.d/next/C API/2023-09-02-22-35-55.gh-issue-108765.4TOdBT.rst
new file mode 100644
index 00000000000000..c13b6d9db053fc
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2023-09-02-22-35-55.gh-issue-108765.4TOdBT.rst
@@ -0,0 +1,5 @@
+``Python.h`` no longer includes the ``<ctype.h>`` standard header file. If
+needed, it should now be included explicitly. For example, it provides
+``isalpha()`` and ``tolower()`` functions which are locale dependent. Python
+provides locale independent functions, like :c:func:`!Py_ISALPHA` and
+:c:func:`!Py_TOLOWER`. Patch by Victor Stinner.
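A small sketch of the replacement the entry above suggests: using the
locale-independent macros that ``Python.h`` already provides instead of
including ``<ctype.h>``; ``count_ascii_letters()`` is an invented helper::

   #include <Python.h>

   static Py_ssize_t
   count_ascii_letters(const char *s)
   {
       Py_ssize_t n = 0;
       for (; *s != '\0'; s++) {
           if (Py_ISALPHA((unsigned char)*s)) {   /* instead of isalpha(*s) */
               n++;
           }
       }
       return n;
   }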
diff --git a/Misc/NEWS.d/next/C API/2023-09-12-13-09-36.gh-issue-108724.-yMsC8.rst b/Misc/NEWS.d/next/C API/2023-09-12-13-09-36.gh-issue-108724.-yMsC8.rst
new file mode 100644
index 00000000000000..5cddf9bc239700
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2023-09-12-13-09-36.gh-issue-108724.-yMsC8.rst
@@ -0,0 +1 @@
+Add :c:type:`PyMutex` internal-only lightweight locking API.
diff --git a/Misc/NEWS.d/next/C API/2023-09-17-21-47-31.gh-issue-109521.JDF6i9.rst b/Misc/NEWS.d/next/C API/2023-09-17-21-47-31.gh-issue-109521.JDF6i9.rst
new file mode 100644
index 00000000000000..338650c9246686
--- /dev/null
+++ b/Misc/NEWS.d/next/C API/2023-09-17-21-47-31.gh-issue-109521.JDF6i9.rst
@@ -0,0 +1,5 @@
+:c:func:`PyImport_GetImporter` now sets RuntimeError if it fails to get
+:data:`sys.path_hooks` or :data:`sys.path_importer_cache`, or if they are not
+a list and a dict respectively. Previously it could return NULL without
+setting an error in obscure cases, or crash or raise SystemError if these
+attributes had the wrong type.
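A hedged sketch of the calling pattern implied by the entry above: with the
change, a ``NULL`` result can be assumed to arrive with an exception already
set, so only the usual ``NULL`` check is needed (``importer_for()`` is an
invented helper)::

   #include <Python.h>

   static PyObject *
   importer_for(PyObject *path_entry)
   {
       PyObject *importer = PyImport_GetImporter(path_entry);
       if (importer == NULL) {
           /* An exception (now RuntimeError in the failure cases above) is set. */
           return NULL;
       }
       return importer;   /* new reference */
   }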
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-03-26-19-11-10.gh-issue-93627.0UgwBL.rst b/Misc/NEWS.d/next/Core and Builtins/2023-03-26-19-11-10.gh-issue-93627.0UgwBL.rst
new file mode 100644
index 00000000000000..854da44b560b21
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-03-26-19-11-10.gh-issue-93627.0UgwBL.rst
@@ -0,0 +1 @@
+Update the Python pickle module implementation to match the C implementation of the pickle module. For objects setting reduction methods like :meth:`~object.__reduce_ex__` or :meth:`~object.__reduce__` to ``None``, pickling will result in a :exc:`TypeError`.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-06-05-23-38-43.gh-issue-104635.VYZhVh.rst b/Misc/NEWS.d/next/Core and Builtins/2023-06-05-23-38-43.gh-issue-104635.VYZhVh.rst
index f20ddb56d171c3..417e45a6655db6 100644
--- a/Misc/NEWS.d/next/Core and Builtins/2023-06-05-23-38-43.gh-issue-104635.VYZhVh.rst
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-06-05-23-38-43.gh-issue-104635.VYZhVh.rst
@@ -1,2 +1,2 @@
Eliminate redundant :opcode:`STORE_FAST` instructions in the compiler. Patch
-by Dong-hee Na and Carl Meyer.
+by Donghee Na and Carl Meyer.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-06-29-09-42-56.gh-issue-106213.TCUgzM.rst b/Misc/NEWS.d/next/Core and Builtins/2023-06-29-09-42-56.gh-issue-106213.TCUgzM.rst
new file mode 100644
index 00000000000000..431f9cc0e4bb7d
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-06-29-09-42-56.gh-issue-106213.TCUgzM.rst
@@ -0,0 +1,2 @@
+Changed the way that Emscripten call trampolines work for compatibility with
+Wasm/JS Promise integration.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-07-06-00-35-44.gh-issue-96844.kwvoS-.rst b/Misc/NEWS.d/next/Core and Builtins/2023-07-06-00-35-44.gh-issue-96844.kwvoS-.rst
index 55334173bc002d..cc9c6e39a77fd2 100644
--- a/Misc/NEWS.d/next/Core and Builtins/2023-07-06-00-35-44.gh-issue-96844.kwvoS-.rst
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-07-06-00-35-44.gh-issue-96844.kwvoS-.rst
@@ -1 +1 @@
-Improve error message of :meth:`list.remove`. Patch by Dong-hee Na.
+Improve error message of :meth:`list.remove`. Patch by Donghee Na.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-07-23-13-07-34.gh-issue-107122.9HFUyb.rst b/Misc/NEWS.d/next/Core and Builtins/2023-07-23-13-07-34.gh-issue-107122.9HFUyb.rst
index 64ac8ac6df09b8..08decfd89b7cf0 100644
--- a/Misc/NEWS.d/next/Core and Builtins/2023-07-23-13-07-34.gh-issue-107122.9HFUyb.rst
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-07-23-13-07-34.gh-issue-107122.9HFUyb.rst
@@ -1 +1 @@
-Add :meth:`dbm.gnu.gdbm.clear` to :mod:`dbm.gnu`. Patch By Dong-hee Na.
+Add :meth:`dbm.gnu.gdbm.clear` to :mod:`dbm.gnu`. Patch By Donghee Na.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-07-23-21-16-54.gh-issue-107122.VNuNcq.rst b/Misc/NEWS.d/next/Core and Builtins/2023-07-23-21-16-54.gh-issue-107122.VNuNcq.rst
index 5b7cc98ddc6414..f68036cef34365 100644
--- a/Misc/NEWS.d/next/Core and Builtins/2023-07-23-21-16-54.gh-issue-107122.VNuNcq.rst
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-07-23-21-16-54.gh-issue-107122.VNuNcq.rst
@@ -1 +1 @@
-Add :meth:`dbm.ndbm.ndbm.clear` to :mod:`dbm.ndbm`. Patch By Dong-hee Na.
+Add :meth:`dbm.ndbm.ndbm.clear` to :mod:`dbm.ndbm`. Patch By Donghee Na.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-08-13-17-18-22.gh-issue-108390.TkBccC.rst b/Misc/NEWS.d/next/Core and Builtins/2023-08-13-17-18-22.gh-issue-108390.TkBccC.rst
new file mode 100644
index 00000000000000..3ed596007b56f7
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-08-13-17-18-22.gh-issue-108390.TkBccC.rst
@@ -0,0 +1,4 @@
+Raise an exception when setting a non-local event (``RAISE``, ``EXCEPTION_HANDLED``,
+etc.) in ``sys.monitoring.set_local_events``.
+
+Fixes crash when tracing in recursive calls to Python classes.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-08-26-10-36-45.gh-issue-108614.wl5l-W.rst b/Misc/NEWS.d/next/Core and Builtins/2023-08-26-10-36-45.gh-issue-108614.wl5l-W.rst
new file mode 100644
index 00000000000000..ace670c9ba7fdf
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-08-26-10-36-45.gh-issue-108614.wl5l-W.rst
@@ -0,0 +1,2 @@
+Add RESUME_CHECK instruction, to avoid having to handle instrumentation,
+signals, and context switches in the tier 2 execution engine.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-08-28-03-38-28.gh-issue-108716.HJBPwt.rst b/Misc/NEWS.d/next/Core and Builtins/2023-08-28-03-38-28.gh-issue-108716.HJBPwt.rst
new file mode 100644
index 00000000000000..f63eb8689d63a3
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-08-28-03-38-28.gh-issue-108716.HJBPwt.rst
@@ -0,0 +1,2 @@
+Turn off deep-freezing of code objects. Modules are still frozen, so that a
+file system search is not needed for common modules.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-05-11-31-27.gh-issue-104584.IRSXA2.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-05-11-31-27.gh-issue-104584.IRSXA2.rst
new file mode 100644
index 00000000000000..7f556bf8c31c11
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-05-11-31-27.gh-issue-104584.IRSXA2.rst
@@ -0,0 +1,2 @@
+Fix a crash when running with :envvar:`PYTHONUOPS` or :option:`-X uops <-X>`
+enabled and an error occurs during optimization.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-05-20-52-17.gh-issue-108959.6z45Sy.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-05-20-52-17.gh-issue-108959.6z45Sy.rst
new file mode 100644
index 00000000000000..792bbc454f2b27
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-05-20-52-17.gh-issue-108959.6z45Sy.rst
@@ -0,0 +1,2 @@
+Fix caret placement for error locations for subscript and binary operations
+that involve non-semantic parentheses and spaces. Patch by Pablo Galindo
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-06-13-28-42.gh-issue-108732.I6DkEQ.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-06-13-28-42.gh-issue-108732.I6DkEQ.rst
new file mode 100644
index 00000000000000..94a143b86b6708
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-06-13-28-42.gh-issue-108732.I6DkEQ.rst
@@ -0,0 +1,2 @@
+Make iteration variables of module- and class-scoped comprehensions visible
+to pdb and other tools that use ``frame.f_locals`` again.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-06-22-50-25.gh-issue-108976.MUKaIJ.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-06-22-50-25.gh-issue-108976.MUKaIJ.rst
new file mode 100644
index 00000000000000..4b89375f0f57ef
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-06-22-50-25.gh-issue-108976.MUKaIJ.rst
@@ -0,0 +1,2 @@
+Fix crash that occurs after de-instrumenting a code object in a monitoring
+callback.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-07-16-05-36.gh-issue-88943.rH_X3W.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-16-05-36.gh-issue-88943.rH_X3W.rst
new file mode 100644
index 00000000000000..a99830fe4227c9
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-16-05-36.gh-issue-88943.rH_X3W.rst
@@ -0,0 +1,3 @@
+Improve the syntax error for a non-ASCII character that follows a numerical
+literal. It now points to the invalid non-ASCII character, not to the valid
+numerical literal.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-07-18-24-42.gh-issue-109118.yPXRAe.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-18-24-42.gh-issue-109118.yPXRAe.rst
new file mode 100644
index 00000000000000..f14fce4423896f
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-18-24-42.gh-issue-109118.yPXRAe.rst
@@ -0,0 +1,2 @@
+Fix interpreter crash when a NameError is raised inside the type parameters
+of a generic class.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-07-18-49-01.gh-issue-109052.TBU4nC.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-18-49-01.gh-issue-109052.TBU4nC.rst
new file mode 100644
index 00000000000000..175046c771cdf3
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-18-49-01.gh-issue-109052.TBU4nC.rst
@@ -0,0 +1 @@
+Use the base opcode when comparing code objects to avoid interference from instrumentation.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-07-20-52-27.gh-issue-105848.p799D1.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-20-52-27.gh-issue-105848.p799D1.rst
new file mode 100644
index 00000000000000..14661d14e190ce
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-07-20-52-27.gh-issue-105848.p799D1.rst
@@ -0,0 +1,3 @@
+Add a new :opcode:`CALL_KW` opcode, used for calls containing keyword
+arguments. Also, fix a possible crash when jumping over method calls in a
+debugger.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-08-01-50-41.gh-issue-109114.adqgtb.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-08-01-50-41.gh-issue-109114.adqgtb.rst
new file mode 100644
index 00000000000000..3d95dd5d29450c
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-08-01-50-41.gh-issue-109114.adqgtb.rst
@@ -0,0 +1,3 @@
+Relax the detection of the error message for invalid lambdas inside
+f-strings to not search for arbitrary replacement fields to avoid false
+positives. Patch by Pablo Galindo.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-08-18-31-04.gh-issue-109156.KK1EXI.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-08-18-31-04.gh-issue-109156.KK1EXI.rst
new file mode 100644
index 00000000000000..e681482c3a879e
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-08-18-31-04.gh-issue-109156.KK1EXI.rst
@@ -0,0 +1 @@
+Add tests for de-instrumenting instructions while keeping the instrumentation for lines.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-09-12-49-46.gh-issue-109118.gx0X4h.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-09-12-49-46.gh-issue-109118.gx0X4h.rst
new file mode 100644
index 00000000000000..87069c85870410
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-09-12-49-46.gh-issue-109118.gx0X4h.rst
@@ -0,0 +1,2 @@
+Disallow nested scopes (lambdas, generator expressions, and comprehensions)
+within PEP 695 annotation scopes that are nested within classes.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-09-21-17-18.gh-issue-109179.ZR8qs2.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-09-21-17-18.gh-issue-109179.ZR8qs2.rst
new file mode 100644
index 00000000000000..dd95a8ec7920aa
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-09-21-17-18.gh-issue-109179.ZR8qs2.rst
@@ -0,0 +1 @@
+Fix bug where the C traceback display drops notes from :exc:`SyntaxError`.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-10-18-53-55.gh-issue-109207.Fei8bY.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-10-18-53-55.gh-issue-109207.Fei8bY.rst
new file mode 100644
index 00000000000000..f9da3ac4d1abbd
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-10-18-53-55.gh-issue-109207.Fei8bY.rst
@@ -0,0 +1 @@
+Fix a SystemError in ``__repr__`` of symtable entry object.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-11-12-41-42.gh-issue-109216.60QOSb.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-11-12-41-42.gh-issue-109216.60QOSb.rst
new file mode 100644
index 00000000000000..aa8b2832af23a5
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-11-12-41-42.gh-issue-109216.60QOSb.rst
@@ -0,0 +1 @@
+Fix possible memory leak in :opcode:`BUILD_MAP`.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-11-15-11-03.gh-issue-109256.6mfhvF.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-11-15-11-03.gh-issue-109256.6mfhvF.rst
new file mode 100644
index 00000000000000..6c33faea0ae6c4
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-11-15-11-03.gh-issue-109256.6mfhvF.rst
@@ -0,0 +1,2 @@
+Opcode IDs for specialized opcodes are allocated in their own range to
+improve stability of the IDs for the 'real' opcodes.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-11-15-51-55.gh-issue-109195.iwxmuo.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-11-15-51-55.gh-issue-109195.iwxmuo.rst
new file mode 100644
index 00000000000000..5427232c2df9a0
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-11-15-51-55.gh-issue-109195.iwxmuo.rst
@@ -0,0 +1,4 @@
+Fix source location for the ``LOAD_*`` instruction preceding a
+``LOAD_SUPER_ATTR`` to load the ``super`` global (or shadowing variable) so
+that it encompasses only the name ``super`` and not the following
+parentheses.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-12-15-45-49.gh-issue-109341.4V5bkm.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-12-15-45-49.gh-issue-109341.4V5bkm.rst
new file mode 100644
index 00000000000000..9e99ef7eb73273
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-12-15-45-49.gh-issue-109341.4V5bkm.rst
@@ -0,0 +1 @@
+Fix crash when compiling an invalid AST involving a :class:`ast.TypeAlias`.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-12-16-00-42.gh-issue-109351.kznGeR.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-12-16-00-42.gh-issue-109351.kznGeR.rst
new file mode 100644
index 00000000000000..23b81c1c0a3baa
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-12-16-00-42.gh-issue-109351.kznGeR.rst
@@ -0,0 +1,2 @@
+Fix crash when compiling an invalid AST involving a named (walrus)
+expression.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-13-08-42-45.gh-issue-109219.UiN8sc.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-13-08-42-45.gh-issue-109219.UiN8sc.rst
new file mode 100644
index 00000000000000..2c141f09d7e754
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-13-08-42-45.gh-issue-109219.UiN8sc.rst
@@ -0,0 +1,2 @@
+Fix compiling type param scopes that use a name which is also free in an
+inner scope.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-13-19-16-51.gh-issue-105658.z2nR2u.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-13-19-16-51.gh-issue-105658.z2nR2u.rst
new file mode 100644
index 00000000000000..e95f5b84e8e187
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-13-19-16-51.gh-issue-105658.z2nR2u.rst
@@ -0,0 +1,2 @@
+Fix bug where the line trace of an except block ending with a conditional
+includes an excess event with the line of the conditional expression.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-13-21-04-04.gh-issue-109371.HPEJr8.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-13-21-04-04.gh-issue-109371.HPEJr8.rst
new file mode 100644
index 00000000000000..2fb18d5ae88347
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-13-21-04-04.gh-issue-109371.HPEJr8.rst
@@ -0,0 +1 @@
+Deopt instructions correctly during tool initialization and fix an incorrect assertion in instrumentation when a previous tool has already set INSTRUCTION events.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-14-20-15-57.gh-issue-107265.qHZL_6.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-14-20-15-57.gh-issue-107265.qHZL_6.rst
new file mode 100644
index 00000000000000..c30c21f034a1bc
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-14-20-15-57.gh-issue-107265.qHZL_6.rst
@@ -0,0 +1 @@
+Deopt opcodes hidden by the executor when the base opcode is needed.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-18-15-35-08.gh-issue-109496.Kleoz3.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-18-15-35-08.gh-issue-109496.Kleoz3.rst
new file mode 100644
index 00000000000000..51b2144fed7841
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-18-15-35-08.gh-issue-109496.Kleoz3.rst
@@ -0,0 +1,5 @@
+On a Python built in debug mode, :c:func:`Py_DECREF()` now calls
+``_Py_NegativeRefcount()`` if the object is a dangling pointer to
+deallocated memory: memory filled with ``0xDD`` "dead byte" by the debug
+hook on memory allocators. The fix is to check the reference count *before*
+checking for ``_Py_IsImmortal()``. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-20-13-18-08.gh-issue-109596.RG0K2G.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-20-13-18-08.gh-issue-109596.RG0K2G.rst
new file mode 100644
index 00000000000000..23ef73d578651d
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-20-13-18-08.gh-issue-109596.RG0K2G.rst
@@ -0,0 +1,3 @@
+Fix some tokens in the grammar that were incorrectly marked as soft
+keywords. Also fix some repeated rule names and ensure that repeated rules
+are not allowed. Patch by Pablo Galindo.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-20-23-04-15.gh-issue-109627.xxe7De.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-20-23-04-15.gh-issue-109627.xxe7De.rst
new file mode 100644
index 00000000000000..397d76e291419f
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-20-23-04-15.gh-issue-109627.xxe7De.rst
@@ -0,0 +1,2 @@
+Fix bug where the compiler does not assign a new jump target label to a
+duplicated small exit block.
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-09-22-13-38-17.gh-issue-109719.fx5OTz.rst b/Misc/NEWS.d/next/Core and Builtins/2023-09-22-13-38-17.gh-issue-109719.fx5OTz.rst
new file mode 100644
index 00000000000000..83be54c9ca793e
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2023-09-22-13-38-17.gh-issue-109719.fx5OTz.rst
@@ -0,0 +1 @@
+Fix missing jump target labels when compiler reorders cold/warm blocks.
diff --git a/Misc/NEWS.d/next/Documentation/2023-03-16-15-39-26.gh-issue-102759.ehpHw6.rst b/Misc/NEWS.d/next/Documentation/2023-03-16-15-39-26.gh-issue-102759.ehpHw6.rst
new file mode 100644
index 00000000000000..d3df6c8997aa35
--- /dev/null
+++ b/Misc/NEWS.d/next/Documentation/2023-03-16-15-39-26.gh-issue-102759.ehpHw6.rst
@@ -0,0 +1,2 @@
+Align function signature for ``functools.reduce`` in documentation and docstring
+with the C implementation.
diff --git a/Misc/NEWS.d/next/Documentation/2023-03-19-09-39-31.gh-issue-102823.OzsOz0.rst b/Misc/NEWS.d/next/Documentation/2023-03-19-09-39-31.gh-issue-102823.OzsOz0.rst
new file mode 100644
index 00000000000000..1e32f3c89231c8
--- /dev/null
+++ b/Misc/NEWS.d/next/Documentation/2023-03-19-09-39-31.gh-issue-102823.OzsOz0.rst
@@ -0,0 +1,2 @@
+Document the return type of ``x // y`` when ``x`` and ``y`` have type
+:class:`float`.
diff --git a/Misc/NEWS.d/next/Documentation/2023-09-10-02-39-06.gh-issue-109209.0LBewo.rst b/Misc/NEWS.d/next/Documentation/2023-09-10-02-39-06.gh-issue-109209.0LBewo.rst
new file mode 100644
index 00000000000000..79cc0b72ec742f
--- /dev/null
+++ b/Misc/NEWS.d/next/Documentation/2023-09-10-02-39-06.gh-issue-109209.0LBewo.rst
@@ -0,0 +1 @@
+The minimum Sphinx version required for the documentation is now 4.2.
diff --git a/Misc/NEWS.d/next/Library/2022-12-24-12-50-54.gh-issue-84867.OhaLbU.rst b/Misc/NEWS.d/next/Library/2022-12-24-12-50-54.gh-issue-84867.OhaLbU.rst
new file mode 100644
index 00000000000000..8b45dcee481916
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2022-12-24-12-50-54.gh-issue-84867.OhaLbU.rst
@@ -0,0 +1,2 @@
+:class:`unittest.TestLoader` no longer loads test cases from exact
+:class:`unittest.TestCase` and :class:`unittest.FunctionTestCase` classes.
diff --git a/Misc/NEWS.d/next/Library/2023-07-11-08-56-40.gh-issue-106584.g-SBtC.rst b/Misc/NEWS.d/next/Library/2023-07-11-08-56-40.gh-issue-106584.g-SBtC.rst
new file mode 100644
index 00000000000000..a13b61bf1c121b
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-07-11-08-56-40.gh-issue-106584.g-SBtC.rst
@@ -0,0 +1,2 @@
+Fix exit code for ``unittest`` if all tests are skipped.
+Patch by Egor Eliseev.
diff --git a/Misc/NEWS.d/next/Library/2023-07-14-01-47-39.gh-issue-106734.eMYSoz.rst b/Misc/NEWS.d/next/Library/2023-07-14-01-47-39.gh-issue-106734.eMYSoz.rst
new file mode 100644
index 00000000000000..37d2ab19ed1017
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-07-14-01-47-39.gh-issue-106734.eMYSoz.rst
@@ -0,0 +1 @@
+Disable tab completion in multiline mode of :mod:`pdb`.
diff --git a/Misc/NEWS.d/next/Library/2023-07-18-23-05-12.gh-issue-106751.tVvzN_.rst b/Misc/NEWS.d/next/Library/2023-07-18-23-05-12.gh-issue-106751.tVvzN_.rst
index 1cb8424b6221ee..d26ac90d3978d4 100644
--- a/Misc/NEWS.d/next/Library/2023-07-18-23-05-12.gh-issue-106751.tVvzN_.rst
+++ b/Misc/NEWS.d/next/Library/2023-07-18-23-05-12.gh-issue-106751.tVvzN_.rst
@@ -1,2 +1,2 @@
Optimize :meth:`KqueueSelector.select` for many iteration case. Patch By
-Dong-hee Na.
+Donghee Na.
diff --git a/Misc/NEWS.d/next/Library/2023-07-19-10-45-24.gh-issue-106751.3HJ1of.rst b/Misc/NEWS.d/next/Library/2023-07-19-10-45-24.gh-issue-106751.3HJ1of.rst
index 2696b560371d13..1b3ffdc95120a2 100644
--- a/Misc/NEWS.d/next/Library/2023-07-19-10-45-24.gh-issue-106751.3HJ1of.rst
+++ b/Misc/NEWS.d/next/Library/2023-07-19-10-45-24.gh-issue-106751.3HJ1of.rst
@@ -1,2 +1,2 @@
Optimize :meth:`SelectSelector.select` for many iteration case. Patch By
-Dong-hee Na.
+Donghee Na.
diff --git a/Misc/NEWS.d/next/Library/2023-07-24-01-21-16.gh-issue-46376.w-xuDL.rst b/Misc/NEWS.d/next/Library/2023-07-24-01-21-16.gh-issue-46376.w-xuDL.rst
deleted file mode 100644
index 8e8f0245b4539b..00000000000000
--- a/Misc/NEWS.d/next/Library/2023-07-24-01-21-16.gh-issue-46376.w-xuDL.rst
+++ /dev/null
@@ -1 +0,0 @@
-Prevent memory leak and use-after-free when using pointers to pointers with ctypes
diff --git a/Misc/NEWS.d/next/Library/2023-08-15-18-20-00.gh-issue-107963.20g5BG.rst b/Misc/NEWS.d/next/Library/2023-08-15-18-20-00.gh-issue-107963.20g5BG.rst
index 3a73b2da0c4334..ea968367d0bdee 100644
--- a/Misc/NEWS.d/next/Library/2023-08-15-18-20-00.gh-issue-107963.20g5BG.rst
+++ b/Misc/NEWS.d/next/Library/2023-08-15-18-20-00.gh-issue-107963.20g5BG.rst
@@ -1,2 +1,2 @@
Fix :func:`multiprocessing.set_forkserver_preload` to check the given list
-of modules names. Patch by Dong-hee Na.
+of modules names. Patch by Donghee Na.
diff --git a/Misc/NEWS.d/next/Library/2023-08-18-22-58-07.gh-issue-83417.61J4yM.rst b/Misc/NEWS.d/next/Library/2023-08-18-22-58-07.gh-issue-83417.61J4yM.rst
new file mode 100644
index 00000000000000..fbb8bdb2073efa
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-08-18-22-58-07.gh-issue-83417.61J4yM.rst
@@ -0,0 +1,3 @@
+Add the ability for venv to create a ``.gitignore`` file which causes the
+created environment to be ignored by Git. It is on by default when venv is
+called via its CLI.
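
A rough sketch of the behaviour described above (illustrative only, assuming a CPython build that already includes this change; the ``.gitignore`` name comes from the entry itself)::

   import pathlib
   import subprocess
   import sys
   import tempfile

   with tempfile.TemporaryDirectory() as tmp:
       env = pathlib.Path(tmp, "venv")
       # The venv CLI turns the .gitignore generation on by default.
       subprocess.run([sys.executable, "-m", "venv", str(env)], check=True)
       print((env / ".gitignore").exists())   # True on builds with this change
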
diff --git a/Misc/NEWS.d/next/Library/2023-08-25-00-14-34.gh-issue-108463.mQApp_.rst b/Misc/NEWS.d/next/Library/2023-08-25-00-14-34.gh-issue-108463.mQApp_.rst
new file mode 100644
index 00000000000000..a5ab8e2f9d4b59
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-08-25-00-14-34.gh-issue-108463.mQApp_.rst
@@ -0,0 +1 @@
+Make expressions/statements work as expected in pdb.
diff --git a/Misc/NEWS.d/next/Library/2023-08-26-12-35-39.gh-issue-105829.kyYhWI.rst b/Misc/NEWS.d/next/Library/2023-08-26-12-35-39.gh-issue-105829.kyYhWI.rst
new file mode 100644
index 00000000000000..eaa2a5a4330e28
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-08-26-12-35-39.gh-issue-105829.kyYhWI.rst
@@ -0,0 +1 @@
+Fix a ``concurrent.futures.ProcessPoolExecutor`` deadlock.
diff --git a/Misc/NEWS.d/next/Library/2023-09-01-13-14-08.gh-issue-108751.2itqwe.rst b/Misc/NEWS.d/next/Library/2023-09-01-13-14-08.gh-issue-108751.2itqwe.rst
new file mode 100644
index 00000000000000..7bc21fe6c81760
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-01-13-14-08.gh-issue-108751.2itqwe.rst
@@ -0,0 +1,2 @@
+Add the :func:`copy.replace` function, which allows creating a modified copy
+of an object. It supports named tuples, dataclasses, and many other objects.
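
A minimal usage sketch of the helper described above (illustrative only, assuming a Python 3.13 interpreter where ``copy.replace`` is available)::

   import copy
   from dataclasses import dataclass
   from typing import NamedTuple

   @dataclass(frozen=True)
   class Point:
       x: int
       y: int

   class Pair(NamedTuple):
       left: int
       right: int

   # copy.replace() returns a new object with only the named fields changed.
   print(copy.replace(Point(1, 2), y=5))                 # Point(x=1, y=5)
   print(copy.replace(Pair(left=0, right=1), right=9))   # Pair(left=0, right=9)
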
diff --git a/Misc/NEWS.d/next/Library/2023-09-03-04-37-52.gh-issue-108469.kusj40.rst b/Misc/NEWS.d/next/Library/2023-09-03-04-37-52.gh-issue-108469.kusj40.rst
new file mode 100644
index 00000000000000..ac0f682963daec
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-03-04-37-52.gh-issue-108469.kusj40.rst
@@ -0,0 +1,3 @@
+:func:`ast.unparse` now supports the new :term:`f-string` syntax introduced in
+Python 3.12. Note that the :term:`f-string` quotes are reselected for simplicity
+under the new syntax. (Patch by Steven Sun)
diff --git a/Misc/NEWS.d/next/Library/2023-09-06-06-17-23.gh-issue-108843.WJMhsS.rst b/Misc/NEWS.d/next/Library/2023-09-06-06-17-23.gh-issue-108843.WJMhsS.rst
new file mode 100644
index 00000000000000..0f15761c14bb7d
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-06-06-17-23.gh-issue-108843.WJMhsS.rst
@@ -0,0 +1 @@
+Fix an issue in :func:`ast.unparse` when unparsing f-strings containing many quote types.
diff --git a/Misc/NEWS.d/next/Library/2023-09-06-14-47-28.gh-issue-109033.piUzDx.rst b/Misc/NEWS.d/next/Library/2023-09-06-14-47-28.gh-issue-109033.piUzDx.rst
new file mode 100644
index 00000000000000..15ec0b437d4339
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-06-14-47-28.gh-issue-109033.piUzDx.rst
@@ -0,0 +1,2 @@
+Exceptions raised by the :func:`os.utime` builtin function now include the
+related filename.
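
A small sketch of the improved error reporting (illustrative only; the populated ``filename`` assumes a build that includes this fix)::

   import os

   try:
       os.utime("no-such-file.txt")
   except OSError as exc:
       # With this change the failing path is attached to the exception.
       print(exc.filename)   # no-such-file.txt
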
diff --git a/Misc/NEWS.d/next/Library/2023-09-06-19-33-41.gh-issue-108682.35Xnc5.rst b/Misc/NEWS.d/next/Library/2023-09-06-19-33-41.gh-issue-108682.35Xnc5.rst
new file mode 100644
index 00000000000000..8c13d43ee9744b
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-06-19-33-41.gh-issue-108682.35Xnc5.rst
@@ -0,0 +1,2 @@
+Enum: require ``names=()`` or ``type=...`` to create an empty enum using
+the functional syntax.
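
Roughly, the stricter functional API now expects an explicit empty ``names`` sequence (sketch, assuming Python 3.13 behaviour)::

   from enum import Enum

   # An explicit empty ``names`` sequence is now required to create an
   # empty enum through the functional syntax.
   Empty = Enum("Empty", names=())
   print(list(Empty))   # []
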
diff --git a/Misc/NEWS.d/next/Library/2023-09-08-12-09-55.gh-issue-108987.x5AIG8.rst b/Misc/NEWS.d/next/Library/2023-09-08-12-09-55.gh-issue-108987.x5AIG8.rst
new file mode 100644
index 00000000000000..16526ee748d869
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-08-12-09-55.gh-issue-108987.x5AIG8.rst
@@ -0,0 +1,4 @@
+Fix :func:`_thread.start_new_thread` race condition. If a thread is created
+during Python finalization, the newly spawned thread now exits immediately
+instead of trying to access freed memory and crashing. Patch by
+Victor Stinner.
diff --git a/Misc/NEWS.d/next/Library/2023-09-08-22-26-26.gh-issue-109164.-9BFWR.rst b/Misc/NEWS.d/next/Library/2023-09-08-22-26-26.gh-issue-109164.-9BFWR.rst
new file mode 100644
index 00000000000000..b439c14ff535ff
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-08-22-26-26.gh-issue-109164.-9BFWR.rst
@@ -0,0 +1 @@
+:mod:`pdb`: Replace :mod:`getopt` with :mod:`argparse` for parsing command line arguments.
diff --git a/Misc/NEWS.d/next/Library/2023-09-09-09-05-41.gh-issue-109174.OJea5s.rst b/Misc/NEWS.d/next/Library/2023-09-09-09-05-41.gh-issue-109174.OJea5s.rst
new file mode 100644
index 00000000000000..63461fac3b96f7
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-09-09-05-41.gh-issue-109174.OJea5s.rst
@@ -0,0 +1 @@
+Add support for :class:`types.SimpleNamespace` in :func:`copy.replace`.
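
For example (sketch, assuming Python 3.13 where both this entry and ``copy.replace`` are available)::

   import copy
   import types

   ns = types.SimpleNamespace(host="localhost", port=8080)
   # Only the named attribute changes; the original namespace is untouched.
   print(copy.replace(ns, port=9090))   # namespace(host='localhost', port=9090)
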
diff --git a/Misc/NEWS.d/next/Library/2023-09-09-15-08-37.gh-issue-50644.JUAZOh.rst b/Misc/NEWS.d/next/Library/2023-09-09-15-08-37.gh-issue-50644.JUAZOh.rst
new file mode 100644
index 00000000000000..a7a442e35289d3
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-09-15-08-37.gh-issue-50644.JUAZOh.rst
@@ -0,0 +1,4 @@
+Attempts to pickle or create a shallow or deep copy of :mod:`codecs` streams
+now raise a TypeError. Previously, copying failed with a RecursionError,
+while pickling produced wrong results that eventually caused unpickling
+to fail with a RecursionError.
diff --git a/Misc/NEWS.d/next/Library/2023-09-11-00-32-18.gh-issue-107219.3zqyFT.rst b/Misc/NEWS.d/next/Library/2023-09-11-00-32-18.gh-issue-107219.3zqyFT.rst
new file mode 100644
index 00000000000000..10afbcf823386a
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-11-00-32-18.gh-issue-107219.3zqyFT.rst
@@ -0,0 +1,5 @@
+Fix a race condition in ``concurrent.futures``. When a process in the
+process pool was terminated abruptly (while the future was running or
+pending), close the connection write end. If the call queue is blocked on
+sending bytes to a worker process, closing the connection write end interrupts
+the send, so the queue can be closed. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Library/2023-09-12-13-01-55.gh-issue-109319.YaCMtW.rst b/Misc/NEWS.d/next/Library/2023-09-12-13-01-55.gh-issue-109319.YaCMtW.rst
new file mode 100644
index 00000000000000..d3cd86b040821a
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-12-13-01-55.gh-issue-109319.YaCMtW.rst
@@ -0,0 +1 @@
+Deprecate the ``dis.HAVE_ARGUMENT`` field in favour of ``dis.hasarg``.
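
The replacement idiom looks roughly like this (sketch; ``dis.hasarg`` exists since Python 3.12)::

   import dis

   def add_one(x):
       return x + 1

   for instr in dis.get_instructions(add_one):
       # Membership in dis.hasarg replaces the deprecated
       # ``opcode >= dis.HAVE_ARGUMENT`` comparison.
       print(instr.opname, instr.opcode in dis.hasarg)
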
diff --git a/Misc/NEWS.d/next/Library/2023-09-13-17-22-44.gh-issue-109375.ijJHZ9.rst b/Misc/NEWS.d/next/Library/2023-09-13-17-22-44.gh-issue-109375.ijJHZ9.rst
new file mode 100644
index 00000000000000..9b7a85d05f66ca
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-13-17-22-44.gh-issue-109375.ijJHZ9.rst
@@ -0,0 +1 @@
+The :mod:`pdb` ``alias`` command now prevents registering aliases without arguments.
diff --git a/Misc/NEWS.d/next/Library/2023-09-15-12-20-23.gh-issue-109096.VksX1D.rst b/Misc/NEWS.d/next/Library/2023-09-15-12-20-23.gh-issue-109096.VksX1D.rst
new file mode 100644
index 00000000000000..bf1308498a8eb0
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-15-12-20-23.gh-issue-109096.VksX1D.rst
@@ -0,0 +1,3 @@
+:class:`http.server.CGIHTTPRequestHandler` has been deprecated for removal
+in 3.15. Its design is old and the web world has long since moved beyond
+CGI.
diff --git a/Misc/NEWS.d/next/Library/2023-09-18-07-43-22.gh-issue-109543.1tOGoV.rst b/Misc/NEWS.d/next/Library/2023-09-18-07-43-22.gh-issue-109543.1tOGoV.rst
new file mode 100644
index 00000000000000..e790f7750c332a
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-18-07-43-22.gh-issue-109543.1tOGoV.rst
@@ -0,0 +1,2 @@
+Remove unnecessary :func:`hasattr` check during :data:`typing.TypedDict`
+creation.
diff --git a/Misc/NEWS.d/next/Library/2023-09-19-01-22-43.gh-issue-109559.ijaycU.rst b/Misc/NEWS.d/next/Library/2023-09-19-01-22-43.gh-issue-109559.ijaycU.rst
new file mode 100644
index 00000000000000..2c25a7b302dd02
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-19-01-22-43.gh-issue-109559.ijaycU.rst
@@ -0,0 +1 @@
+Update :mod:`unicodedata` database to Unicode 15.1.0.
diff --git a/Misc/NEWS.d/next/Library/2023-09-19-17-56-24.gh-issue-109109.WJvvX2.rst b/Misc/NEWS.d/next/Library/2023-09-19-17-56-24.gh-issue-109109.WJvvX2.rst
new file mode 100644
index 00000000000000..e741e60ff41a9b
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-19-17-56-24.gh-issue-109109.WJvvX2.rst
@@ -0,0 +1,5 @@
+You can now get the raw TLS certificate chains from TLS connections via the
+:meth:`ssl.SSLSocket.get_verified_chain` and
+:meth:`ssl.SSLSocket.get_unverified_chain` methods.
+
+Contributed by Mateusz Nowak.
diff --git a/Misc/NEWS.d/next/Library/2023-09-20-17-45-46.gh-issue-109613.P13ogN.rst b/Misc/NEWS.d/next/Library/2023-09-20-17-45-46.gh-issue-109613.P13ogN.rst
new file mode 100644
index 00000000000000..e21a758fc2eb05
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-20-17-45-46.gh-issue-109613.P13ogN.rst
@@ -0,0 +1,4 @@
+Fix :func:`os.stat` and :meth:`os.DirEntry.stat`: check for exceptions.
+Previously, on Python built in debug mode, these functions could trigger a
+fatal Python error (and abort the process) when a function succeeded with an
+exception set. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Library/2023-09-21-14-26-44.gh-issue-74481.KAUDcD.rst b/Misc/NEWS.d/next/Library/2023-09-21-14-26-44.gh-issue-74481.KAUDcD.rst
new file mode 100644
index 00000000000000..c2aca4eae64eda
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-21-14-26-44.gh-issue-74481.KAUDcD.rst
@@ -0,0 +1 @@
+Add ``set_error_mode``-related constants to the ``msvcrt`` module in the Python debug build.
diff --git a/Misc/NEWS.d/next/Library/2023-09-21-19-42-22.gh-issue-109653.bL3iLH.rst b/Misc/NEWS.d/next/Library/2023-09-21-19-42-22.gh-issue-109653.bL3iLH.rst
new file mode 100644
index 00000000000000..9f794bb58ba63b
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-21-19-42-22.gh-issue-109653.bL3iLH.rst
@@ -0,0 +1,2 @@
+Reduce the import time of :mod:`typing` by around a third.
+Patch by Alex Waygood.
diff --git a/Misc/NEWS.d/next/Library/2023-09-23-12-47-45.gh-issue-109653.9wZBfs.rst b/Misc/NEWS.d/next/Library/2023-09-23-12-47-45.gh-issue-109653.9wZBfs.rst
new file mode 100644
index 00000000000000..1d0f0e4f83b5e1
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-23-12-47-45.gh-issue-109653.9wZBfs.rst
@@ -0,0 +1 @@
+Reduce the import time of :mod:`enum` by over 50%. Patch by Alex Waygood.
diff --git a/Misc/NEWS.d/next/Library/2023-09-24-13-28-35.gh-issue-109653.9IFU0B.rst b/Misc/NEWS.d/next/Library/2023-09-24-13-28-35.gh-issue-109653.9IFU0B.rst
new file mode 100644
index 00000000000000..c4f5a62433a2c1
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-09-24-13-28-35.gh-issue-109653.9IFU0B.rst
@@ -0,0 +1,2 @@
+Improve import time of :mod:`functools` by around 13%. Patch by Alex
+Waygood.
diff --git a/Misc/NEWS.d/next/Security/2023-06-01-03-24-58.gh-issue-103142.GLWDMX.rst b/Misc/NEWS.d/next/Security/2023-06-01-03-24-58.gh-issue-103142.GLWDMX.rst
deleted file mode 100644
index 7e0836879e4f81..00000000000000
--- a/Misc/NEWS.d/next/Security/2023-06-01-03-24-58.gh-issue-103142.GLWDMX.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-The version of OpenSSL used in our binary builds has been upgraded to 1.1.1u
-to address several CVEs.
diff --git a/Misc/NEWS.d/next/Tests/2023-04-05-06-45-20.gh-issue-103186.640Eg-.rst b/Misc/NEWS.d/next/Tests/2023-04-05-06-45-20.gh-issue-103186.640Eg-.rst
new file mode 100644
index 00000000000000..2f596aa5f47bda
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-04-05-06-45-20.gh-issue-103186.640Eg-.rst
@@ -0,0 +1 @@
+Suppress and assert expected RuntimeWarnings in test_sys_settrace.py.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-02-19-06-52.gh-issue-108822.arTbBI.rst b/Misc/NEWS.d/next/Tests/2023-09-02-19-06-52.gh-issue-108822.arTbBI.rst
new file mode 100644
index 00000000000000..e1c6df2adcb0ae
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-02-19-06-52.gh-issue-108822.arTbBI.rst
@@ -0,0 +1,4 @@
+``regrtest`` now computes statistics on all tests: successes, failures and
+skipped. ``test_netrc``, ``test_pep646_syntax`` and ``test_xml_etree`` now
+return results in their ``test_main()`` function. Patch by Victor Stinner
+and Alex Waygood.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-03-02-01-55.gh-issue-108834.iAwXzj.rst b/Misc/NEWS.d/next/Tests/2023-09-03-02-01-55.gh-issue-108834.iAwXzj.rst
new file mode 100644
index 00000000000000..43b9948db0075c
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-03-02-01-55.gh-issue-108834.iAwXzj.rst
@@ -0,0 +1,6 @@
+When regrtest reruns failed tests in verbose mode (``./python -m test
+--rerun``), tests are now rerun in fresh worker processes rather than being
+executed in the main process. If a test crashes or is killed by a timeout,
+the main process can detect and handle the killed worker process. Tests are
+rerun in parallel if the ``-jN`` option is used.
+Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-03-06-17-12.gh-issue-108834.fjV-CJ.rst b/Misc/NEWS.d/next/Tests/2023-09-03-06-17-12.gh-issue-108834.fjV-CJ.rst
new file mode 100644
index 00000000000000..734cc66aebee15
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-03-06-17-12.gh-issue-108834.fjV-CJ.rst
@@ -0,0 +1,2 @@
+Rename regrtest ``--verbose2`` option (``-w``) to ``--rerun``. Keep
+``--verbose2`` as a deprecated alias. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-03-20-15-49.gh-issue-108834.Osvmhf.rst b/Misc/NEWS.d/next/Tests/2023-09-03-20-15-49.gh-issue-108834.Osvmhf.rst
new file mode 100644
index 00000000000000..098861ffa30374
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-03-20-15-49.gh-issue-108834.Osvmhf.rst
@@ -0,0 +1,3 @@
+Add ``--fail-rerun`` option to regrtest: if a test failed and then passed
+when rerun in verbose mode, exit the process with exit code 2 (error),
+instead of exit code 0 (success). Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-03-21-18-35.gh-issue-108851.CCuHyI.rst b/Misc/NEWS.d/next/Tests/2023-09-03-21-18-35.gh-issue-108851.CCuHyI.rst
new file mode 100644
index 00000000000000..7a5b3052af22f2
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-03-21-18-35.gh-issue-108851.CCuHyI.rst
@@ -0,0 +1,2 @@
+Add ``get_recursion_available()`` and ``get_recursion_depth()`` functions to
+the :mod:`test.support` module. Patch by Victor Stinner.
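
A quick sketch of how the new helpers might be used (assuming, as the entry implies, that both take no arguments and return integers)::

   from test.support import get_recursion_available, get_recursion_depth

   # Current depth of the Python call stack and how much head room is left
   # before a RecursionError would be raised.
   print(get_recursion_depth(), get_recursion_available())
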
diff --git a/Misc/NEWS.d/next/Tests/2023-09-03-21-41-10.gh-issue-108851.xFTYOE.rst b/Misc/NEWS.d/next/Tests/2023-09-03-21-41-10.gh-issue-108851.xFTYOE.rst
new file mode 100644
index 00000000000000..b35aaebb410afb
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-03-21-41-10.gh-issue-108851.xFTYOE.rst
@@ -0,0 +1,3 @@
+Fix ``test_tomllib`` recursion tests for WASI buildbots: reduce the recursion
+limit and compute the maximum nested array/dict depending on the currently
+available recursion limit. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-04-15-18-14.gh-issue-89392.8A4T5p.rst b/Misc/NEWS.d/next/Tests/2023-09-04-15-18-14.gh-issue-89392.8A4T5p.rst
new file mode 100644
index 00000000000000..e1dea8e78cdd4e
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-04-15-18-14.gh-issue-89392.8A4T5p.rst
@@ -0,0 +1,2 @@
+Removed support for the ``test_main()`` function in tests. They now always
+use the normal unittest test runner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-05-21-42-54.gh-issue-91960.abClTs.rst b/Misc/NEWS.d/next/Tests/2023-09-05-21-42-54.gh-issue-91960.abClTs.rst
new file mode 100644
index 00000000000000..f63e0874499193
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-05-21-42-54.gh-issue-91960.abClTs.rst
@@ -0,0 +1 @@
+FreeBSD 13.2 CI coverage for pull requests is now provided by Cirrus-CI (a hosted CI service that supports Linux, macOS, Windows, and FreeBSD).
diff --git a/Misc/NEWS.d/next/Tests/2023-09-05-23-00-09.gh-issue-108962.R4NwuU.rst b/Misc/NEWS.d/next/Tests/2023-09-05-23-00-09.gh-issue-108962.R4NwuU.rst
new file mode 100644
index 00000000000000..380fb20b8881b2
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-05-23-00-09.gh-issue-108962.R4NwuU.rst
@@ -0,0 +1,3 @@
+Skip ``test_tempfile.test_flags()`` if ``chflags()`` fails with "OSError:
+[Errno 45] Operation not supported" (ex: on FreeBSD 13). Patch by Victor
+Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-06-15-36-51.gh-issue-91960.P3nD5v.rst b/Misc/NEWS.d/next/Tests/2023-09-06-15-36-51.gh-issue-91960.P3nD5v.rst
new file mode 100644
index 00000000000000..46472abf9802bc
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-06-15-36-51.gh-issue-91960.P3nD5v.rst
@@ -0,0 +1,7 @@
+Skip ``test_gdb`` if gdb is unable to retrieve Python frame objects: if a
+frame is ``<optimized out>``. When Python is built with "clang -Og", gdb can
+fail to retrieve the *frame* parameter of ``_PyEval_EvalFrameDefault()``. In
+this case, tests like ``py_bt()`` are likely to fail. Without getting access
+to Python frames, ``python-gdb.py`` is mostly clueless on retrieving the
+Python traceback. Moreover, ``test_gdb`` is no longer skipped on macOS if
+Python is built with Clang. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-06-18-27-53.gh-issue-109015.1dS1AQ.rst b/Misc/NEWS.d/next/Tests/2023-09-06-18-27-53.gh-issue-109015.1dS1AQ.rst
new file mode 100644
index 00000000000000..cb641be9312e1a
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-06-18-27-53.gh-issue-109015.1dS1AQ.rst
@@ -0,0 +1,6 @@
+Fix test_asyncio, test_imaplib and test_socket tests on FreeBSD if the TCP
+blackhole is enabled (``sysctl net.inet.tcp.blackhole``). Skip the few tests
+which failed with ``ETIMEDOUT`` under such a non-standard configuration.
+Currently, the `FreeBSD GCP image enables TCP and UDP blackhole
+`_ (``sysctl net.inet.tcp.blackhole=2``
+and ``sysctl net.inet.udp.blackhole=1``). Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-06-22-06-22.gh-issue-108996.IBhR3U.rst b/Misc/NEWS.d/next/Tests/2023-09-06-22-06-22.gh-issue-108996.IBhR3U.rst
new file mode 100644
index 00000000000000..887f8b74bcfa30
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-06-22-06-22.gh-issue-108996.IBhR3U.rst
@@ -0,0 +1 @@
+Add tests for ``msvcrt``.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-10-19-59-57.gh-issue-109230.SRNLFQ.rst b/Misc/NEWS.d/next/Tests/2023-09-10-19-59-57.gh-issue-109230.SRNLFQ.rst
new file mode 100644
index 00000000000000..18e1e85242005a
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-10-19-59-57.gh-issue-109230.SRNLFQ.rst
@@ -0,0 +1,5 @@
+Fix ``test_pyexpat.test_exception()``: it can now be run from a directory
+different from the Python source code directory. Before, the test failed in this
+case. Skip the test if the Modules/pyexpat.c source is not available. Also skip
+the test on Python implementations other than CPython. Patch by Victor
+Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-10-22-32-20.gh-issue-109237.SvgKwD.rst b/Misc/NEWS.d/next/Tests/2023-09-10-22-32-20.gh-issue-109237.SvgKwD.rst
new file mode 100644
index 00000000000000..1d762bbe1d2592
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-10-22-32-20.gh-issue-109237.SvgKwD.rst
@@ -0,0 +1,4 @@
+Fix ``test_site.test_underpth_basic()`` when the working directory contains
+at least one non-ASCII character: encode the ``._pth`` file to UTF-8 and
+enable the UTF-8 Mode to use UTF-8 for the child process stdout. Patch by
+Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-10-23-05-50.gh-issue-108996.tJBru6.rst b/Misc/NEWS.d/next/Tests/2023-09-10-23-05-50.gh-issue-108996.tJBru6.rst
new file mode 100644
index 00000000000000..ab6b5b5952b044
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-10-23-05-50.gh-issue-108996.tJBru6.rst
@@ -0,0 +1 @@
+Fix and enable ``test_msvcrt``.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-11-18-19-52.gh-issue-109276.btfFtT.rst b/Misc/NEWS.d/next/Tests/2023-09-11-18-19-52.gh-issue-109276.btfFtT.rst
new file mode 100644
index 00000000000000..5fcf6624f2e84d
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-11-18-19-52.gh-issue-109276.btfFtT.rst
@@ -0,0 +1,3 @@
+libregrtest now uses a separate file descriptor to write the test result as JSON.
+Previously, if a test wrote debug messages late around the JSON, the main test
+process failed to parse JSON. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-11-19-11-57.gh-issue-109276.qxI4OG.rst b/Misc/NEWS.d/next/Tests/2023-09-11-19-11-57.gh-issue-109276.qxI4OG.rst
new file mode 100644
index 00000000000000..cf4074b2fe23cc
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-11-19-11-57.gh-issue-109276.qxI4OG.rst
@@ -0,0 +1,6 @@
+libregrtest now calls :func:`random.seed()` before running each test file
+when the ``-r/--randomize`` command line option is used. Moreover, it's also
+called in worker processes. It should help to make tests more
+deterministic. Previously, it was only called once in the main process before
+running all test files and it was not called in worker processes. Patch by
+Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-13-05-58-09.gh-issue-104736.lA25Fu.rst b/Misc/NEWS.d/next/Tests/2023-09-13-05-58-09.gh-issue-104736.lA25Fu.rst
new file mode 100644
index 00000000000000..85c370fc87ac41
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-13-05-58-09.gh-issue-104736.lA25Fu.rst
@@ -0,0 +1,4 @@
+Fix test_gdb on Python built with LLVM clang 16 on Linux ppc64le (ex: Fedora
+38). Search patterns in gdb "bt" command output to detect when gdb fails to
+retrieve the traceback. For example, skip a test if ``Backtrace stopped: frame
+did not save the PC`` is found. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-14-22-58-47.gh-issue-109396.J1a4jR.rst b/Misc/NEWS.d/next/Tests/2023-09-14-22-58-47.gh-issue-109396.J1a4jR.rst
new file mode 100644
index 00000000000000..71150ecae76434
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-14-22-58-47.gh-issue-109396.J1a4jR.rst
@@ -0,0 +1,3 @@
+Fix ``test_socket.test_hmac_sha1()`` in FIPS mode. Use a longer key: FIPS
+mode requires a key of at least 112 bits. The previous key was only 32
+bits. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-14-23-27-40.gh-issue-109425.j-uFep.rst b/Misc/NEWS.d/next/Tests/2023-09-14-23-27-40.gh-issue-109425.j-uFep.rst
new file mode 100644
index 00000000000000..bfe18569ae97f3
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-14-23-27-40.gh-issue-109425.j-uFep.rst
@@ -0,0 +1,3 @@
+libregrtest now decodes stdout of test worker processes with the
+"backslashreplace" error handler to log corrupted stdout, instead of failing
+with an error and not logging the stdout. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-19-19-08-22.gh-issue-109580.G02Zam.rst b/Misc/NEWS.d/next/Tests/2023-09-19-19-08-22.gh-issue-109580.G02Zam.rst
new file mode 100644
index 00000000000000..b917cbf6fd0a05
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-19-19-08-22.gh-issue-109580.G02Zam.rst
@@ -0,0 +1,3 @@
+Skip ``test_perf_profiler`` if Python is built with the ASAN, MSAN or UBSAN
+sanitizer. Python crashes randomly in this test on such builds. Patch by
+Victor Stinner.
diff --git a/Misc/NEWS.d/next/Tests/2023-09-20-02-32-17.gh-issue-103053.AoUJuK.rst b/Misc/NEWS.d/next/Tests/2023-09-20-02-32-17.gh-issue-103053.AoUJuK.rst
new file mode 100644
index 00000000000000..6d67bf237bdbb2
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2023-09-20-02-32-17.gh-issue-103053.AoUJuK.rst
@@ -0,0 +1,4 @@
+Skip test_freeze_simple_script() of test_tools.test_freeze if Python is built
+with ``./configure --enable-optimizations``, which means with Profile Guided
+Optimization (PGO): it just makes the test too slow. The freeze tool is tested
+by many other CIs with other (faster) compiler flags. Patch by Victor Stinner.
diff --git a/Misc/NEWS.d/next/Windows/2023-07-11-20-48-17.gh-issue-99079.CIMftz.rst b/Misc/NEWS.d/next/Windows/2023-07-11-20-48-17.gh-issue-99079.CIMftz.rst
deleted file mode 100644
index 11f411be0f17c5..00000000000000
--- a/Misc/NEWS.d/next/Windows/2023-07-11-20-48-17.gh-issue-99079.CIMftz.rst
+++ /dev/null
@@ -1 +0,0 @@
-Update Windows build to use OpenSSL 3.0.9
diff --git a/Misc/NEWS.d/next/Windows/2023-09-05-10-08-47.gh-issue-107565.CIMftz.rst b/Misc/NEWS.d/next/Windows/2023-09-05-10-08-47.gh-issue-107565.CIMftz.rst
new file mode 100644
index 00000000000000..024a58299caed9
--- /dev/null
+++ b/Misc/NEWS.d/next/Windows/2023-09-05-10-08-47.gh-issue-107565.CIMftz.rst
@@ -0,0 +1 @@
+Update Windows build to use OpenSSL 3.0.10.
diff --git a/Misc/NEWS.d/next/macOS/2023-05-30-23-30-46.gh-issue-103142.55lMXQ.rst b/Misc/NEWS.d/next/macOS/2023-05-30-23-30-46.gh-issue-103142.55lMXQ.rst
deleted file mode 100644
index 1afd949d6a9f03..00000000000000
--- a/Misc/NEWS.d/next/macOS/2023-05-30-23-30-46.gh-issue-103142.55lMXQ.rst
+++ /dev/null
@@ -1 +0,0 @@
-Update macOS installer to use OpenSSL 1.1.1u.
diff --git a/Misc/stable_abi.toml b/Misc/stable_abi.toml
index 2030a085abf27c..8df3f85e61eec6 100644
--- a/Misc/stable_abi.toml
+++ b/Misc/stable_abi.toml
@@ -2452,3 +2452,11 @@
added = '3.13'
[function.PyLong_AsInt]
added = '3.13'
+[function.PyObject_HasAttrWithError]
+ added = '3.13'
+[function.PyObject_HasAttrStringWithError]
+ added = '3.13'
+[function.PyMapping_HasKeyWithError]
+ added = '3.13'
+[function.PyMapping_HasKeyStringWithError]
+ added = '3.13'
diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in
index 56c1badf6b44a0..7b3216a50bb284 100644
--- a/Modules/Setup.stdlib.in
+++ b/Modules/Setup.stdlib.in
@@ -158,7 +158,7 @@
@MODULE_XXSUBTYPE_TRUE@xxsubtype xxsubtype.c
@MODULE__XXTESTFUZZ_TRUE@_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c
@MODULE__TESTBUFFER_TRUE@_testbuffer _testbuffer.c
-@MODULE__TESTINTERNALCAPI_TRUE@_testinternalcapi _testinternalcapi.c _testinternalcapi/pytime.c
+@MODULE__TESTINTERNALCAPI_TRUE@_testinternalcapi _testinternalcapi.c _testinternalcapi/test_lock.c _testinternalcapi/pytime.c
@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/unicode.c _testcapi/dict.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c
@MODULE__TESTCLINIC_TRUE@_testclinic _testclinic.c
@MODULE__TESTCLINIC_LIMITED_TRUE@_testclinic_limited _testclinic_limited.c
diff --git a/Modules/_ctypes/_ctypes.c b/Modules/_ctypes/_ctypes.c
index ed9efcad9ab0c8..184af2132c2707 100644
--- a/Modules/_ctypes/_ctypes.c
+++ b/Modules/_ctypes/_ctypes.c
@@ -110,6 +110,7 @@ bytes(cdata)
#include "pycore_call.h" // _PyObject_CallNoArgs()
#include "pycore_ceval.h" // _Py_EnterRecursiveCall()
+#include "pycore_pyerrors.h" // _PyErr_WriteUnraisableMsg()
#include
@@ -5148,41 +5149,6 @@ Pointer_get_contents(CDataObject *self, void *closure)
stgdict = PyObject_stgdict((PyObject *)self);
assert(stgdict); /* Cannot be NULL for pointer instances */
-
- PyObject *keep = GetKeepedObjects(self);
- if (keep != NULL) {
- // check if it's a pointer to a pointer:
- // pointers will have '0' key in the _objects
- int ptr_probe = PyDict_ContainsString(keep, "0");
- if (ptr_probe < 0) {
- return NULL;
- }
- if (ptr_probe) {
- PyObject *item;
- if (PyDict_GetItemStringRef(keep, "1", &item) < 0) {
- return NULL;
- }
- if (item == NULL) {
- PyErr_SetString(PyExc_ValueError,
- "Unexpected NULL pointer in _objects");
- return NULL;
- }
-#ifndef NDEBUG
- CDataObject *ptr2ptr = (CDataObject *)item;
- // Don't construct a new object,
- // return existing one instead to preserve refcount.
- // Double-check that we are returning the same thing.
- assert(
- *(void**) self->b_ptr == ptr2ptr->b_ptr ||
- *(void**) self->b_value.c == ptr2ptr->b_ptr ||
- *(void**) self->b_ptr == ptr2ptr->b_value.c ||
- *(void**) self->b_value.c == ptr2ptr->b_value.c
- );
-#endif
- return item;
- }
- }
-
return PyCData_FromBaseObj(stgdict->proto,
(PyObject *)self, 0,
*(void **)self->b_ptr);
diff --git a/Modules/_ctypes/callbacks.c b/Modules/_ctypes/callbacks.c
index 0d8ecce009a67a..1bd8fec97179e9 100644
--- a/Modules/_ctypes/callbacks.c
+++ b/Modules/_ctypes/callbacks.c
@@ -8,9 +8,9 @@
# include
#endif
-#include "pycore_call.h" // _PyObject_CallNoArgs()
-#include "pycore_runtime.h" // _PyRuntime
-#include "pycore_global_objects.h" // _Py_ID()
+#include "pycore_call.h" // _PyObject_CallNoArgs()
+#include "pycore_pyerrors.h" // _PyErr_WriteUnraisableMsg()
+#include "pycore_runtime.h" // _Py_ID()
#include
diff --git a/Modules/_ctypes/cfield.c b/Modules/_ctypes/cfield.c
index 128506a9eed920..bfb40e5c5393fc 100644
--- a/Modules/_ctypes/cfield.c
+++ b/Modules/_ctypes/cfield.c
@@ -250,8 +250,8 @@ PyCField_get_size(PyObject *self, void *data)
}
static PyGetSetDef PyCField_getset[] = {
- { "offset", PyCField_get_offset, NULL, "offset in bytes of this field" },
- { "size", PyCField_get_size, NULL, "size in bytes of this field" },
+ { "offset", PyCField_get_offset, NULL, PyDoc_STR("offset in bytes of this field") },
+ { "size", PyCField_get_size, NULL, PyDoc_STR("size in bytes of this field") },
{ NULL, NULL, NULL, NULL },
};
diff --git a/Modules/_ctypes/stgdict.c b/Modules/_ctypes/stgdict.c
index 9b0ca73a8b1751..6fbcf77a115371 100644
--- a/Modules/_ctypes/stgdict.c
+++ b/Modules/_ctypes/stgdict.c
@@ -386,11 +386,11 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct
if (fields == NULL)
return 0;
- if (PyObject_GetOptionalAttr(type, &_Py_ID(_swappedbytes_), &tmp) < 0) {
+ int rc = PyObject_HasAttrWithError(type, &_Py_ID(_swappedbytes_));
+ if (rc < 0) {
return -1;
}
- if (tmp) {
- Py_DECREF(tmp);
+ if (rc) {
big_endian = !PY_BIG_ENDIAN;
}
else {
diff --git a/Modules/_datetimemodule.c b/Modules/_datetimemodule.c
index 191db3f84088d5..0d356779cfe192 100644
--- a/Modules/_datetimemodule.c
+++ b/Modules/_datetimemodule.c
@@ -3590,6 +3590,8 @@ static PyMethodDef date_methods[] = {
{"replace", _PyCFunction_CAST(date_replace), METH_VARARGS | METH_KEYWORDS,
PyDoc_STR("Return date with new specified fields.")},
+ {"__replace__", _PyCFunction_CAST(date_replace), METH_VARARGS | METH_KEYWORDS},
+
{"__reduce__", (PyCFunction)date_reduce, METH_NOARGS,
PyDoc_STR("__reduce__() -> (cls, state)")},
@@ -4719,6 +4721,8 @@ static PyMethodDef time_methods[] = {
{"replace", _PyCFunction_CAST(time_replace), METH_VARARGS | METH_KEYWORDS,
PyDoc_STR("Return time with new specified fields.")},
+ {"__replace__", _PyCFunction_CAST(time_replace), METH_VARARGS | METH_KEYWORDS},
+
{"fromisoformat", (PyCFunction)time_fromisoformat, METH_O | METH_CLASS,
PyDoc_STR("string -> time from a string in ISO 8601 format")},
@@ -6579,6 +6583,8 @@ static PyMethodDef datetime_methods[] = {
{"replace", _PyCFunction_CAST(datetime_replace), METH_VARARGS | METH_KEYWORDS,
PyDoc_STR("Return datetime with new specified fields.")},
+ {"__replace__", _PyCFunction_CAST(datetime_replace), METH_VARARGS | METH_KEYWORDS},
+
{"astimezone", _PyCFunction_CAST(datetime_astimezone), METH_VARARGS | METH_KEYWORDS,
PyDoc_STR("tz -> convert to local time in new timezone tz\n")},
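
The ``__replace__`` entries added above let :func:`copy.replace` drive the existing ``replace()`` machinery on ``date``, ``time`` and ``datetime``; a usage sketch (assuming Python 3.13)::

   import copy
   from datetime import date, datetime

   d = date(2023, 9, 1)
   print(copy.replace(d, month=12))              # 2023-12-01

   dt = datetime(2023, 9, 1, 12, 30)
   print(copy.replace(dt, hour=0, minute=0))     # 2023-09-01 00:00:00
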
diff --git a/Modules/_decimal/tests/bench.py b/Modules/_decimal/tests/bench.py
index 24e091b6887ccd..640290f2ec7962 100644
--- a/Modules/_decimal/tests/bench.py
+++ b/Modules/_decimal/tests/bench.py
@@ -7,6 +7,8 @@
import time
+import sys
+from functools import wraps
from test.support.import_helper import import_fresh_module
C = import_fresh_module('decimal', fresh=['_decimal'])
@@ -64,66 +66,85 @@ def factorial(n, m):
else:
return factorial(n, (n+m)//2) * factorial((n+m)//2 + 1, m)
+# Fix failed test cases caused by CVE-2020-10735 patch.
+# See gh-95778 for details.
+def increase_int_max_str_digits(maxdigits):
+ def _increase_int_max_str_digits(func, maxdigits=maxdigits):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ previous_int_limit = sys.get_int_max_str_digits()
+ sys.set_int_max_str_digits(maxdigits)
+ ans = func(*args, **kwargs)
+ sys.set_int_max_str_digits(previous_int_limit)
+ return ans
+ return wrapper
+ return _increase_int_max_str_digits
+
+def test_calc_pi():
+ print("\n# ======================================================================")
+ print("# Calculating pi, 10000 iterations")
+ print("# ======================================================================\n")
+
+ to_benchmark = [pi_float, pi_decimal]
+ if C is not None:
+ to_benchmark.insert(1, pi_cdecimal)
+
+ for prec in [9, 19]:
+ print("\nPrecision: %d decimal digits\n" % prec)
+ for func in to_benchmark:
+ start = time.time()
+ if C is not None:
+ C.getcontext().prec = prec
+ P.getcontext().prec = prec
+ for i in range(10000):
+ x = func()
+ print("%s:" % func.__name__.replace("pi_", ""))
+ print("result: %s" % str(x))
+ print("time: %fs\n" % (time.time()-start))
+
+@increase_int_max_str_digits(maxdigits=10000000)
+def test_factorial():
+ print("\n# ======================================================================")
+ print("# Factorial")
+ print("# ======================================================================\n")
-print("\n# ======================================================================")
-print("# Calculating pi, 10000 iterations")
-print("# ======================================================================\n")
-
-to_benchmark = [pi_float, pi_decimal]
-if C is not None:
- to_benchmark.insert(1, pi_cdecimal)
-
-for prec in [9, 19]:
- print("\nPrecision: %d decimal digits\n" % prec)
- for func in to_benchmark:
- start = time.time()
- if C is not None:
- C.getcontext().prec = prec
- P.getcontext().prec = prec
- for i in range(10000):
- x = func()
- print("%s:" % func.__name__.replace("pi_", ""))
- print("result: %s" % str(x))
- print("time: %fs\n" % (time.time()-start))
-
-
-print("\n# ======================================================================")
-print("# Factorial")
-print("# ======================================================================\n")
-
-if C is not None:
- c = C.getcontext()
- c.prec = C.MAX_PREC
- c.Emax = C.MAX_EMAX
- c.Emin = C.MIN_EMIN
+ if C is not None:
+ c = C.getcontext()
+ c.prec = C.MAX_PREC
+ c.Emax = C.MAX_EMAX
+ c.Emin = C.MIN_EMIN
-for n in [100000, 1000000]:
+ for n in [100000, 1000000]:
- print("n = %d\n" % n)
+ print("n = %d\n" % n)
- if C is not None:
- # C version of decimal
+ if C is not None:
+ # C version of decimal
+ start_calc = time.time()
+ x = factorial(C.Decimal(n), 0)
+ end_calc = time.time()
+ start_conv = time.time()
+ sx = str(x)
+ end_conv = time.time()
+ print("cdecimal:")
+ print("calculation time: %fs" % (end_calc-start_calc))
+ print("conversion time: %fs\n" % (end_conv-start_conv))
+
+ # Python integers
start_calc = time.time()
- x = factorial(C.Decimal(n), 0)
+ y = factorial(n, 0)
end_calc = time.time()
start_conv = time.time()
- sx = str(x)
- end_conv = time.time()
- print("cdecimal:")
- print("calculation time: %fs" % (end_calc-start_calc))
- print("conversion time: %fs\n" % (end_conv-start_conv))
+ sy = str(y)
+ end_conv = time.time()
- # Python integers
- start_calc = time.time()
- y = factorial(n, 0)
- end_calc = time.time()
- start_conv = time.time()
- sy = str(y)
- end_conv = time.time()
+ print("int:")
+ print("calculation time: %fs" % (end_calc-start_calc))
+ print("conversion time: %fs\n\n" % (end_conv-start_conv))
- print("int:")
- print("calculation time: %fs" % (end_calc-start_calc))
- print("conversion time: %fs\n\n" % (end_conv-start_conv))
+ if C is not None:
+ assert(sx == sy)
- if C is not None:
- assert(sx == sy)
+if __name__ == "__main__":
+ test_calc_pi()
+ test_factorial()
diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c
index 8cb57e693d81d7..f9d5793f9b6497 100644
--- a/Modules/_elementtree.c
+++ b/Modules/_elementtree.c
@@ -3532,12 +3532,11 @@ expat_start_doctype_handler(XMLParserObject *self,
sysid_obj, NULL);
Py_XDECREF(res);
}
- else if (PyObject_GetOptionalAttr((PyObject *)self, st->str_doctype, &res) > 0) {
+ else if (PyObject_HasAttrWithError((PyObject *)self, st->str_doctype) > 0) {
(void)PyErr_WarnEx(PyExc_RuntimeWarning,
"The doctype() method of XMLParser is ignored. "
"Define doctype() method on the TreeBuilder target.",
1);
- Py_DECREF(res);
}
Py_DECREF(doctype_name_obj);
diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c
index 389ff4391de0be..8ea493ad9ab278 100644
--- a/Modules/_functoolsmodule.c
+++ b/Modules/_functoolsmodule.c
@@ -725,7 +725,7 @@ functools_reduce(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(functools_reduce_doc,
-"reduce(function, iterable[, initial]) -> value\n\
+"reduce(function, iterable[, initial], /) -> value\n\
\n\
Apply a function of two arguments cumulatively to the items of a sequence\n\
or iterable, from left to right, so as to reduce the iterable to a single\n\
diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c
index 0983a7bd151f40..e8caf9f0df6dbf 100644
--- a/Modules/_io/bufferedio.c
+++ b/Modules/_io/bufferedio.c
@@ -553,17 +553,14 @@ _io__Buffered_close_impl(buffered *self)
}
/* flush() will most probably re-take the lock, so drop it first */
LEAVE_BUFFERED(self)
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
+ r = _PyFile_Flush((PyObject *)self);
if (!ENTER_BUFFERED(self)) {
return NULL;
}
PyObject *exc = NULL;
- if (res == NULL) {
+ if (r < 0) {
exc = PyErr_GetRaisedException();
}
- else {
- Py_DECREF(res);
- }
res = PyObject_CallMethodNoArgs(self->raw, &_Py_ID(close));
@@ -593,12 +590,11 @@ static PyObject *
_io__Buffered_detach_impl(buffered *self)
/*[clinic end generated code: output=dd0fc057b8b779f7 input=482762a345cc9f44]*/
{
- PyObject *raw, *res;
+ PyObject *raw;
CHECK_INITIALIZED(self)
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL)
+ if (_PyFile_Flush((PyObject *)self) < 0) {
return NULL;
- Py_DECREF(res);
+ }
raw = self->raw;
self->raw = NULL;
self->detached = 1;
diff --git a/Modules/_io/clinic/bufferedio.c.h b/Modules/_io/clinic/bufferedio.c.h
index b2c52cf2a95a44..7577bdec5c3b20 100644
--- a/Modules/_io/clinic/bufferedio.c.h
+++ b/Modules/_io/clinic/bufferedio.c.h
@@ -26,7 +26,6 @@ _io__BufferedIOBase_readinto(PyObject *self, PyObject *arg)
Py_buffer buffer = {NULL, NULL};
if (PyObject_GetBuffer(arg, &buffer, PyBUF_WRITABLE) < 0) {
- PyErr_Clear();
_PyArg_BadArgument("readinto", "argument", "read-write bytes-like object", arg);
goto exit;
}
@@ -63,7 +62,6 @@ _io__BufferedIOBase_readinto1(PyObject *self, PyObject *arg)
Py_buffer buffer = {NULL, NULL};
if (PyObject_GetBuffer(arg, &buffer, PyBUF_WRITABLE) < 0) {
- PyErr_Clear();
_PyArg_BadArgument("readinto1", "argument", "read-write bytes-like object", arg);
goto exit;
}
@@ -590,7 +588,6 @@ _io__Buffered_readinto(buffered *self, PyObject *arg)
Py_buffer buffer = {NULL, NULL};
if (PyObject_GetBuffer(arg, &buffer, PyBUF_WRITABLE) < 0) {
- PyErr_Clear();
_PyArg_BadArgument("readinto", "argument", "read-write bytes-like object", arg);
goto exit;
}
@@ -627,7 +624,6 @@ _io__Buffered_readinto1(buffered *self, PyObject *arg)
Py_buffer buffer = {NULL, NULL};
if (PyObject_GetBuffer(arg, &buffer, PyBUF_WRITABLE) < 0) {
- PyErr_Clear();
_PyArg_BadArgument("readinto1", "argument", "read-write bytes-like object", arg);
goto exit;
}
@@ -1098,4 +1094,4 @@ _io_BufferedRandom___init__(PyObject *self, PyObject *args, PyObject *kwargs)
exit:
return return_value;
}
-/*[clinic end generated code: output=9e09091995ae02b0 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=f940cea085f0bf91 input=a9049054013a1b77]*/
diff --git a/Modules/_io/clinic/bytesio.c.h b/Modules/_io/clinic/bytesio.c.h
index d7364779200827..d42ab48cef2859 100644
--- a/Modules/_io/clinic/bytesio.c.h
+++ b/Modules/_io/clinic/bytesio.c.h
@@ -328,7 +328,6 @@ _io_BytesIO_readinto(bytesio *self, PyObject *arg)
Py_buffer buffer = {NULL, NULL};
if (PyObject_GetBuffer(arg, &buffer, PyBUF_WRITABLE) < 0) {
- PyErr_Clear();
_PyArg_BadArgument("readinto", "argument", "read-write bytes-like object", arg);
goto exit;
}
@@ -538,4 +537,4 @@ _io_BytesIO___init__(PyObject *self, PyObject *args, PyObject *kwargs)
exit:
return return_value;
}
-/*[clinic end generated code: output=8ab65edc03edbfe0 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=b753fdf1ba36c461 input=a9049054013a1b77]*/
diff --git a/Modules/_io/clinic/fileio.c.h b/Modules/_io/clinic/fileio.c.h
index 29f8cf6aa9a85c..deb99fa9d99bd0 100644
--- a/Modules/_io/clinic/fileio.c.h
+++ b/Modules/_io/clinic/fileio.c.h
@@ -245,7 +245,6 @@ _io_FileIO_readinto(fileio *self, PyTypeObject *cls, PyObject *const *args, Py_s
goto exit;
}
if (PyObject_GetBuffer(args[0], &buffer, PyBUF_WRITABLE) < 0) {
- PyErr_Clear();
_PyArg_BadArgument("readinto", "argument 1", "read-write bytes-like object", args[0]);
goto exit;
}
@@ -536,4 +535,4 @@ _io_FileIO_isatty(fileio *self, PyObject *Py_UNUSED(ignored))
#ifndef _IO_FILEIO_TRUNCATE_METHODDEF
#define _IO_FILEIO_TRUNCATE_METHODDEF
#endif /* !defined(_IO_FILEIO_TRUNCATE_METHODDEF) */
-/*[clinic end generated code: output=238dd48819076434 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=2ce6ce923ccef86e input=a9049054013a1b77]*/
diff --git a/Modules/_io/clinic/winconsoleio.c.h b/Modules/_io/clinic/winconsoleio.c.h
index 0683eecdfebb37..ecc71e552c23f4 100644
--- a/Modules/_io/clinic/winconsoleio.c.h
+++ b/Modules/_io/clinic/winconsoleio.c.h
@@ -243,7 +243,6 @@ _io__WindowsConsoleIO_readinto(winconsoleio *self, PyTypeObject *cls, PyObject *
goto exit;
}
if (PyObject_GetBuffer(args[0], &buffer, PyBUF_WRITABLE) < 0) {
- PyErr_Clear();
_PyArg_BadArgument("readinto", "argument 1", "read-write bytes-like object", args[0]);
goto exit;
}
@@ -465,4 +464,4 @@ _io__WindowsConsoleIO_isatty(winconsoleio *self, PyObject *Py_UNUSED(ignored))
#ifndef _IO__WINDOWSCONSOLEIO_ISATTY_METHODDEF
#define _IO__WINDOWSCONSOLEIO_ISATTY_METHODDEF
#endif /* !defined(_IO__WINDOWSCONSOLEIO_ISATTY_METHODDEF) */
-/*[clinic end generated code: output=7be51d48ddb7c8c8 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=37febc4c96732b3b input=a9049054013a1b77]*/
diff --git a/Modules/_io/iobase.c b/Modules/_io/iobase.c
index 34fcd702391f32..4da8e5bd572d74 100644
--- a/Modules/_io/iobase.c
+++ b/Modules/_io/iobase.c
@@ -148,13 +148,9 @@ _io__IOBase_truncate_impl(PyObject *self, PyTypeObject *cls,
static int
iobase_is_closed(PyObject *self)
{
- PyObject *res;
- int ret;
/* This gets the derived attribute, which is *not* __IOBase_closed
in most cases! */
- ret = PyObject_GetOptionalAttr(self, &_Py_ID(__IOBase_closed), &res);
- Py_XDECREF(res);
- return ret;
+ return PyObject_HasAttrWithError(self, &_Py_ID(__IOBase_closed));
}
/* Flush and close methods */
@@ -269,7 +265,7 @@ static PyObject *
_io__IOBase_close_impl(PyObject *self)
/*[clinic end generated code: output=63c6a6f57d783d6d input=f4494d5c31dbc6b7]*/
{
- int rc, closed = iobase_is_closed(self);
+ int rc1, rc2, closed = iobase_is_closed(self);
if (closed < 0) {
return NULL;
@@ -278,19 +274,14 @@ _io__IOBase_close_impl(PyObject *self)
Py_RETURN_NONE;
}
- PyObject *res = PyObject_CallMethodNoArgs(self, &_Py_ID(flush));
-
+ rc1 = _PyFile_Flush(self);
PyObject *exc = PyErr_GetRaisedException();
- rc = PyObject_SetAttr(self, &_Py_ID(__IOBase_closed), Py_True);
+ rc2 = PyObject_SetAttr(self, &_Py_ID(__IOBase_closed), Py_True);
_PyErr_ChainExceptions1(exc);
- if (rc < 0) {
- Py_CLEAR(res);
- }
-
- if (res == NULL)
+ if (rc1 < 0 || rc2 < 0) {
return NULL;
+ }
- Py_DECREF(res);
Py_RETURN_NONE;
}
diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c
index 0a727a6e0ecd8a..10ef8a803c50fd 100644
--- a/Modules/_io/textio.c
+++ b/Modules/_io/textio.c
@@ -1223,11 +1223,10 @@ _io_TextIOWrapper___init___impl(textio *self, PyObject *buffer,
goto error;
self->seekable = self->telling = r;
- r = PyObject_GetOptionalAttr(buffer, &_Py_ID(read1), &res);
+ r = PyObject_HasAttrWithError(buffer, &_Py_ID(read1));
if (r < 0) {
goto error;
}
- Py_XDECREF(res);
self->has_read1 = r;
self->encoding_start_of_stream = 0;
@@ -1369,11 +1368,9 @@ _io_TextIOWrapper_reconfigure_impl(textio *self, PyObject *encoding,
return NULL;
}
- PyObject *res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL) {
+ if (_PyFile_Flush((PyObject *)self) < 0) {
return NULL;
}
- Py_DECREF(res);
self->b2cratio = 0;
if (newline_obj != NULL && set_newline(self, newline) < 0) {
@@ -1509,12 +1506,11 @@ static PyObject *
_io_TextIOWrapper_detach_impl(textio *self)
/*[clinic end generated code: output=7ba3715cd032d5f2 input=e5a71fbda9e1d9f9]*/
{
- PyObject *buffer, *res;
+ PyObject *buffer;
CHECK_ATTACHED(self);
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL)
+ if (_PyFile_Flush((PyObject *)self) < 0) {
return NULL;
- Py_DECREF(res);
+ }
buffer = self->buffer;
self->buffer = NULL;
self->detached = 1;
@@ -1714,10 +1710,9 @@ _io_TextIOWrapper_write_impl(textio *self, PyObject *text)
}
if (needflush) {
- ret = PyObject_CallMethodNoArgs(self->buffer, &_Py_ID(flush));
- if (ret == NULL)
+ if (_PyFile_Flush(self->buffer) < 0) {
return NULL;
- Py_DECREF(ret);
+ }
}
textiowrapper_set_decoded_chars(self, NULL);
@@ -2503,10 +2498,9 @@ _io_TextIOWrapper_seek_impl(textio *self, PyObject *cookieObj, int whence)
goto fail;
}
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL)
+ if (_PyFile_Flush((PyObject *)self) < 0) {
goto fail;
- Py_DECREF(res);
+ }
textiowrapper_set_decoded_chars(self, NULL);
Py_CLEAR(self->snapshot);
@@ -2551,10 +2545,9 @@ _io_TextIOWrapper_seek_impl(textio *self, PyObject *cookieObj, int whence)
goto fail;
}
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL)
+ if (_PyFile_Flush((PyObject *)self) < 0) {
goto fail;
- Py_DECREF(res);
+ }
/* The strategy of seek() is to go back to the safe start point
* and replay the effect of read(chars_to_skip) from there.
@@ -2678,10 +2671,9 @@ _io_TextIOWrapper_tell_impl(textio *self)
if (_textiowrapper_writeflush(self) < 0)
return NULL;
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL)
+ if (_PyFile_Flush((PyObject *)self) < 0) {
goto fail;
- Py_DECREF(res);
+ }
posobj = PyObject_CallMethodNoArgs(self->buffer, &_Py_ID(tell));
if (posobj == NULL)
@@ -2886,14 +2878,11 @@ static PyObject *
_io_TextIOWrapper_truncate_impl(textio *self, PyObject *pos)
/*[clinic end generated code: output=90ec2afb9bb7745f input=56ec8baa65aea377]*/
{
- PyObject *res;
-
CHECK_ATTACHED(self)
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL)
+ if (_PyFile_Flush((PyObject *)self) < 0) {
return NULL;
- Py_DECREF(res);
+ }
return PyObject_CallMethodOneArg(self->buffer, &_Py_ID(truncate), pos);
}
@@ -3077,13 +3066,9 @@ _io_TextIOWrapper_close_impl(textio *self)
PyErr_Clear();
}
}
- res = PyObject_CallMethodNoArgs((PyObject *)self, &_Py_ID(flush));
- if (res == NULL) {
+ if (_PyFile_Flush((PyObject *)self) < 0) {
exc = PyErr_GetRaisedException();
}
- else {
- Py_DECREF(res);
- }
res = PyObject_CallMethodNoArgs(self->buffer, &_Py_ID(close));
if (exc != NULL) {
diff --git a/Modules/_localemodule.c b/Modules/_localemodule.c
index 1847a4811e8ee9..fe8e4c5e30035b 100644
--- a/Modules/_localemodule.c
+++ b/Modules/_localemodule.c
@@ -10,35 +10,25 @@ This software comes with no warranty. Use at your own risk.
******************************************************************/
#include "Python.h"
-#include "pycore_fileutils.h"
-#include "pycore_pymem.h" // _PyMem_Strdup
-
-#include <stdio.h>
-#include <locale.h>
-#include <string.h>
-#include <ctype.h>
+#include "pycore_fileutils.h" // _Py_GetLocaleconvNumeric()
+#include "pycore_pymem.h" // _PyMem_Strdup()
+#include <locale.h> // setlocale()
+#include <string.h> // strlen()
#ifdef HAVE_ERRNO_H
-#include <errno.h>
+# include <errno.h> // errno
#endif
-
#ifdef HAVE_LANGINFO_H
-#include <langinfo.h>
+# include <langinfo.h> // nl_langinfo()
#endif
-
#ifdef HAVE_LIBINTL_H
-#include <libintl.h>
-#endif
-
-#ifdef HAVE_WCHAR_H
-#include <wchar.h>
+# include <libintl.h>
#endif
-
-#if defined(MS_WINDOWS)
-#ifndef WIN32_LEAN_AND_MEAN
-#define WIN32_LEAN_AND_MEAN
-#endif
-#include <windows.h>
+#ifdef MS_WINDOWS
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# endif
+# include <windows.h>
#endif
PyDoc_STRVAR(locale__doc__, "Support for POSIX locales.");
diff --git a/Modules/_lsprof.c b/Modules/_lsprof.c
index e7dcb6e1713212..d23a756ace887d 100644
--- a/Modules/_lsprof.c
+++ b/Modules/_lsprof.c
@@ -5,7 +5,9 @@
#include "Python.h"
#include "pycore_call.h" // _PyObject_CallNoArgs()
#include "pycore_ceval.h" // _PyEval_SetProfile()
+#include "pycore_pyerrors.h" // _PyErr_WriteUnraisableMsg()
#include "pycore_pystate.h" // _PyThreadState_GET()
+
#include "rotatingtree.h"
/************************************************************/
diff --git a/Modules/_multiprocessing/multiprocessing.c b/Modules/_multiprocessing/multiprocessing.c
index 16b5cb5dd9ec7a..2e6d8eb68c0243 100644
--- a/Modules/_multiprocessing/multiprocessing.c
+++ b/Modules/_multiprocessing/multiprocessing.c
@@ -14,16 +14,17 @@ class HANDLE_converter(CConverter):
type = "HANDLE"
format_unit = '"F_HANDLE"'
- def parse_arg(self, argname, displayname):
- return """
+ def parse_arg(self, argname, displayname, *, limited_capi):
+ return self.format_code("""
{paramname} = PyLong_AsVoidPtr({argname});
if (!{paramname} && PyErr_Occurred()) {{{{
goto exit;
}}}}
- """.format(argname=argname, paramname=self.parser_name)
+ """,
+ argname=argname)
[python start generated code]*/
-/*[python end generated code: output=da39a3ee5e6b4b0d input=3e537d244034affb]*/
+/*[python end generated code: output=da39a3ee5e6b4b0d input=3cf0318efc6a8772]*/
/*[clinic input]
module _multiprocessing
diff --git a/Modules/_multiprocessing/multiprocessing.h b/Modules/_multiprocessing/multiprocessing.h
index 296e0abb29a0f5..099004b437828e 100644
--- a/Modules/_multiprocessing/multiprocessing.h
+++ b/Modules/_multiprocessing/multiprocessing.h
@@ -10,6 +10,10 @@
#include "pythread.h"
#include "pycore_signal.h" // _PyOS_IsMainThread()
+#ifndef MS_WINDOWS
+# include <unistd.h> // sysconf()
+#endif
+
/*
* Platform includes and definitions
*/
diff --git a/Modules/_multiprocessing/semaphore.c b/Modules/_multiprocessing/semaphore.c
index d22b8d18e33e42..f8f2afda28d06d 100644
--- a/Modules/_multiprocessing/semaphore.c
+++ b/Modules/_multiprocessing/semaphore.c
@@ -9,6 +9,10 @@
#include "multiprocessing.h"
+#ifdef HAVE_SYS_TIME_H
+# include <sys/time.h> // gettimeofday()
+#endif
+
#ifdef HAVE_MP_SEMAPHORE
enum { RECURSIVE_MUTEX, SEMAPHORE };
diff --git a/Modules/_pickle.c b/Modules/_pickle.c
index b97524856eeca8..a3cf34699ba509 100644
--- a/Modules/_pickle.c
+++ b/Modules/_pickle.c
@@ -5799,14 +5799,13 @@ instantiate(PyObject *cls, PyObject *args)
into a newly created tuple. */
assert(PyTuple_Check(args));
if (!PyTuple_GET_SIZE(args) && PyType_Check(cls)) {
- PyObject *func;
- if (PyObject_GetOptionalAttr(cls, &_Py_ID(__getinitargs__), &func) < 0) {
+ int rc = PyObject_HasAttrWithError(cls, &_Py_ID(__getinitargs__));
+ if (rc < 0) {
return NULL;
}
- if (func == NULL) {
+ if (!rc) {
return PyObject_CallMethodOneArg(cls, &_Py_ID(__new__), cls);
}
- Py_DECREF(func);
}
return PyObject_CallObject(cls, args);
}
diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c
index ef76d26282e1b3..2898eedc3e3a8f 100644
--- a/Modules/_posixsubprocess.c
+++ b/Modules/_posixsubprocess.c
@@ -87,15 +87,16 @@ class pid_t_converter(CConverter):
type = 'pid_t'
format_unit = '" _Py_PARSE_PID "'
- def parse_arg(self, argname, displayname):
- return """
+ def parse_arg(self, argname, displayname, *, limited_capi):
+ return self.format_code("""
{paramname} = PyLong_AsPid({argname});
if ({paramname} == -1 && PyErr_Occurred()) {{{{
goto exit;
}}}}
- """.format(argname=argname, paramname=self.parser_name)
+ """,
+ argname=argname)
[python start generated code]*/
-/*[python end generated code: output=da39a3ee5e6b4b0d input=5af1c116d56cbb5a]*/
+/*[python end generated code: output=da39a3ee5e6b4b0d input=c94349aa1aad151d]*/
#include "clinic/_posixsubprocess.c.h"
diff --git a/Modules/_sre/sre.c b/Modules/_sre/sre.c
index 3872c3663c7294..07da5da13f70d3 100644
--- a/Modules/_sre/sre.c
+++ b/Modules/_sre/sre.c
@@ -43,12 +43,40 @@ static const char copyright[] =
#include "pycore_long.h" // _PyLong_GetZero()
#include "pycore_moduleobject.h" // _PyModule_GetState()
+#include "sre.h" // SRE_CODE
-#include "sre.h"
+#include <ctype.h> // tolower(), toupper(), isalnum()
#define SRE_CODE_BITS (8 * sizeof(SRE_CODE))
-#include <ctype.h>
+// On macOS, use the wide character ctype API using btowc()
+#if defined(__APPLE__)
+# define USE_CTYPE_WINT_T
+#endif
+
+static int sre_isalnum(unsigned int ch) {
+#ifdef USE_CTYPE_WINT_T
+ return (unsigned int)iswalnum(btowc((int)ch));
+#else
+ return (unsigned int)isalnum((int)ch);
+#endif
+}
+
+static unsigned int sre_tolower(unsigned int ch) {
+#ifdef USE_CTYPE_WINT_T
+ return (unsigned int)towlower(btowc((int)ch));
+#else
+ return (unsigned int)tolower((int)ch);
+#endif
+}
+
+static unsigned int sre_toupper(unsigned int ch) {
+#ifdef USE_CTYPE_WINT_T
+ return (unsigned int)towupper(btowc((int)ch));
+#else
+ return (unsigned int)toupper((int)ch);
+#endif
+}
/* Defining this one controls tracing:
* 0 -- disabled
@@ -114,17 +142,17 @@ static unsigned int sre_lower_ascii(unsigned int ch)
/* locale-specific character predicates */
/* !(c & ~N) == (c < N+1) for any unsigned c, this avoids
* warnings when c's type supports only numbers < N+1 */
-#define SRE_LOC_IS_ALNUM(ch) (!((ch) & ~255) ? isalnum((ch)) : 0)
+#define SRE_LOC_IS_ALNUM(ch) (!((ch) & ~255) ? sre_isalnum((ch)) : 0)
#define SRE_LOC_IS_WORD(ch) (SRE_LOC_IS_ALNUM((ch)) || (ch) == '_')
static unsigned int sre_lower_locale(unsigned int ch)
{
- return ((ch) < 256 ? (unsigned int)tolower((ch)) : ch);
+ return ((ch) < 256 ? (unsigned int)sre_tolower((ch)) : ch);
}
static unsigned int sre_upper_locale(unsigned int ch)
{
- return ((ch) < 256 ? (unsigned int)toupper((ch)) : ch);
+ return ((ch) < 256 ? (unsigned int)sre_toupper((ch)) : ch);
}
/* unicode-specific character predicates */
diff --git a/Modules/_struct.c b/Modules/_struct.c
index 4ae21cce74f609..ff1bf4e96c5f21 100644
--- a/Modules/_struct.c
+++ b/Modules/_struct.c
@@ -12,7 +12,6 @@
#include "pycore_long.h" // _PyLong_AsByteArray()
#include "pycore_moduleobject.h" // _PyModule_GetState()
-#include <ctype.h>
#include <stddef.h> // offsetof()
/*[clinic input]
@@ -110,18 +109,20 @@ class cache_struct_converter(CConverter):
c_default = "NULL"
broken_limited_capi = True
- def parse_arg(self, argname, displayname):
- return """
+ def parse_arg(self, argname, displayname, *, limited_capi):
+ assert not limited_capi
+ return self.format_code("""
if (!{converter}(module, {argname}, &{paramname})) {{{{
goto exit;
}}}}
- """.format(argname=argname, paramname=self.name,
- converter=self.converter)
+ """,
+ argname=argname,
+ converter=self.converter)
def cleanup(self):
return "Py_XDECREF(%s);\n" % self.name
[python start generated code]*/
-/*[python end generated code: output=da39a3ee5e6b4b0d input=14e83804f599ed8f]*/
+/*[python end generated code: output=da39a3ee5e6b4b0d input=c33b27d6b06006c6]*/
static int cache_struct_converter(PyObject *, PyObject *, PyStructObject **);
@@ -2197,8 +2198,8 @@ static PyMemberDef s_members[] = {
};
static PyGetSetDef s_getsetlist[] = {
- {"format", (getter)s_get_format, (setter)NULL, "struct format string", NULL},
- {"size", (getter)s_get_size, (setter)NULL, "struct size in bytes", NULL},
+ {"format", (getter)s_get_format, (setter)NULL, PyDoc_STR("struct format string"), NULL},
+ {"size", (getter)s_get_size, (setter)NULL, PyDoc_STR("struct size in bytes"), NULL},
{NULL} /* sentinel */
};
diff --git a/Modules/_testcapi/abstract.c b/Modules/_testcapi/abstract.c
index 91a1ee3aaafc02..81a3dea4c1dfde 100644
--- a/Modules/_testcapi/abstract.c
+++ b/Modules/_testcapi/abstract.c
@@ -32,7 +32,7 @@ object_getattrstring(PyObject *self, PyObject *args)
static PyObject *
object_getoptionalattr(PyObject *self, PyObject *args)
{
- PyObject *obj, *attr_name, *value;
+ PyObject *obj, *attr_name, *value = UNINITIALIZED_PTR;
if (!PyArg_ParseTuple(args, "OO", &obj, &attr_name)) {
return NULL;
}
@@ -57,7 +57,7 @@ object_getoptionalattr(PyObject *self, PyObject *args)
static PyObject *
object_getoptionalattrstring(PyObject *self, PyObject *args)
{
- PyObject *obj, *value;
+ PyObject *obj, *value = UNINITIALIZED_PTR;
const char *attr_name;
Py_ssize_t size;
if (!PyArg_ParseTuple(args, "Oz#", &obj, &attr_name, &size)) {
@@ -105,6 +105,31 @@ object_hasattrstring(PyObject *self, PyObject *args)
return PyLong_FromLong(PyObject_HasAttrString(obj, attr_name));
}
+static PyObject *
+object_hasattrwitherror(PyObject *self, PyObject *args)
+{
+ PyObject *obj, *attr_name;
+ if (!PyArg_ParseTuple(args, "OO", &obj, &attr_name)) {
+ return NULL;
+ }
+ NULLABLE(obj);
+ NULLABLE(attr_name);
+ RETURN_INT(PyObject_HasAttrWithError(obj, attr_name));
+}
+
+static PyObject *
+object_hasattrstringwitherror(PyObject *self, PyObject *args)
+{
+ PyObject *obj;
+ const char *attr_name;
+ Py_ssize_t size;
+ if (!PyArg_ParseTuple(args, "Oz#", &obj, &attr_name, &size)) {
+ return NULL;
+ }
+ NULLABLE(obj);
+ RETURN_INT(PyObject_HasAttrStringWithError(obj, attr_name));
+}
+
static PyObject *
object_setattr(PyObject *self, PyObject *args)
{
@@ -207,7 +232,7 @@ mapping_getitemstring(PyObject *self, PyObject *args)
static PyObject *
mapping_getoptionalitem(PyObject *self, PyObject *args)
{
- PyObject *obj, *attr_name, *value;
+ PyObject *obj, *attr_name, *value = UNINITIALIZED_PTR;
if (!PyArg_ParseTuple(args, "OO", &obj, &attr_name)) {
return NULL;
}
@@ -232,7 +257,7 @@ mapping_getoptionalitem(PyObject *self, PyObject *args)
static PyObject *
mapping_getoptionalitemstring(PyObject *self, PyObject *args)
{
- PyObject *obj, *value;
+ PyObject *obj, *value = UNINITIALIZED_PTR;
const char *attr_name;
Py_ssize_t size;
if (!PyArg_ParseTuple(args, "Oz#", &obj, &attr_name, &size)) {
@@ -280,6 +305,31 @@ mapping_haskeystring(PyObject *self, PyObject *args)
return PyLong_FromLong(PyMapping_HasKeyString(mapping, key));
}
+static PyObject *
+mapping_haskeywitherror(PyObject *self, PyObject *args)
+{
+ PyObject *mapping, *key;
+ if (!PyArg_ParseTuple(args, "OO", &mapping, &key)) {
+ return NULL;
+ }
+ NULLABLE(mapping);
+ NULLABLE(key);
+ RETURN_INT(PyMapping_HasKeyWithError(mapping, key));
+}
+
+static PyObject *
+mapping_haskeystringwitherror(PyObject *self, PyObject *args)
+{
+ PyObject *mapping;
+ const char *key;
+ Py_ssize_t size;
+ if (!PyArg_ParseTuple(args, "Oz#", &mapping, &key, &size)) {
+ return NULL;
+ }
+ NULLABLE(mapping);
+ RETURN_INT(PyMapping_HasKeyStringWithError(mapping, key));
+}
+
static PyObject *
object_setitem(PyObject *self, PyObject *args)
{
@@ -568,6 +618,8 @@ static PyMethodDef test_methods[] = {
{"object_getoptionalattrstring", object_getoptionalattrstring, METH_VARARGS},
{"object_hasattr", object_hasattr, METH_VARARGS},
{"object_hasattrstring", object_hasattrstring, METH_VARARGS},
+ {"object_hasattrwitherror", object_hasattrwitherror, METH_VARARGS},
+ {"object_hasattrstringwitherror", object_hasattrstringwitherror, METH_VARARGS},
{"object_setattr", object_setattr, METH_VARARGS},
{"object_setattrstring", object_setattrstring, METH_VARARGS},
{"object_delattr", object_delattr, METH_VARARGS},
@@ -582,6 +634,8 @@ static PyMethodDef test_methods[] = {
{"mapping_getoptionalitemstring", mapping_getoptionalitemstring, METH_VARARGS},
{"mapping_haskey", mapping_haskey, METH_VARARGS},
{"mapping_haskeystring", mapping_haskeystring, METH_VARARGS},
+ {"mapping_haskeywitherror", mapping_haskeywitherror, METH_VARARGS},
+ {"mapping_haskeystringwitherror", mapping_haskeystringwitherror, METH_VARARGS},
{"object_setitem", object_setitem, METH_VARARGS},
{"mapping_setitemstring", mapping_setitemstring, METH_VARARGS},
{"object_delitem", object_delitem, METH_VARARGS},
diff --git a/Modules/_testcapi/clinic/exceptions.c.h b/Modules/_testcapi/clinic/exceptions.c.h
index 01881534329c9d..39b5f8b91a00db 100644
--- a/Modules/_testcapi/clinic/exceptions.c.h
+++ b/Modules/_testcapi/clinic/exceptions.c.h
@@ -394,38 +394,6 @@ PyDoc_STRVAR(_testcapi_set_exception__doc__,
#define _TESTCAPI_SET_EXCEPTION_METHODDEF \
{"set_exception", (PyCFunction)_testcapi_set_exception, METH_O, _testcapi_set_exception__doc__},
-PyDoc_STRVAR(_testcapi_write_unraisable_exc__doc__,
-"write_unraisable_exc($module, exception, err_msg, obj, /)\n"
-"--\n"
-"\n");
-
-#define _TESTCAPI_WRITE_UNRAISABLE_EXC_METHODDEF \
- {"write_unraisable_exc", _PyCFunction_CAST(_testcapi_write_unraisable_exc), METH_FASTCALL, _testcapi_write_unraisable_exc__doc__},
-
-static PyObject *
-_testcapi_write_unraisable_exc_impl(PyObject *module, PyObject *exc,
- PyObject *err_msg, PyObject *obj);
-
-static PyObject *
-_testcapi_write_unraisable_exc(PyObject *module, PyObject *const *args, Py_ssize_t nargs)
-{
- PyObject *return_value = NULL;
- PyObject *exc;
- PyObject *err_msg;
- PyObject *obj;
-
- if (!_PyArg_CheckPositional("write_unraisable_exc", nargs, 3, 3)) {
- goto exit;
- }
- exc = args[0];
- err_msg = args[1];
- obj = args[2];
- return_value = _testcapi_write_unraisable_exc_impl(module, exc, err_msg, obj);
-
-exit:
- return return_value;
-}
-
PyDoc_STRVAR(_testcapi_traceback_print__doc__,
"traceback_print($module, traceback, file, /)\n"
"--\n"
@@ -487,4 +455,4 @@ _testcapi_unstable_exc_prep_reraise_star(PyObject *module, PyObject *const *args
exit:
return return_value;
}
-/*[clinic end generated code: output=8f273949da28ffb5 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=ff19512450b3bbdb input=a9049054013a1b77]*/
diff --git a/Modules/_testcapi/clinic/long.c.h b/Modules/_testcapi/clinic/long.c.h
index b77cb51810cb65..e2f7042be12c48 100644
--- a/Modules/_testcapi/clinic/long.c.h
+++ b/Modules/_testcapi/clinic/long.c.h
@@ -133,23 +133,6 @@ _testcapi_test_long_as_double(PyObject *module, PyObject *Py_UNUSED(ignored))
return _testcapi_test_long_as_double_impl(module);
}
-PyDoc_STRVAR(_testcapi_test_long_numbits__doc__,
-"test_long_numbits($module, /)\n"
-"--\n"
-"\n");
-
-#define _TESTCAPI_TEST_LONG_NUMBITS_METHODDEF \
- {"test_long_numbits", (PyCFunction)_testcapi_test_long_numbits, METH_NOARGS, _testcapi_test_long_numbits__doc__},
-
-static PyObject *
-_testcapi_test_long_numbits_impl(PyObject *module);
-
-static PyObject *
-_testcapi_test_long_numbits(PyObject *module, PyObject *Py_UNUSED(ignored))
-{
- return _testcapi_test_long_numbits_impl(module);
-}
-
PyDoc_STRVAR(_testcapi_call_long_compact_api__doc__,
"call_long_compact_api($module, arg, /)\n"
"--\n"
@@ -165,4 +148,4 @@ PyDoc_STRVAR(_testcapi_PyLong_AsInt__doc__,
#define _TESTCAPI_PYLONG_ASINT_METHODDEF \
{"PyLong_AsInt", (PyCFunction)_testcapi_PyLong_AsInt, METH_O, _testcapi_PyLong_AsInt__doc__},
-/*[clinic end generated code: output=31267ab2dd90aa1d input=a9049054013a1b77]*/
+/*[clinic end generated code: output=de762870526e241d input=a9049054013a1b77]*/
diff --git a/Modules/_testcapi/code.c b/Modules/_testcapi/code.c
index 691dd5fe043811..c0193489b6f340 100644
--- a/Modules/_testcapi/code.c
+++ b/Modules/_testcapi/code.c
@@ -1,4 +1,5 @@
#include "parts.h"
+#include "util.h"
static Py_ssize_t
get_code_extra_index(PyInterpreterState* interp) {
@@ -75,7 +76,7 @@ test_code_extra(PyObject* self, PyObject *Py_UNUSED(callable))
}
// Check the value is initially NULL
- void *extra;
+ void *extra = UNINITIALIZED_PTR;
int res = PyUnstable_Code_GetExtra(test_func_code, code_extra_index, &extra);
if (res < 0) {
goto finally;
@@ -88,6 +89,7 @@ test_code_extra(PyObject* self, PyObject *Py_UNUSED(callable))
goto finally;
}
// Assert it was set correctly
+ extra = UNINITIALIZED_PTR;
res = PyUnstable_Code_GetExtra(test_func_code, code_extra_index, &extra);
if (res < 0) {
goto finally;
diff --git a/Modules/_testcapi/dict.c b/Modules/_testcapi/dict.c
index 6720f0437401ef..810989fbed85f9 100644
--- a/Modules/_testcapi/dict.c
+++ b/Modules/_testcapi/dict.c
@@ -139,7 +139,7 @@ dict_getitemwitherror(PyObject *self, PyObject *args)
static PyObject *
dict_getitemref(PyObject *self, PyObject *args)
{
- PyObject *obj, *attr_name, *value;
+ PyObject *obj, *attr_name, *value = UNINITIALIZED_PTR;
if (!PyArg_ParseTuple(args, "OO", &obj, &attr_name)) {
return NULL;
}
@@ -164,7 +164,7 @@ dict_getitemref(PyObject *self, PyObject *args)
static PyObject *
dict_getitemstringref(PyObject *self, PyObject *args)
{
- PyObject *obj, *value;
+ PyObject *obj, *value = UNINITIALIZED_PTR;
const char *attr_name;
Py_ssize_t size;
if (!PyArg_ParseTuple(args, "Oz#", &obj, &attr_name, &size)) {
@@ -276,7 +276,7 @@ dict_items(PyObject *self, PyObject *obj)
static PyObject *
dict_next(PyObject *self, PyObject *args)
{
- PyObject *mapping, *key, *value;
+ PyObject *mapping, *key = UNINITIALIZED_PTR, *value = UNINITIALIZED_PTR;
Py_ssize_t pos;
if (!PyArg_ParseTuple(args, "On", &mapping, &pos)) {
return NULL;
@@ -286,6 +286,8 @@ dict_next(PyObject *self, PyObject *args)
if (rc != 0) {
return Py_BuildValue("inOO", rc, pos, key, value);
}
+ assert(key == UNINITIALIZED_PTR);
+ assert(value == UNINITIALIZED_PTR);
if (PyErr_Occurred()) {
return NULL;
}
diff --git a/Modules/_testcapi/docstring.c b/Modules/_testcapi/docstring.c
index b680171cc1437a..d99fbdd904b594 100644
--- a/Modules/_testcapi/docstring.c
+++ b/Modules/_testcapi/docstring.c
@@ -100,6 +100,13 @@ static PyMethodDef test_methods[] = {
{"test_with_docstring",
test_with_docstring, METH_VARARGS,
PyDoc_STR("This is a pretty normal docstring.")},
+ {"func_with_unrepresentable_signature",
+ (PyCFunction)test_with_docstring, METH_VARARGS,
+ PyDoc_STR(
+ "func_with_unrepresentable_signature($module, /, a, b=)\n"
+ "--\n\n"
+ "This docstring has a signature with unrepresentable default."
+ )},
{NULL},
};
@@ -140,6 +147,40 @@ static PyTypeObject DocStringNoSignatureTest = {
.tp_new = PyType_GenericNew,
};
+static PyMethodDef DocStringUnrepresentableSignatureTest_methods[] = {
+ {"meth",
+ (PyCFunction)test_with_docstring, METH_VARARGS,
+ PyDoc_STR(
+ "meth($self, /, a, b=)\n"
+ "--\n\n"
+ "This docstring has a signature with unrepresentable default."
+ )},
+ {"classmeth",
+ (PyCFunction)test_with_docstring, METH_VARARGS|METH_CLASS,
+ PyDoc_STR(
+ "classmeth($type, /, a, b=)\n"
+ "--\n\n"
+ "This docstring has a signature with unrepresentable default."
+ )},
+ {"staticmeth",
+ (PyCFunction)test_with_docstring, METH_VARARGS|METH_STATIC,
+ PyDoc_STR(
+ "staticmeth(a, b=)\n"
+ "--\n\n"
+ "This docstring has a signature with unrepresentable default."
+ )},
+ {NULL},
+};
+
+static PyTypeObject DocStringUnrepresentableSignatureTest = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name = "_testcapi.DocStringUnrepresentableSignatureTest",
+ .tp_basicsize = sizeof(PyObject),
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_methods = DocStringUnrepresentableSignatureTest_methods,
+ .tp_new = PyType_GenericNew,
+};
+
int
_PyTestCapi_Init_Docstring(PyObject *mod)
{
@@ -149,5 +190,8 @@ _PyTestCapi_Init_Docstring(PyObject *mod)
if (PyModule_AddType(mod, &DocStringNoSignatureTest) < 0) {
return -1;
}
+ if (PyModule_AddType(mod, &DocStringUnrepresentableSignatureTest) < 0) {
+ return -1;
+ }
return 0;
}
diff --git a/Modules/_testcapi/exceptions.c b/Modules/_testcapi/exceptions.c
index 025b42db247e81..b54ce0cbb0dd20 100644
--- a/Modules/_testcapi/exceptions.c
+++ b/Modules/_testcapi/exceptions.c
@@ -120,12 +120,15 @@ _testcapi_exc_set_object_fetch_impl(PyObject *module, PyObject *exc,
PyObject *obj)
/*[clinic end generated code: output=7a5ff5f6d3cf687f input=77ec686f1f95fa38]*/
{
- PyObject *type;
- PyObject *value;
- PyObject *tb;
+ PyObject *type = UNINITIALIZED_PTR;
+ PyObject *value = UNINITIALIZED_PTR;
+ PyObject *tb = UNINITIALIZED_PTR;
PyErr_SetObject(exc, obj);
PyErr_Fetch(&type, &value, &tb);
+ assert(type != UNINITIALIZED_PTR);
+ assert(value != UNINITIALIZED_PTR);
+ assert(tb != UNINITIALIZED_PTR);
Py_XDECREF(type);
Py_XDECREF(tb);
return value;
@@ -244,7 +247,7 @@ _testcapi_set_exc_info_impl(PyObject *module, PyObject *new_type,
PyObject *new_value, PyObject *new_tb)
/*[clinic end generated code: output=b55fa35dec31300e input=ea9f19e0f55fe5b3]*/
{
- PyObject *type, *value, *tb;
+ PyObject *type = UNINITIALIZED_PTR, *value = UNINITIALIZED_PTR, *tb = UNINITIALIZED_PTR;
PyErr_GetExcInfo(&type, &value, &tb);
Py_INCREF(new_type);
@@ -278,36 +281,6 @@ _testcapi_set_exception(PyObject *module, PyObject *new_exc)
return exc;
}
-/*[clinic input]
-_testcapi.write_unraisable_exc
- exception as exc: object
- err_msg: object
- obj: object
- /
-[clinic start generated code]*/
-
-static PyObject *
-_testcapi_write_unraisable_exc_impl(PyObject *module, PyObject *exc,
- PyObject *err_msg, PyObject *obj)
-/*[clinic end generated code: output=39827c5e0a8c2092 input=582498da5b2ee6cf]*/
-{
-
- const char *err_msg_utf8;
- if (err_msg != Py_None) {
- err_msg_utf8 = PyUnicode_AsUTF8(err_msg);
- if (err_msg_utf8 == NULL) {
- return NULL;
- }
- }
- else {
- err_msg_utf8 = NULL;
- }
-
- PyErr_SetObject((PyObject *)Py_TYPE(exc), exc);
- _PyErr_WriteUnraisableMsg(err_msg_utf8, obj);
- Py_RETURN_NONE;
-}
-
/*[clinic input]
_testcapi.traceback_print
traceback: object
@@ -384,7 +357,6 @@ static PyMethodDef test_methods[] = {
_TESTCAPI_SET_EXC_INFO_METHODDEF
_TESTCAPI_SET_EXCEPTION_METHODDEF
_TESTCAPI_TRACEBACK_PRINT_METHODDEF
- _TESTCAPI_WRITE_UNRAISABLE_EXC_METHODDEF
_TESTCAPI_UNSTABLE_EXC_PREP_RERAISE_STAR_METHODDEF
{NULL},
};
diff --git a/Modules/_testcapi/getargs.c b/Modules/_testcapi/getargs.c
index 10a1c1dd05253d..5f4a6dc8ca7672 100644
--- a/Modules/_testcapi/getargs.c
+++ b/Modules/_testcapi/getargs.c
@@ -589,54 +589,6 @@ getargs_y_hash(PyObject *self, PyObject *args)
return PyBytes_FromStringAndSize(str, size);
}
-static PyObject *
-getargs_u(PyObject *self, PyObject *args)
-{
- wchar_t *str;
- if (!PyArg_ParseTuple(args, "u", &str)) {
- return NULL;
- }
- return PyUnicode_FromWideChar(str, -1);
-}
-
-static PyObject *
-getargs_u_hash(PyObject *self, PyObject *args)
-{
- wchar_t *str;
- Py_ssize_t size;
- if (!PyArg_ParseTuple(args, "u#", &str, &size)) {
- return NULL;
- }
- return PyUnicode_FromWideChar(str, size);
-}
-
-static PyObject *
-getargs_Z(PyObject *self, PyObject *args)
-{
- wchar_t *str;
- if (!PyArg_ParseTuple(args, "Z", &str)) {
- return NULL;
- }
- if (str != NULL) {
- return PyUnicode_FromWideChar(str, -1);
- }
- Py_RETURN_NONE;
-}
-
-static PyObject *
-getargs_Z_hash(PyObject *self, PyObject *args)
-{
- wchar_t *str;
- Py_ssize_t size;
- if (!PyArg_ParseTuple(args, "Z#", &str, &size)) {
- return NULL;
- }
- if (str != NULL) {
- return PyUnicode_FromWideChar(str, size);
- }
- Py_RETURN_NONE;
-}
-
static PyObject *
getargs_es(PyObject *self, PyObject *args)
{
@@ -845,8 +797,6 @@ static PyMethodDef test_methods[] = {
{"getargs_S", getargs_S, METH_VARARGS},
{"getargs_U", getargs_U, METH_VARARGS},
{"getargs_Y", getargs_Y, METH_VARARGS},
- {"getargs_Z", getargs_Z, METH_VARARGS},
- {"getargs_Z_hash", getargs_Z_hash, METH_VARARGS},
{"getargs_b", getargs_b, METH_VARARGS},
{"getargs_c", getargs_c, METH_VARARGS},
{"getargs_d", getargs_d, METH_VARARGS},
@@ -868,8 +818,6 @@ static PyMethodDef test_methods[] = {
{"getargs_s_hash", getargs_s_hash, METH_VARARGS},
{"getargs_s_star", getargs_s_star, METH_VARARGS},
{"getargs_tuple", getargs_tuple, METH_VARARGS},
- {"getargs_u", getargs_u, METH_VARARGS},
- {"getargs_u_hash", getargs_u_hash, METH_VARARGS},
{"getargs_w_star", getargs_w_star, METH_VARARGS},
{"getargs_y", getargs_y, METH_VARARGS},
{"getargs_y_hash", getargs_y_hash, METH_VARARGS},
diff --git a/Modules/_testcapi/heaptype_relative.c b/Modules/_testcapi/heaptype_relative.c
index c247ca33b33708..53dd01d1ed4f80 100644
--- a/Modules/_testcapi/heaptype_relative.c
+++ b/Modules/_testcapi/heaptype_relative.c
@@ -3,8 +3,6 @@
#include <stddef.h> // max_align_t
#include