add python module

parent fd15c989d2
commit 68711e53c4
@@ -84,13 +84,14 @@ SET(

INCLUDE_DIRECTORIES(
	${PROJECT_BINARY_DIR}
	${CMAKE_CURRENT_SOURCE_DIR}
	${Thirdparty}/spdlog/include
	${Thirdparty}/breakpad/include
	${Thirdparty}/3rdParty_x64/include
	${Thirdparty}/OpenSceneGraph-3.6.5/include
	${Thirdparty}/osgOcean/include
	${Thirdparty}/matlab/include
	${CMAKE_CURRENT_SOURCE_DIR}
+	${Thirdparty}/Python39/include
)

LINK_DIRECTORIES(
@@ -99,6 +100,7 @@ INCLUDE_DIRECTORIES(
	${Thirdparty}/OpenSceneGraph-3.6.5/lib
	${Thirdparty}/osgOcean/lib
	${Thirdparty}/matlab/lib/win64/microsoft
+	${Thirdparty}/Python39/libs
)

if(MSVC)
@@ -167,6 +169,7 @@ target_link_libraries(
	osgSim
	libMatlabDataArray
	libMatlabEngine
+	python39
)

if(${QT_VERSION_MAJOR} LESS 6)
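Note: the hard-coded include/lib paths above work, but CMake ships a first-party alternative. A minimal sketch (assuming CMake 3.12+; the target name below is a placeholder, and Python3_ROOT_DIR would be pointed at the vendored Tool/Python39 tree) of the same wiring through imported targets:

# Resolves headers and the import library for the embedded interpreter.
find_package(Python3 3.9 COMPONENTS Development REQUIRED)

# Replaces the manual INCLUDE_DIRECTORIES/LINK_DIRECTORIES/python39 trio.
target_link_libraries(${PROJECT_NAME} PRIVATE Python3::Python)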
@@ -6,6 +6,7 @@
#include "viewer/OsgViewer.h"
#include "scene/MeshManager.h"
#include "network/NetDriver.h"
+#include "python/PythonModule.h"


Application::Application(int& argc, char** argv, int /*= ApplicationFlags*/)
@@ -30,9 +31,11 @@ void Application::Init() {
	Singleton<EntitiesManager>::Create(this);
	Singleton<WorkSpaceManager>::Create(this);
	Singleton<NetDriver>::Create(this);
+	Singleton<PythonModule>::Create(this);
}

void Application::Uninit() {
+	Singleton<PythonModule>::Destory();
	Singleton<NetDriver>::Destory();
	Singleton<WorkSpaceManager>::Destory();
	Singleton<EntitiesManager>::Destory();
Source/src/python/PythonModule.cpp  (new file, 73 lines)
@@ -0,0 +1,73 @@
#include "python/PythonModule.h"
|
||||
|
||||
#include <assert.h>
|
||||
#undef slots
|
||||
#include <Python.h>
|
||||
#define slots Q_SLOTS
|
||||
|
||||
#include "app/Application.h"
|
||||
#include "common/SpdLogger.h"
|
||||
|
||||
|
||||
template<> PythonModule* Singleton<PythonModule>::instance_ = nullptr;
|
||||
|
||||
PythonModule::PythonModule(QObject* parent)
|
||||
: QObject(parent) {
|
||||
init_ = InitEnv();
|
||||
assert(init_);
|
||||
|
||||
QString appDir = QString("%1/test.py").arg(Application::applicationDirPath());
|
||||
CallFunction(appDir, "test");
|
||||
}
|
||||
|
||||
PythonModule::~PythonModule() {
|
||||
|
||||
}
|
||||
|
||||
void PythonModule::OnDestory() {
|
||||
if (init_) {
|
||||
Py_Finalize();
|
||||
}
|
||||
}
|
||||
|
||||
bool PythonModule::CallFunction(const QString& py, const QString& name) {
|
||||
if (!init_) {
|
||||
LOG_WARN("even not init");
|
||||
return false;
|
||||
}
|
||||
PyRun_SimpleString("import sys");
|
||||
PyRun_SimpleString("sys.path.append('./')");
|
||||
std::string pyPackagesPath = QString("sys.path.append('%1/site-packages')").arg(QCoreApplication::applicationDirPath()).toStdString();
|
||||
PyRun_SimpleString(pyPackagesPath.c_str());
|
||||
|
||||
PyObject* module = PyImport_ImportModule(py.toStdString().c_str());
|
||||
if (nullptr == module) {
|
||||
LOG_WARN("PyImport_ImportModule faile: {}", py.toStdString());
|
||||
return false;
|
||||
}
|
||||
PyObject* func = PyObject_GetAttrString(module, name.toStdString().c_str());
|
||||
if (nullptr == func) {
|
||||
LOG_WARN("PyObject_GetAttrString faile: {}", name.toStdString());
|
||||
return false;
|
||||
}
|
||||
PyObject_CallFunction(func, NULL);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PythonModule::InitEnv() {
|
||||
QString appDir = QString("%1").arg(Application::applicationDirPath());
|
||||
std::wstring path = appDir.toStdWString();
|
||||
Py_SetPythonHome(path.c_str());
|
||||
|
||||
Py_Initialize();
|
||||
|
||||
PyRun_SimpleString("import sys");
|
||||
PyRun_SimpleString("sys.path.append('./')");
|
||||
|
||||
if (!Py_IsInitialized()) {
|
||||
LOG_ERROR("init python");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
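For reference, a minimal test.py that the constructor's CallFunction("test", "test") would drive; the script's contents are assumed here, they are not part of this commit:

# test.py -- placed next to the executable so the sys.path entries
# appended by CallFunction can resolve the "test" module.
def test():
    print("PythonModule bridge is alive")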
Source/src/python/PythonModule.h  (new file, 29 lines)
@@ -0,0 +1,29 @@
#pragma once

#include <unordered_map>

#include <QObject>

#include "app/Singleton.h"


class Entity;

class PythonModule : public QObject, public Singleton<PythonModule> {
    Q_OBJECT

public:
    explicit PythonModule(QObject* parent = nullptr);
    ~PythonModule();
    void OnDestory();

    bool CallFunction(const QString& module, const QString& name);

private:
    bool InitEnv();

private:
    bool init_{ false };
};
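A sketch of a call site, assuming the Singleton base exposes the instance created in Application::Init() through an accessor named Instance() (the accessor name is an assumption; match it to app/Singleton.h):

// Hypothetical trigger, e.g. from a menu action or a network command.
void RunStartupScript() {
    if (PythonModule* python = Singleton<PythonModule>::Instance()) {
        python->CallFunction("test", "test");  // module "test", function "test"
    }
}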
@@ -388,57 +388,57 @@
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="69"/>
+        <location filename="../ui/MainWindow.cpp" line="70"/>
        <source>model elements</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="75"/>
+        <location filename="../ui/MainWindow.cpp" line="76"/>
        <source>attribte</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="91"/>
+        <location filename="../ui/MainWindow.cpp" line="92"/>
        <source>Wave Curve</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="101"/>
+        <location filename="../ui/MainWindow.cpp" line="102"/>
        <source>Speed Curve</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="111"/>
+        <location filename="../ui/MainWindow.cpp" line="112"/>
        <source>3D Curve</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="139"/>
+        <location filename="../ui/MainWindow.cpp" line="140"/>
        <source>Report Table</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="165"/>
+        <location filename="../ui/MainWindow.cpp" line="166"/>
        <source>Report</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="176"/>
+        <location filename="../ui/MainWindow.cpp" line="177"/>
        <source>Signal Indicator Lamp</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="185"/>
+        <location filename="../ui/MainWindow.cpp" line="186"/>
        <source>ParamSetting</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="190"/>
+        <location filename="../ui/MainWindow.cpp" line="191"/>
        <source>name: 5year 0412</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/MainWindow.cpp" line="191"/>
+        <location filename="../ui/MainWindow.cpp" line="192"/>
        <source>start: no start</source>
        <translation type="unfinished"></translation>
    </message>
@@ -472,57 +472,57 @@
<context>
    <name>ModelTreeWidget</name>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="110"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="96"/>
        <source>Release Track</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="127"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="113"/>
        <source>Add boke Entity</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="134"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="120"/>
        <source>Add lsjhqt Entity</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="141"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="127"/>
        <source>Add nimizi Entity</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="148"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="134"/>
        <source>Add tkdlj Entity</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="155"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="141"/>
        <source>Add jiaofan Entity</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="162"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="148"/>
        <source>Add satellite Entity</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="264"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="250"/>
        <source>Track</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="272"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="258"/>
        <source>Add Mesh Component</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="276"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="262"/>
        <source>Add Path Component</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="283"/>
+        <location filename="../ui/ModelBrowser/ModelTreeWidget.cpp" line="269"/>
        <source>Delete</source>
        <translation type="unfinished"></translation>
    </message>
@@ -761,24 +761,34 @@
<context>
    <name>QtConeWaveComponentManager</name>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8326"/>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8335"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8347"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8356"/>
        <source>ConeWaveComponent</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8414"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8437"/>
        <source>Height</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8421"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8444"/>
        <source>Radius</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8428"/>
-        <source>Color</source>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8451"/>
+        <source>Color1</source>
        <translation type="unfinished"></translation>
    </message>
+    <message>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8458"/>
+        <source>Color2</source>
+        <translation type="unfinished"></translation>
+    </message>
+    <message>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8465"/>
+        <source>Color3</source>
+        <translation type="unfinished"></translation>
+    </message>
</context>
@@ -883,28 +893,28 @@
<context>
    <name>QtDashedLineComponentManager</name>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8585"/>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8594"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8636"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8645"/>
        <source>DashedLineComponent</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8663"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8714"/>
        <source>Start</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8670"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8721"/>
        <source>End</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8677"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8728"/>
        <source>Radius</source>
        <translation type="unfinished"></translation>
    </message>
    <message>
-        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8684"/>
+        <location filename="../ui/PropertyBrowser/qtpropertymanager.cpp" line="8735"/>
        <source>Color</source>
        <translation type="unfinished"></translation>
    </message>
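The location churn above is mechanical: inserting the PythonModule include shifted the MainWindow.cpp line references, and Qt's lupdate tool rewrote the <location> entries when the .ts file was regenerated. A typical invocation (the .ts path here is illustrative, not taken from this commit):

lupdate Source/src -ts Source/resources/lang_zh_CN.ts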
@@ -192,12 +192,12 @@ void MainWindow::InitUI() {
	ui->status->setText(tr("start: no start"));

	InitDockLayout();
-	if (0)
-	{
-		MatlabObject* mtlb = new MatlabObject;
-		mtlb->RunMatlabFile("D:\\DYT\\TestGUI\\TestGUI\\LDPlatformTest.m");
-	}
+#if 0
+	MatlabObject* mtlb = new MatlabObject;
+	mtlb->RunMatlabFile("D:\\DYT\\TestGUI\\TestGUI\\LDPlatformTest.m");
+
+#endif
}

void MainWindow::InitDockLayout()
@@ -1,4 +1,4 @@
#include "UDPRecData.h"

-#include <QVariant>;
+#include <QUdpSocket>
|
BIN
Tool/Python39/DLLs/_asyncio.pyd
Normal file
BIN
Tool/Python39/DLLs/_asyncio.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_bz2.pyd
Normal file
BIN
Tool/Python39/DLLs/_bz2.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_ctypes.pyd
Normal file
BIN
Tool/Python39/DLLs/_ctypes.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_ctypes_test.pyd
Normal file
BIN
Tool/Python39/DLLs/_ctypes_test.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_decimal.pyd
Normal file
BIN
Tool/Python39/DLLs/_decimal.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_elementtree.pyd
Normal file
BIN
Tool/Python39/DLLs/_elementtree.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_hashlib.pyd
Normal file
BIN
Tool/Python39/DLLs/_hashlib.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_lzma.pyd
Normal file
BIN
Tool/Python39/DLLs/_lzma.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_msi.pyd
Normal file
BIN
Tool/Python39/DLLs/_msi.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_multiprocessing.pyd
Normal file
BIN
Tool/Python39/DLLs/_multiprocessing.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_overlapped.pyd
Normal file
BIN
Tool/Python39/DLLs/_overlapped.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_queue.pyd
Normal file
BIN
Tool/Python39/DLLs/_queue.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_socket.pyd
Normal file
BIN
Tool/Python39/DLLs/_socket.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_sqlite3.pyd
Normal file
BIN
Tool/Python39/DLLs/_sqlite3.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_ssl.pyd
Normal file
BIN
Tool/Python39/DLLs/_ssl.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_testbuffer.pyd
Normal file
BIN
Tool/Python39/DLLs/_testbuffer.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_testcapi.pyd
Normal file
BIN
Tool/Python39/DLLs/_testcapi.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_testconsole.pyd
Normal file
BIN
Tool/Python39/DLLs/_testconsole.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_testimportmultiple.pyd
Normal file
BIN
Tool/Python39/DLLs/_testimportmultiple.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_testinternalcapi.pyd
Normal file
BIN
Tool/Python39/DLLs/_testinternalcapi.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_testmultiphase.pyd
Normal file
BIN
Tool/Python39/DLLs/_testmultiphase.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_tkinter.pyd
Normal file
BIN
Tool/Python39/DLLs/_tkinter.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_uuid.pyd
Normal file
BIN
Tool/Python39/DLLs/_uuid.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/_zoneinfo.pyd
Normal file
BIN
Tool/Python39/DLLs/_zoneinfo.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/libcrypto-1_1.dll
Normal file
BIN
Tool/Python39/DLLs/libcrypto-1_1.dll
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/libffi-7.dll
Normal file
BIN
Tool/Python39/DLLs/libffi-7.dll
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/libssl-1_1.dll
Normal file
BIN
Tool/Python39/DLLs/libssl-1_1.dll
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/py.ico
Normal file
BIN
Tool/Python39/DLLs/py.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 74 KiB |
BIN
Tool/Python39/DLLs/pyc.ico
Normal file
BIN
Tool/Python39/DLLs/pyc.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 77 KiB |
BIN
Tool/Python39/DLLs/pyd.ico
Normal file
BIN
Tool/Python39/DLLs/pyd.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 81 KiB |
BIN
Tool/Python39/DLLs/pyexpat.pyd
Normal file
BIN
Tool/Python39/DLLs/pyexpat.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/python_lib.cat
Normal file
BIN
Tool/Python39/DLLs/python_lib.cat
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/python_tools.cat
Normal file
BIN
Tool/Python39/DLLs/python_tools.cat
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/select.pyd
Normal file
BIN
Tool/Python39/DLLs/select.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/sqlite3.dll
Normal file
BIN
Tool/Python39/DLLs/sqlite3.dll
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/tcl86t.dll
Normal file
BIN
Tool/Python39/DLLs/tcl86t.dll
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/tk86t.dll
Normal file
BIN
Tool/Python39/DLLs/tk86t.dll
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/unicodedata.pyd
Normal file
BIN
Tool/Python39/DLLs/unicodedata.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/DLLs/winsound.pyd
Normal file
BIN
Tool/Python39/DLLs/winsound.pyd
Normal file
Binary file not shown.
BIN
Tool/Python39/Doc/python3910.chm
Normal file
BIN
Tool/Python39/Doc/python3910.chm
Normal file
Binary file not shown.
Tool/Python39/LICENSE.txt  (new file, 650 lines)
@@ -0,0 +1,650 @@
A. HISTORY OF THE SOFTWARE
==========================

Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
as a successor of a language called ABC.  Guido remains Python's
principal author, although it includes many contributions from others.

In 1995, Guido continued his work on Python at the Corporation for
National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
in Reston, Virginia where he released several versions of the
software.

In May 2000, Guido and the Python core development team moved to
BeOpen.com to form the BeOpen PythonLabs team.  In October of the same
year, the PythonLabs team moved to Digital Creations, which became
Zope Corporation.  In 2001, the Python Software Foundation (PSF, see
https://www.python.org/psf/) was formed, a non-profit organization
created specifically to own Python-related Intellectual Property.
Zope Corporation was a sponsoring member of the PSF.

All Python releases are Open Source (see http://www.opensource.org for
the Open Source Definition).  Historically, most, but not all, Python
releases have also been GPL-compatible; the table below summarizes
the various releases.

    Release         Derived     Year        Owner       GPL-
                    from                                compatible? (1)

    0.9.0 thru 1.2              1991-1995   CWI         yes
    1.3 thru 1.5.2  1.2         1995-1999   CNRI        yes
    1.6             1.5.2       2000        CNRI        no
    2.0             1.6         2000        BeOpen.com  no
    1.6.1           1.6         2001        CNRI        yes (2)
    2.1             2.0+1.6.1   2001        PSF         no
    2.0.1           2.0+1.6.1   2001        PSF         yes
    2.1.1           2.1+2.0.1   2001        PSF         yes
    2.1.2           2.1.1       2002        PSF         yes
    2.1.3           2.1.2       2002        PSF         yes
    2.2 and above   2.1.1       2001-now    PSF         yes

Footnotes:

(1) GPL-compatible doesn't mean that we're distributing Python under
    the GPL.  All Python licenses, unlike the GPL, let you distribute
    a modified version without making your changes open source.  The
    GPL-compatible licenses make it possible to combine Python with
    other software that is released under the GPL; the others don't.

(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
    because its license has a choice of law clause.  According to
    CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
    is "not incompatible" with the GPL.

Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.


B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================

Python software and documentation are licensed under the
Python Software Foundation License Version 2.

Starting with Python 3.8.6, examples, recipes, and other code in
the documentation are dual licensed under the PSF License Version 2
and the Zero-Clause BSD license.

Some software incorporated into Python is under different licenses.
The licenses are listed with code falling under that license.


PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------

1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.

2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;
All Rights Reserved" are retained in Python alone or in any derivative version
prepared by Licensee.

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.

4. PSF is making Python available to Licensee on an "AS IS"
basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee.  This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.

8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.


BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
-------------------------------------------

BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1

1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
Individual or Organization ("Licensee") accessing and otherwise using
this software in source or binary form and its associated
documentation ("the Software").

2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive,
royalty-free, world-wide license to reproduce, analyze, test, perform
and/or display publicly, prepare derivative works, distribute, and
otherwise use the Software alone or in any derivative version,
provided, however, that the BeOpen Python License is retained in the
Software, alone or in any derivative version prepared by Licensee.

3. BeOpen is making the Software available to Licensee on an "AS IS"
basis.  BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

5. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

6. This License Agreement shall be governed by and interpreted in all
respects by the law of the State of California, excluding conflict of
law provisions.  Nothing in this License Agreement shall be deemed to
create any relationship of agency, partnership, or joint venture
between BeOpen and Licensee.  This License Agreement does not grant
permission to use BeOpen trademarks or trade names in a trademark
sense to endorse or promote products or services of Licensee, or any
third party.  As an exception, the "BeOpen Python" logos available at
http://www.pythonlabs.com/logos.html may be used according to the
permissions granted on that web page.

7. By copying, installing or otherwise using the software, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.


CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
---------------------------------------

1. This LICENSE AGREEMENT is between the Corporation for National
Research Initiatives, having an office at 1895 Preston White Drive,
Reston, VA 20191 ("CNRI"), and the Individual or Organization
("Licensee") accessing and otherwise using Python 1.6.1 software in
source or binary form and its associated documentation.

2. Subject to the terms and conditions of this License Agreement, CNRI
hereby grants Licensee a nonexclusive, royalty-free, world-wide
license to reproduce, analyze, test, perform and/or display publicly,
prepare derivative works, distribute, and otherwise use Python 1.6.1
alone or in any derivative version, provided, however, that CNRI's
License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
1995-2001 Corporation for National Research Initiatives; All Rights
Reserved" are retained in Python 1.6.1 alone or in any derivative
version prepared by Licensee.  Alternately, in lieu of CNRI's License
Agreement, Licensee may substitute the following text (omitting the
quotes): "Python 1.6.1 is made available subject to the terms and
conditions in CNRI's License Agreement.  This Agreement together with
Python 1.6.1 may be located on the Internet using the following
unique, persistent identifier (known as a handle): 1895.22/1013.  This
Agreement may also be obtained from a proxy server on the Internet
using the following URL: http://hdl.handle.net/1895.22/1013".

3. In the event Licensee prepares a derivative work that is based on
or incorporates Python 1.6.1 or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python 1.6.1.

4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
basis.  CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.

5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.

6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.

7. This License Agreement shall be governed by the federal
intellectual property law of the United States, including without
limitation the federal copyright law, and, to the extent such
U.S. federal law does not apply, by the law of the Commonwealth of
Virginia, excluding Virginia's conflict of law provisions.
Notwithstanding the foregoing, with regard to derivative works based
on Python 1.6.1 that incorporate non-separable material that was
previously distributed under the GNU General Public License (GPL), the
law of the Commonwealth of Virginia shall govern this License
Agreement only as to issues arising under or with respect to
Paragraphs 4, 5, and 7 of this License Agreement.  Nothing in this
License Agreement shall be deemed to create any relationship of
agency, partnership, or joint venture between CNRI and Licensee.  This
License Agreement does not grant permission to use CNRI trademarks or
trade name in a trademark sense to endorse or promote products or
services of Licensee, or any third party.

8. By clicking on the "ACCEPT" button where indicated, or by copying,
installing or otherwise using Python 1.6.1, Licensee agrees to be
bound by the terms and conditions of this License Agreement.

        ACCEPT


CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
--------------------------------------------------

Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
The Netherlands.  All rights reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Stichting Mathematisch
Centrum or CWI not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission.

STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION
----------------------------------------------------------------------

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.



Additional Conditions for this Windows binary build
---------------------------------------------------

This program is linked with and uses Microsoft Distributable Code,
copyrighted by Microsoft Corporation. The Microsoft Distributable Code
is embedded in each .exe, .dll and .pyd file as a result of running
the code through a linker.

If you further distribute programs that include the Microsoft
Distributable Code, you must comply with the restrictions on
distribution specified by Microsoft. In particular, you must require
distributors and external end users to agree to terms that protect the
Microsoft Distributable Code at least as much as Microsoft's own
requirements for the Distributable Code. See Microsoft's documentation
(included in its developer tools and on its website at microsoft.com)
for specific details.

Redistribution of the Windows binary build of the Python interpreter
complies with this agreement, provided that you do not:

- alter any copyright, trademark or patent notice in Microsoft's
Distributable Code;

- use Microsoft's trademarks in your programs' names or in a way that
suggests your programs come from or are endorsed by Microsoft;

- distribute Microsoft's Distributable Code to run on a platform other
than Microsoft operating systems, run-time technologies or application
platforms; or

- include Microsoft Distributable Code in malicious, deceptive or
unlawful programs.

These restrictions apply only to the Microsoft Distributable Code as
defined above, not to Python itself or any programs running on the
Python interpreter. The redistribution of the Python interpreter and
libraries is governed by the Python Software License included with this
file, or by other licenses as marked.



--------------------------------------------------------------------------

This program, "bzip2", the associated library "libbzip2", and all
documentation, are copyright (C) 1996-2010 Julian R Seward.  All
rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. The origin of this software must not be misrepresented; you must
   not claim that you wrote the original software.  If you use this
   software in a product, an acknowledgment in the product
   documentation would be appreciated but is not required.

3. Altered source versions must be plainly marked as such, and must
   not be misrepresented as being the original software.

4. The name of the author may not be used to endorse or promote
   products derived from this software without specific prior written
   permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Julian Seward, jseward@bzip.org
bzip2/libbzip2 version 1.0.6 of 6 September 2010

--------------------------------------------------------------------------


LICENSE ISSUES
==============

The OpenSSL toolkit stays under a double license, i.e. both the conditions of
the OpenSSL License and the original SSLeay license apply to the toolkit.
See below for the actual license texts.

OpenSSL License
---------------

/* ====================================================================
 * Copyright (c) 1998-2019 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

Original SSLeay License
-----------------------

/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *    Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */


libffi - Copyright (c) 1996-2014  Anthony Green, Red Hat, Inc and others.
See source files for details.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
``Software''), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

This software is copyrighted by the Regents of the University of
California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState
Corporation and other parties.  The following terms apply to all files
associated with the software unless explicitly disclaimed in
individual files.

The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply.

IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.  THIS SOFTWARE
IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
MODIFICATIONS.

GOVERNMENT USE: If you are acquiring this software on behalf of the
U.S. government, the Government shall have only "Restricted Rights"
in the software and related documentation as defined in the Federal
Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2).  If you
are acquiring the software on behalf of the Department of Defense, the
software shall be classified as "Commercial Computer Software" and the
Government shall have only "Restricted Rights" as defined in Clause
252.227-7014 (b) (3) of DFARs.  Notwithstanding the foregoing, the
authors grant the U.S. Government and others acting in its behalf
permission to use and distribute the software in accordance with the
terms specified in this license.

This software is copyrighted by the Regents of the University of
California, Sun Microsystems, Inc., Scriptics Corporation, ActiveState
Corporation, Apple Inc. and other parties.  The following terms apply to
all files associated with the software unless explicitly disclaimed in
individual files.

The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply.

IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.  THIS SOFTWARE
IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
MODIFICATIONS.

GOVERNMENT USE: If you are acquiring this software on behalf of the
U.S. government, the Government shall have only "Restricted Rights"
in the software and related documentation as defined in the Federal
Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2).  If you
are acquiring the software on behalf of the Department of Defense, the
software shall be classified as "Commercial Computer Software" and the
Government shall have only "Restricted Rights" as defined in Clause
252.227-7013 (b) (3) of DFARs.  Notwithstanding the foregoing, the
authors grant the U.S. Government and others acting in its behalf
permission to use and distribute the software in accordance with the
terms specified in this license.

Copyright (c) 1993-1999 Ioi Kim Lam.
Copyright (c) 2000-2001 Tix Project Group.
Copyright (c) 2004 ActiveState

This software is copyrighted by the above entities
and other parties.  The following terms apply to all files associated
with the software unless explicitly disclaimed in individual files.

The authors hereby grant permission to use, copy, modify, distribute,
and license this software and its documentation for any purpose, provided
that existing copyright notices are retained in all copies and that this
notice is included verbatim in any distributions. No written agreement,
license, or royalty fee is required for any of the authorized uses.
Modifications to this software may be copyrighted by their authors
and need not follow the licensing terms described here, provided that
the new terms are clearly indicated on the first page of each file where
they apply.

IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.  THIS SOFTWARE
IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
MODIFICATIONS.

GOVERNMENT USE: If you are acquiring this software on behalf of the
U.S. government, the Government shall have only "Restricted Rights"
in the software and related documentation as defined in the Federal
Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2).  If you
are acquiring the software on behalf of the Department of Defense, the
software shall be classified as "Commercial Computer Software" and the
Government shall have only "Restricted Rights" as defined in Clause
252.227-7013 (c) (1) of DFARs.  Notwithstanding the foregoing, the
authors grant the U.S. Government and others acting in its behalf
permission to use and distribute the software in accordance with the
terms specified in this license.

----------------------------------------------------------------------

Parts of this software are based on the Tcl/Tk software copyrighted by
the Regents of the University of California, Sun Microsystems, Inc.,
and other parties. The original license terms of the Tcl/Tk software
distribution is included in the file docs/license.tcltk.

Parts of this software are based on the HTML Library software
copyrighted by Sun Microsystems, Inc. The original license terms of
the HTML Library software distribution is included in the file
docs/license.html_lib.
Tool/Python39/Lib/__future__.py  (new file, 147 lines)
@@ -0,0 +1,147 @@
"""Record of phased-in incompatible language changes.
|
||||
|
||||
Each line is of the form:
|
||||
|
||||
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
|
||||
CompilerFlag ")"
|
||||
|
||||
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
|
||||
of the same form as sys.version_info:
|
||||
|
||||
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
|
||||
PY_MINOR_VERSION, # the 1; an int
|
||||
PY_MICRO_VERSION, # the 0; an int
|
||||
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
|
||||
PY_RELEASE_SERIAL # the 3; an int
|
||||
)
|
||||
|
||||
OptionalRelease records the first release in which
|
||||
|
||||
from __future__ import FeatureName
|
||||
|
||||
was accepted.
|
||||
|
||||
In the case of MandatoryReleases that have not yet occurred,
|
||||
MandatoryRelease predicts the release in which the feature will become part
|
||||
of the language.
|
||||
|
||||
Else MandatoryRelease records when the feature became part of the language;
|
||||
in releases at or after that, modules no longer need
|
||||
|
||||
from __future__ import FeatureName
|
||||
|
||||
to use the feature in question, but may continue to use such imports.
|
||||
|
||||
MandatoryRelease may also be None, meaning that a planned feature got
|
||||
dropped.
|
||||
|
||||
Instances of class _Feature have two corresponding methods,
|
||||
.getOptionalRelease() and .getMandatoryRelease().
|
||||
|
||||
CompilerFlag is the (bitfield) flag that should be passed in the fourth
|
||||
argument to the builtin function compile() to enable the feature in
|
||||
dynamically compiled code. This flag is stored in the .compiler_flag
|
||||
attribute on _Future instances. These values must match the appropriate
|
||||
#defines of CO_xxx flags in Include/compile.h.
|
||||
|
||||
No feature line is ever to be deleted from this file.
|
||||
"""
|
||||
|
||||
all_feature_names = [
|
||||
"nested_scopes",
|
||||
"generators",
|
||||
"division",
|
||||
"absolute_import",
|
||||
"with_statement",
|
||||
"print_function",
|
||||
"unicode_literals",
|
||||
"barry_as_FLUFL",
|
||||
"generator_stop",
|
||||
"annotations",
|
||||
]
|
||||
|
||||
__all__ = ["all_feature_names"] + all_feature_names
|
||||
|
||||
# The CO_xxx symbols are defined here under the same names defined in
|
||||
# code.h and used by compile.h, so that an editor search will find them here.
|
||||
# However, they're not exported in __all__, because they don't really belong to
|
||||
# this module.
|
||||
CO_NESTED = 0x0010 # nested_scopes
|
||||
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
|
||||
CO_FUTURE_DIVISION = 0x20000 # division
|
||||
CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default
|
||||
CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement
|
||||
CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function
|
||||
CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals
|
||||
CO_FUTURE_BARRY_AS_BDFL = 0x400000
|
||||
CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators
|
||||
CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime
|
||||
|
||||
|
||||
class _Feature:
|
||||
|
||||
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
|
||||
self.optional = optionalRelease
|
||||
self.mandatory = mandatoryRelease
|
||||
self.compiler_flag = compiler_flag
|
||||
|
||||
def getOptionalRelease(self):
|
||||
"""Return first release in which this feature was recognized.
|
||||
|
||||
This is a 5-tuple, of the same form as sys.version_info.
|
||||
"""
|
||||
return self.optional
|
||||
|
||||
def getMandatoryRelease(self):
|
||||
"""Return release in which this feature will become mandatory.
|
||||
|
||||
This is a 5-tuple, of the same form as sys.version_info, or, if
|
||||
the feature was dropped, is None.
|
||||
"""
|
||||
return self.mandatory
|
||||
|
||||
def __repr__(self):
|
||||
return "_Feature" + repr((self.optional,
|
||||
self.mandatory,
|
||||
self.compiler_flag))
|
||||
|
||||
|
||||
nested_scopes = _Feature((2, 1, 0, "beta", 1),
|
||||
(2, 2, 0, "alpha", 0),
|
||||
CO_NESTED)
|
||||
|
||||
generators = _Feature((2, 2, 0, "alpha", 1),
|
||||
(2, 3, 0, "final", 0),
|
||||
CO_GENERATOR_ALLOWED)
|
||||
|
||||
division = _Feature((2, 2, 0, "alpha", 2),
|
||||
(3, 0, 0, "alpha", 0),
|
||||
CO_FUTURE_DIVISION)
|
||||
|
||||
absolute_import = _Feature((2, 5, 0, "alpha", 1),
|
||||
(3, 0, 0, "alpha", 0),
|
||||
CO_FUTURE_ABSOLUTE_IMPORT)
|
||||
|
||||
with_statement = _Feature((2, 5, 0, "alpha", 1),
|
||||
(2, 6, 0, "alpha", 0),
|
||||
CO_FUTURE_WITH_STATEMENT)
|
||||
|
||||
print_function = _Feature((2, 6, 0, "alpha", 2),
|
||||
(3, 0, 0, "alpha", 0),
|
||||
CO_FUTURE_PRINT_FUNCTION)
|
||||
|
||||
unicode_literals = _Feature((2, 6, 0, "alpha", 2),
|
||||
(3, 0, 0, "alpha", 0),
|
||||
CO_FUTURE_UNICODE_LITERALS)
|
||||
|
||||
barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
|
||||
(4, 0, 0, "alpha", 0),
|
||||
CO_FUTURE_BARRY_AS_BDFL)
|
||||
|
||||
generator_stop = _Feature((3, 5, 0, "beta", 1),
|
||||
(3, 7, 0, "alpha", 0),
|
||||
CO_FUTURE_GENERATOR_STOP)
|
||||
|
||||
annotations = _Feature((3, 7, 0, "beta", 1),
|
||||
(3, 10, 0, "alpha", 0),
|
||||
CO_FUTURE_ANNOTATIONS)
|
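The _Feature records above are plain data; a minimal sketch (editor's note, not part of this commit) of how they can be queried at runtime through the stock __future__ module:

import __future__

feat = __future__.annotations
print(feat.getOptionalRelease())    # (3, 7, 0, 'beta', 1)
print(feat.getMandatoryRelease())   # (3, 10, 0, 'alpha', 0)

# The compiler flag enables the feature in dynamically compiled code;
# here annotations become lazy strings, so SomeName need not exist yet:
code = compile("def f(x: SomeName): pass", "<sketch>", "exec",
               flags=feat.compiler_flag)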
1
Tool/Python39/Lib/__phello__.foo.py
Normal file
@ -0,0 +1 @@
# This file exists as a helper for the test.test_frozen module.
91
Tool/Python39/Lib/_aix_support.py
Normal file
@ -0,0 +1,91 @@
"""Shared AIX support functions."""

import sys
import sysconfig

try:
    import subprocess
except ImportError:  # pragma: no cover
    # _aix_support is used in distutils by setup.py to build C extensions,
    # before subprocess dependencies like _posixsubprocess are available.
    import _bootsubprocess as subprocess


def _aix_tag(vrtl, bd):
    # type: (List[int], int) -> str
    # Infer the ABI bitwidth from maxsize (assuming 64 bit as the default)
    _sz = 32 if sys.maxsize == (2**31-1) else 64
    _bd = bd if bd != 0 else 9988
    # vrtl[version, release, technology_level]
    return "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(vrtl[0], vrtl[1], vrtl[2], _bd, _sz)


# extract version, release and technology level from a VRMF string
def _aix_vrtl(vrmf):
    # type: (str) -> List[int]
    v, r, tl = vrmf.split(".")[:3]
    return [int(v[-1]), int(r), int(tl)]


def _aix_bos_rte():
    # type: () -> Tuple[str, int]
    """
    Return a Tuple[str, int] e.g., ['7.1.4.34', 1806]
    The fileset bos.rte represents the current AIX run-time level. It's VRMF and
    builddate reflect the current ABI levels of the runtime environment.
    If no builddate is found give a value that will satisfy pep425 related queries
    """
    # All AIX systems to have lslpp installed in this location
    out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.rte"])
    out = out.decode("utf-8")
    out = out.strip().split(":")  # type: ignore
    _bd = int(out[-1]) if out[-1] != '' else 9988
    return (str(out[2]), _bd)


def aix_platform():
    # type: () -> str
    """
    AIX filesets are identified by four decimal values: V.R.M.F.
    V (version) and R (release) can be retreived using ``uname``
    Since 2007, starting with AIX 5.3 TL7, the M value has been
    included with the fileset bos.rte and represents the Technology
    Level (TL) of AIX. The F (Fix) value also increases, but is not
    relevant for comparing releases and binary compatibility.
    For binary compatibility the so-called builddate is needed.
    Again, the builddate of an AIX release is associated with bos.rte.
    AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\
    support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html

    For pep425 purposes the AIX platform tag becomes:
    "aix-{:1x}{:1d}{:02d}-{:04d}-{}".format(v, r, tl, builddate, bitsize)
    e.g., "aix-6107-1415-32" for AIX 6.1 TL7 bd 1415, 32-bit
    and, "aix-6107-1415-64" for AIX 6.1 TL7 bd 1415, 64-bit
    """
    vrmf, bd = _aix_bos_rte()
    return _aix_tag(_aix_vrtl(vrmf), bd)


# extract vrtl from the BUILD_GNU_TYPE as an int
def _aix_bgt():
    # type: () -> List[int]
    gnu_type = sysconfig.get_config_var("BUILD_GNU_TYPE")
    if not gnu_type:
        raise ValueError("BUILD_GNU_TYPE is not defined")
    return _aix_vrtl(vrmf=gnu_type)


def aix_buildtag():
    # type: () -> str
    """
    Return the platform_tag of the system Python was built on.
    """
    # AIX_BUILDDATE is defined by configure with:
    # lslpp -Lcq bos.rte | awk -F: '{ print $NF }'
    build_date = sysconfig.get_config_var("AIX_BUILDDATE")
    try:
        build_date = int(build_date)
    except (ValueError, TypeError):
        raise ValueError(f"AIX_BUILDDATE is not defined or invalid: "
                         f"{build_date!r}")
    return _aix_tag(_aix_bgt(), build_date)
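Editor's sketch (not part of this commit): _aix_tag() is a pure function, so the tag format can be exercised on any platform; the version/release/TL and builddate below are made-up values.

from _aix_support import _aix_tag

# AIX 7.2 TL3, builddate 2045 -> 'aix-7203-2045-64' on a 64-bit interpreter
print(_aix_tag([7, 2, 3], 2045))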
46
Tool/Python39/Lib/_bootlocale.py
Normal file
@ -0,0 +1,46 @@
"""A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.

Don't import directly from third-party code; use the `locale` module instead!
"""

import sys
import _locale

if sys.platform.startswith("win"):
    def getpreferredencoding(do_setlocale=True):
        if sys.flags.utf8_mode:
            return 'UTF-8'
        return _locale._getdefaultlocale()[1]
else:
    try:
        _locale.CODESET
    except AttributeError:
        if hasattr(sys, 'getandroidapilevel'):
            # On Android langinfo.h and CODESET are missing, and UTF-8 is
            # always used in mbstowcs() and wcstombs().
            def getpreferredencoding(do_setlocale=True):
                return 'UTF-8'
        else:
            def getpreferredencoding(do_setlocale=True):
                if sys.flags.utf8_mode:
                    return 'UTF-8'
                # This path for legacy systems needs the more complex
                # getdefaultlocale() function, import the full locale module.
                import locale
                return locale.getpreferredencoding(do_setlocale)
    else:
        def getpreferredencoding(do_setlocale=True):
            assert not do_setlocale
            if sys.flags.utf8_mode:
                return 'UTF-8'
            result = _locale.nl_langinfo(_locale.CODESET)
            if not result and sys.platform == 'darwin':
                # nl_langinfo can return an empty string
                # when the setting has an invalid value.
                # Default to UTF-8 in that case because
                # UTF-8 is the default charset on OSX and
                # returning nothing will crash the
                # interpreter.
                result = 'UTF-8'
            return result
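Whichever branch is taken, the module ends up exposing a single function; a quick sketch (editor's note, not part of this commit):

import _bootlocale

# do_setlocale=False is required on the POSIX/CODESET path (see the assert)
print(_bootlocale.getpreferredencoding(False))  # e.g. 'UTF-8'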
97
Tool/Python39/Lib/_bootsubprocess.py
Normal file
@ -0,0 +1,97 @@
"""
Basic subprocess implementation for POSIX which only uses os functions. Only
implement features required by setup.py to build C extension modules when
subprocess is unavailable. setup.py is not used on Windows.
"""
import os


# distutils.spawn used by distutils.command.build_ext
# calls subprocess.Popen().wait()
class Popen:
    def __init__(self, cmd, env=None):
        self._cmd = cmd
        self._env = env
        self.returncode = None

    def wait(self):
        pid = os.fork()
        if pid == 0:
            # Child process
            try:
                if self._env is not None:
                    os.execve(self._cmd[0], self._cmd, self._env)
                else:
                    os.execv(self._cmd[0], self._cmd)
            finally:
                os._exit(1)
        else:
            # Parent process
            _, status = os.waitpid(pid, 0)
            self.returncode = os.waitstatus_to_exitcode(status)

        return self.returncode


def _check_cmd(cmd):
    # Use regex [a-zA-Z0-9./-]+: reject empty string, space, etc.
    safe_chars = []
    for first, last in (("a", "z"), ("A", "Z"), ("0", "9")):
        for ch in range(ord(first), ord(last) + 1):
            safe_chars.append(chr(ch))
    safe_chars.append("./-")
    safe_chars = ''.join(safe_chars)

    if isinstance(cmd, (tuple, list)):
        check_strs = cmd
    elif isinstance(cmd, str):
        check_strs = [cmd]
    else:
        return False

    for arg in check_strs:
        if not isinstance(arg, str):
            return False
        if not arg:
            # reject empty string
            return False
        for ch in arg:
            if ch not in safe_chars:
                return False

    return True


# _aix_support used by distutil.util calls subprocess.check_output()
def check_output(cmd, **kwargs):
    if kwargs:
        raise NotImplementedError(repr(kwargs))

    if not _check_cmd(cmd):
        raise ValueError(f"unsupported command: {cmd!r}")

    tmp_filename = "check_output.tmp"
    if not isinstance(cmd, str):
        cmd = " ".join(cmd)
    cmd = f"{cmd} >{tmp_filename}"

    try:
        # system() spawns a shell
        status = os.system(cmd)
        exitcode = os.waitstatus_to_exitcode(status)
        if exitcode:
            raise ValueError(f"Command {cmd!r} returned non-zero "
                             f"exit status {exitcode!r}")

        try:
            with open(tmp_filename, "rb") as fp:
                stdout = fp.read()
        except FileNotFoundError:
            stdout = b''
    finally:
        try:
            os.unlink(tmp_filename)
        except OSError:
            pass

    return stdout
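A short sketch (editor's note, not part of this commit) of the fallback API on POSIX. Note that _check_cmd() only admits arguments built from [a-zA-Z0-9./-], so commands containing spaces or shell metacharacters are rejected before os.system() ever sees them:

import _bootsubprocess

out = _bootsubprocess.check_output(["/bin/ls", "-l", "/tmp"])
print(out.decode())

# Rejected up front, because "echo hi" contains a space:
# _bootsubprocess.check_output(["/bin/sh", "-c", "echo hi"])  # ValueError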
1116
Tool/Python39/Lib/_collections_abc.py
Normal file
File diff suppressed because it is too large
251
Tool/Python39/Lib/_compat_pickle.py
Normal file
@ -0,0 +1,251 @@
# This module is used to map the old Python 2 names to the new names used in
# Python 3 for the pickle module.  This needed to make pickle streams
# generated with Python 2 loadable by Python 3.

# This is a copy of lib2to3.fixes.fix_imports.MAPPING.  We cannot import
# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
# Thus, this could cause the module to be imported recursively.
IMPORT_MAPPING = {
    '__builtin__' : 'builtins',
    'copy_reg': 'copyreg',
    'Queue': 'queue',
    'SocketServer': 'socketserver',
    'ConfigParser': 'configparser',
    'repr': 'reprlib',
    'tkFileDialog': 'tkinter.filedialog',
    'tkSimpleDialog': 'tkinter.simpledialog',
    'tkColorChooser': 'tkinter.colorchooser',
    'tkCommonDialog': 'tkinter.commondialog',
    'Dialog': 'tkinter.dialog',
    'Tkdnd': 'tkinter.dnd',
    'tkFont': 'tkinter.font',
    'tkMessageBox': 'tkinter.messagebox',
    'ScrolledText': 'tkinter.scrolledtext',
    'Tkconstants': 'tkinter.constants',
    'Tix': 'tkinter.tix',
    'ttk': 'tkinter.ttk',
    'Tkinter': 'tkinter',
    'markupbase': '_markupbase',
    '_winreg': 'winreg',
    'thread': '_thread',
    'dummy_thread': '_dummy_thread',
    'dbhash': 'dbm.bsd',
    'dumbdbm': 'dbm.dumb',
    'dbm': 'dbm.ndbm',
    'gdbm': 'dbm.gnu',
    'xmlrpclib': 'xmlrpc.client',
    'SimpleXMLRPCServer': 'xmlrpc.server',
    'httplib': 'http.client',
    'htmlentitydefs' : 'html.entities',
    'HTMLParser' : 'html.parser',
    'Cookie': 'http.cookies',
    'cookielib': 'http.cookiejar',
    'BaseHTTPServer': 'http.server',
    'test.test_support': 'test.support',
    'commands': 'subprocess',
    'urlparse' : 'urllib.parse',
    'robotparser' : 'urllib.robotparser',
    'urllib2': 'urllib.request',
    'anydbm': 'dbm',
    '_abcoll' : 'collections.abc',
}


# This contains rename rules that are easy to handle.  We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
# These rules should be run before import names are fixed.
NAME_MAPPING = {
    ('__builtin__', 'xrange'): ('builtins', 'range'),
    ('__builtin__', 'reduce'): ('functools', 'reduce'),
    ('__builtin__', 'intern'): ('sys', 'intern'),
    ('__builtin__', 'unichr'): ('builtins', 'chr'),
    ('__builtin__', 'unicode'): ('builtins', 'str'),
    ('__builtin__', 'long'): ('builtins', 'int'),
    ('itertools', 'izip'): ('builtins', 'zip'),
    ('itertools', 'imap'): ('builtins', 'map'),
    ('itertools', 'ifilter'): ('builtins', 'filter'),
    ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
    ('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
    ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
    ('UserList', 'UserList'): ('collections', 'UserList'),
    ('UserString', 'UserString'): ('collections', 'UserString'),
    ('whichdb', 'whichdb'): ('dbm', 'whichdb'),
    ('_socket', 'fromfd'): ('socket', 'fromfd'),
    ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
    ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
    ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
    ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
    ('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
    ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
    ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
    ('urllib', 'quote'): ('urllib.parse', 'quote'),
    ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
    ('urllib', 'unquote'): ('urllib.parse', 'unquote'),
    ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
    ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
    ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
    ('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
    ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
    ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
    ('urllib2', 'URLError'): ('urllib.error', 'URLError'),
}

PYTHON2_EXCEPTIONS = (
    "ArithmeticError",
    "AssertionError",
    "AttributeError",
    "BaseException",
    "BufferError",
    "BytesWarning",
    "DeprecationWarning",
    "EOFError",
    "EnvironmentError",
    "Exception",
    "FloatingPointError",
    "FutureWarning",
    "GeneratorExit",
    "IOError",
    "ImportError",
    "ImportWarning",
    "IndentationError",
    "IndexError",
    "KeyError",
    "KeyboardInterrupt",
    "LookupError",
    "MemoryError",
    "NameError",
    "NotImplementedError",
    "OSError",
    "OverflowError",
    "PendingDeprecationWarning",
    "ReferenceError",
    "RuntimeError",
    "RuntimeWarning",
    # StandardError is gone in Python 3, so we map it to Exception
    "StopIteration",
    "SyntaxError",
    "SyntaxWarning",
    "SystemError",
    "SystemExit",
    "TabError",
    "TypeError",
    "UnboundLocalError",
    "UnicodeDecodeError",
    "UnicodeEncodeError",
    "UnicodeError",
    "UnicodeTranslateError",
    "UnicodeWarning",
    "UserWarning",
    "ValueError",
    "Warning",
    "ZeroDivisionError",
)

try:
    WindowsError
except NameError:
    pass
else:
    PYTHON2_EXCEPTIONS += ("WindowsError",)

for excname in PYTHON2_EXCEPTIONS:
    NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)

MULTIPROCESSING_EXCEPTIONS = (
    'AuthenticationError',
    'BufferTooShort',
    'ProcessError',
    'TimeoutError',
)

for excname in MULTIPROCESSING_EXCEPTIONS:
    NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)

# Same, but for 3.x to 2.x
REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)

# Non-mutual mappings.

IMPORT_MAPPING.update({
    'cPickle': 'pickle',
    '_elementtree': 'xml.etree.ElementTree',
    'FileDialog': 'tkinter.filedialog',
    'SimpleDialog': 'tkinter.simpledialog',
    'DocXMLRPCServer': 'xmlrpc.server',
    'SimpleHTTPServer': 'http.server',
    'CGIHTTPServer': 'http.server',
    # For compatibility with broken pickles saved in old Python 3 versions
    'UserDict': 'collections',
    'UserList': 'collections',
    'UserString': 'collections',
    'whichdb': 'dbm',
    'StringIO': 'io',
    'cStringIO': 'io',
})

REVERSE_IMPORT_MAPPING.update({
    '_bz2': 'bz2',
    '_dbm': 'dbm',
    '_functools': 'functools',
    '_gdbm': 'gdbm',
    '_pickle': 'pickle',
})

NAME_MAPPING.update({
    ('__builtin__', 'basestring'): ('builtins', 'str'),
    ('exceptions', 'StandardError'): ('builtins', 'Exception'),
    ('UserDict', 'UserDict'): ('collections', 'UserDict'),
    ('socket', '_socketobject'): ('socket', 'SocketType'),
})

REVERSE_NAME_MAPPING.update({
    ('_functools', 'reduce'): ('__builtin__', 'reduce'),
    ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
    ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
    ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
    ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
    ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
    ('xmlrpc.server', 'XMLRPCDocGenerator'):
        ('DocXMLRPCServer', 'XMLRPCDocGenerator'),
    ('xmlrpc.server', 'DocXMLRPCRequestHandler'):
        ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
    ('xmlrpc.server', 'DocXMLRPCServer'):
        ('DocXMLRPCServer', 'DocXMLRPCServer'),
    ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
        ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
    ('http.server', 'SimpleHTTPRequestHandler'):
        ('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
    ('http.server', 'CGIHTTPRequestHandler'):
        ('CGIHTTPServer', 'CGIHTTPRequestHandler'),
    ('_socket', 'socket'): ('socket', '_socketobject'),
})

PYTHON3_OSERROR_EXCEPTIONS = (
    'BrokenPipeError',
    'ChildProcessError',
    'ConnectionAbortedError',
    'ConnectionError',
    'ConnectionRefusedError',
    'ConnectionResetError',
    'FileExistsError',
    'FileNotFoundError',
    'InterruptedError',
    'IsADirectoryError',
    'NotADirectoryError',
    'PermissionError',
    'ProcessLookupError',
    'TimeoutError',
)

for excname in PYTHON3_OSERROR_EXCEPTIONS:
    REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')

PYTHON3_IMPORTERROR_EXCEPTIONS = (
    'ModuleNotFoundError',
)

for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
    REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')
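Sketch (editor's note, not part of this commit): pickle consults these tables when pickle.load() runs with fix_imports=True on a stream written by Python 2, and the lookups are plain dict indexing:

from _compat_pickle import IMPORT_MAPPING, NAME_MAPPING

print(IMPORT_MAPPING["cStringIO"])                # 'io'
print(NAME_MAPPING[("__builtin__", "unicode")])   # ('builtins', 'str')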
152
Tool/Python39/Lib/_compression.py
Normal file
@ -0,0 +1,152 @@
"""Internal classes used by the gzip, lzma and bz2 modules"""

import io


BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE  # Compressed data read chunk size


class BaseStream(io.BufferedIOBase):
    """Mode-checking helper functions."""

    def _check_not_closed(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def _check_can_read(self):
        if not self.readable():
            raise io.UnsupportedOperation("File not open for reading")

    def _check_can_write(self):
        if not self.writable():
            raise io.UnsupportedOperation("File not open for writing")

    def _check_can_seek(self):
        if not self.readable():
            raise io.UnsupportedOperation("Seeking is only supported "
                                          "on files open for reading")
        if not self.seekable():
            raise io.UnsupportedOperation("The underlying file object "
                                          "does not support seeking")


class DecompressReader(io.RawIOBase):
    """Adapts the decompressor API to a RawIOBase reader API"""

    def readable(self):
        return True

    def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args):
        self._fp = fp
        self._eof = False
        self._pos = 0  # Current offset in decompressed stream

        # Set to size of decompressed stream once it is known, for SEEK_END
        self._size = -1

        # Save the decompressor factory and arguments.
        # If the file contains multiple compressed streams, each
        # stream will need a separate decompressor object. A new decompressor
        # object is also needed when implementing a backwards seek().
        self._decomp_factory = decomp_factory
        self._decomp_args = decomp_args
        self._decompressor = self._decomp_factory(**self._decomp_args)

        # Exception class to catch from decompressor signifying invalid
        # trailing data to ignore
        self._trailing_error = trailing_error

    def close(self):
        self._decompressor = None
        return super().close()

    def seekable(self):
        return self._fp.seekable()

    def readinto(self, b):
        with memoryview(b) as view, view.cast("B") as byte_view:
            data = self.read(len(byte_view))
            byte_view[:len(data)] = data
        return len(data)

    def read(self, size=-1):
        if size < 0:
            return self.readall()

        if not size or self._eof:
            return b""
        data = None  # Default if EOF is encountered
        # Depending on the input data, our call to the decompressor may not
        # return any data. In this case, try again after reading another block.
        while True:
            if self._decompressor.eof:
                rawblock = (self._decompressor.unused_data or
                            self._fp.read(BUFFER_SIZE))
                if not rawblock:
                    break
                # Continue to next stream.
                self._decompressor = self._decomp_factory(
                    **self._decomp_args)
                try:
                    data = self._decompressor.decompress(rawblock, size)
                except self._trailing_error:
                    # Trailing data isn't a valid compressed stream; ignore it.
                    break
            else:
                if self._decompressor.needs_input:
                    rawblock = self._fp.read(BUFFER_SIZE)
                    if not rawblock:
                        raise EOFError("Compressed file ended before the "
                                       "end-of-stream marker was reached")
                else:
                    rawblock = b""
                data = self._decompressor.decompress(rawblock, size)
            if data:
                break
        if not data:
            self._eof = True
            self._size = self._pos
            return b""
        self._pos += len(data)
        return data

    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        self._fp.seek(0)
        self._eof = False
        self._pos = 0
        self._decompressor = self._decomp_factory(**self._decomp_args)

    def seek(self, offset, whence=io.SEEK_SET):
        # Recalculate offset as an absolute file position.
        if whence == io.SEEK_SET:
            pass
        elif whence == io.SEEK_CUR:
            offset = self._pos + offset
        elif whence == io.SEEK_END:
            # Seeking relative to EOF - we need to know the file's size.
            if self._size < 0:
                while self.read(io.DEFAULT_BUFFER_SIZE):
                    pass
            offset = self._size + offset
        else:
            raise ValueError("Invalid value for whence: {}".format(whence))

        # Make it so that offset is the number of bytes to skip forward.
        if offset < self._pos:
            self._rewind()
        else:
            offset -= self._pos

        # Read and discard data until we reach the desired position.
        while offset > 0:
            data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset))
            if not data:
                break
            offset -= len(data)

        return self._pos

    def tell(self):
        """Return the current file position."""
        return self._pos
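Sketch (editor's note, not part of this commit) of DecompressReader in use, mirroring how bz2.BZ2File wires it up: the raw reader adapts BZ2Decompressor to the RawIOBase API and a BufferedReader sits on top:

import bz2
import io
import _compression

payload = b"hello world" * 1000
raw = io.BytesIO(bz2.compress(payload))
reader = _compression.DecompressReader(raw, bz2.BZ2Decompressor,
                                       trailing_error=OSError)
with io.BufferedReader(reader) as f:
    assert f.read() == payload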
395
Tool/Python39/Lib/_markupbase.py
Normal file
@ -0,0 +1,395 @@
"""Shared support for scanning document type declarations in HTML and XHTML.

This module is used as a foundation for the html.parser module.  It has no
documented public API and should not be used directly.

"""

import re

_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')

# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf

_msmarkedsectionclose = re.compile(r']\s*>')

del re


class ParserBase:
    """Parser base class which provides some common support methods used
    by the SGML/HTML and XHTML parsers."""

    def __init__(self):
        if self.__class__ is ParserBase:
            raise RuntimeError(
                "_markupbase.ParserBase must be subclassed")

    def error(self, message):
        raise NotImplementedError(
            "subclasses of ParserBase must override error()")

    def reset(self):
        self.lineno = 1
        self.offset = 0

    def getpos(self):
        """Return current line number and offset."""
        return self.lineno, self.offset

    # Internal -- update line number and offset.  This should be
    # called for each piece of data exactly once, in order -- in other
    # words the concatenation of all the input strings to this
    # function should be exactly the entire input.
    def updatepos(self, i, j):
        if i >= j:
            return j
        rawdata = self.rawdata
        nlines = rawdata.count("\n", i, j)
        if nlines:
            self.lineno = self.lineno + nlines
            pos = rawdata.rindex("\n", i, j) # Should not fail
            self.offset = j-(pos+1)
        else:
            self.offset = self.offset + j-i
        return j

    _decl_otherchars = ''

    # Internal -- parse declaration (for use by subclasses).
    def parse_declaration(self, i):
        # This is some sort of declaration; in "HTML as
        # deployed," this should only be the document type
        # declaration ("<!DOCTYPE html...>").
        # ISO 8879:1986, however, has more complex
        # declaration syntax for elements in <!...>, including:
        # --comment--
        # [marked section]
        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
        # ATTLIST, NOTATION, SHORTREF, USEMAP,
        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
        rawdata = self.rawdata
        j = i + 2
        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
        if rawdata[j:j+1] == ">":
            # the empty comment <!>
            return j + 1
        if rawdata[j:j+1] in ("-", ""):
            # Start of comment followed by buffer boundary,
            # or just a buffer boundary.
            return -1
        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
        n = len(rawdata)
        if rawdata[j:j+2] == '--': #comment
            # Locate --.*-- as the body of the comment
            return self.parse_comment(i)
        elif rawdata[j] == '[': #marked section
            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
            # Note that this is extended by Microsoft Office "Save as Web" function
            # to include [if...] and [endif].
            return self.parse_marked_section(i)
        else: #all other declaration elements
            decltype, j = self._scan_name(j, i)
        if j < 0:
            return j
        if decltype == "doctype":
            self._decl_otherchars = ''
        while j < n:
            c = rawdata[j]
            if c == ">":
                # end of declaration syntax
                data = rawdata[i+2:j]
                if decltype == "doctype":
                    self.handle_decl(data)
                else:
                    # According to the HTML5 specs sections "8.2.4.44 Bogus
                    # comment state" and "8.2.4.45 Markup declaration open
                    # state", a comment token should be emitted.
                    # Calling unknown_decl provides more flexibility though.
                    self.unknown_decl(data)
                return j + 1
            if c in "\"'":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1 # incomplete
                j = m.end()
            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
                name, j = self._scan_name(j, i)
            elif c in self._decl_otherchars:
                j = j + 1
            elif c == "[":
                # this could be handled in a separate doctype parser
                if decltype == "doctype":
                    j = self._parse_doctype_subset(j + 1, i)
                elif decltype in {"attlist", "linktype", "link", "element"}:
                    # must tolerate []'d groups in a content model in an element declaration
                    # also in data attribute specifications of attlist declaration
                    # also link type declaration subsets in linktype declarations
                    # also link attribute specification lists in link declarations
                    self.error("unsupported '[' char in %s declaration" % decltype)
                else:
                    self.error("unexpected '[' char in declaration")
            else:
                self.error(
                    "unexpected %r char in declaration" % rawdata[j])
            if j < 0:
                return j
        return -1 # incomplete

    # Internal -- parse a marked section
    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
    def parse_marked_section(self, i, report=1):
        rawdata= self.rawdata
        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
        sectName, j = self._scan_name( i+3, i )
        if j < 0:
            return j
        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
            # look for standard ]]> ending
            match= _markedsectionclose.search(rawdata, i+3)
        elif sectName in {"if", "else", "endif"}:
            # look for MS Office ]> ending
            match= _msmarkedsectionclose.search(rawdata, i+3)
        else:
            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.unknown_decl(rawdata[i+3: j])
        return match.end(0)

    # Internal -- parse comment, return length or -1 if not terminated
    def parse_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+4] != '<!--':
            self.error('unexpected call to parse_comment()')
        match = _commentclose.search(rawdata, i+4)
        if not match:
            return -1
        if report:
            j = match.start(0)
            self.handle_comment(rawdata[i+4: j])
        return match.end(0)

    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
    # returning the index just past any whitespace following the trailing ']'.
    def _parse_doctype_subset(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        j = i
        while j < n:
            c = rawdata[j]
            if c == "<":
                s = rawdata[j:j+2]
                if s == "<":
                    # end of buffer; incomplete
                    return -1
                if s != "<!":
                    self.updatepos(declstartpos, j + 1)
                    self.error("unexpected char in internal subset (in %r)" % s)
                if (j + 2) == n:
                    # end of buffer; incomplete
                    return -1
                if (j + 4) > n:
                    # end of buffer; incomplete
                    return -1
                if rawdata[j:j+4] == "<!--":
                    j = self.parse_comment(j, report=0)
                    if j < 0:
                        return j
                    continue
                name, j = self._scan_name(j + 2, declstartpos)
                if j == -1:
                    return -1
                if name not in {"attlist", "element", "entity", "notation"}:
                    self.updatepos(declstartpos, j + 2)
                    self.error(
                        "unknown declaration %r in internal subset" % name)
                # handle the individual names
                meth = getattr(self, "_parse_doctype_" + name)
                j = meth(j, declstartpos)
                if j < 0:
                    return j
            elif c == "%":
                # parameter entity reference
                if (j + 1) == n:
                    # end of buffer; incomplete
                    return -1
                s, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                if rawdata[j] == ";":
                    j = j + 1
            elif c == "]":
                j = j + 1
                while j < n and rawdata[j].isspace():
                    j = j + 1
                if j < n:
                    if rawdata[j] == ">":
                        return j
                    self.updatepos(declstartpos, j)
                    self.error("unexpected char after internal subset")
                else:
                    return -1
            elif c.isspace():
                j = j + 1
            else:
                self.updatepos(declstartpos, j)
                self.error("unexpected char %r in internal subset" % c)
        # end of buffer reached
        return -1

    # Internal -- scan past <!ELEMENT declarations
    def _parse_doctype_element(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j == -1:
            return -1
        # style content model; just skip until '>'
        rawdata = self.rawdata
        if '>' in rawdata[j:]:
            return rawdata.find(">", j) + 1
        return -1

    # Internal -- scan past <!ATTLIST declarations
    def _parse_doctype_attlist(self, i, declstartpos):
        rawdata = self.rawdata
        name, j = self._scan_name(i, declstartpos)
        c = rawdata[j:j+1]
        if c == "":
            return -1
        if c == ">":
            return j + 1
        while 1:
            # scan a series of attribute descriptions; simplified:
            #   name type [value] [#constraint]
            name, j = self._scan_name(j, declstartpos)
            if j < 0:
                return j
            c = rawdata[j:j+1]
            if c == "":
                return -1
            if c == "(":
                # an enumerated type; look for ')'
                if ")" in rawdata[j:]:
                    j = rawdata.find(")", j) + 1
                else:
                    return -1
                while rawdata[j:j+1].isspace():
                    j = j + 1
                if not rawdata[j:]:
                    # end of buffer, incomplete
                    return -1
            else:
                name, j = self._scan_name(j, declstartpos)
            c = rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == "#":
                if rawdata[j:] == "#":
                    # end of buffer
                    return -1
                name, j = self._scan_name(j + 1, declstartpos)
                if j < 0:
                    return j
                c = rawdata[j:j+1]
                if not c:
                    return -1
            if c == '>':
                # all done
                return j + 1

    # Internal -- scan past <!NOTATION declarations
    def _parse_doctype_notation(self, i, declstartpos):
        name, j = self._scan_name(i, declstartpos)
        if j < 0:
            return j
        rawdata = self.rawdata
        while 1:
            c = rawdata[j:j+1]
            if not c:
                # end of buffer; incomplete
                return -1
            if c == '>':
                return j + 1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if not m:
                    return -1
                j = m.end()
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan past <!ENTITY declarations
    def _parse_doctype_entity(self, i, declstartpos):
        rawdata = self.rawdata
        if rawdata[i:i+1] == "%":
            j = i + 1
            while 1:
                c = rawdata[j:j+1]
                if not c:
                    return -1
                if c.isspace():
                    j = j + 1
                else:
                    break
        else:
            j = i
        name, j = self._scan_name(j, declstartpos)
        if j < 0:
            return j
        while 1:
            c = self.rawdata[j:j+1]
            if not c:
                return -1
            if c in "'\"":
                m = _declstringlit_match(rawdata, j)
                if m:
                    j = m.end()
                else:
                    return -1    # incomplete
            elif c == ">":
                return j + 1
            else:
                name, j = self._scan_name(j, declstartpos)
                if j < 0:
                    return j

    # Internal -- scan a name token and the new position and the token, or
    # return -1 if we've reached the end of the buffer.
    def _scan_name(self, i, declstartpos):
        rawdata = self.rawdata
        n = len(rawdata)
        if i == n:
            return None, -1
        m = _declname_match(rawdata, i)
        if m:
            s = m.group()
            name = s.strip()
            if (i + len(s)) == n:
                return None, -1  # end of buffer
            return name.lower(), m.end()
        else:
            self.updatepos(declstartpos, i)
            self.error("expected name token at %r"
                       % rawdata[declstartpos:declstartpos+20])

    # To be overridden -- handlers for unknown objects
    def unknown_decl(self, data):
        pass
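Sketch (editor's note, not part of this commit): a minimal ParserBase subclass driving parse_declaration() by hand; html.parser.HTMLParser is the real consumer of this machinery:

import _markupbase

class DoctypeSniffer(_markupbase.ParserBase):
    def __init__(self, rawdata):
        self.reset()              # sets lineno/offset
        self.rawdata = rawdata
        self.doctype = None

    def handle_decl(self, data):
        self.doctype = data

    def error(self, message):
        raise ValueError(message)

p = DoctypeSniffer("<!DOCTYPE html>")
p.parse_declaration(0)   # returns the index just past '>'
print(p.doctype)         # 'DOCTYPE html'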
575
Tool/Python39/Lib/_osx_support.py
Normal file
@ -0,0 +1,575 @@
|
||||
"""Shared OS X support functions."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
__all__ = [
|
||||
'compiler_fixup',
|
||||
'customize_config_vars',
|
||||
'customize_compiler',
|
||||
'get_platform_osx',
|
||||
]
|
||||
|
||||
# configuration variables that may contain universal build flags,
|
||||
# like "-arch" or "-isdkroot", that may need customization for
|
||||
# the user environment
|
||||
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
|
||||
'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
|
||||
'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
|
||||
'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS')
|
||||
|
||||
# configuration variables that may contain compiler calls
|
||||
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
|
||||
|
||||
# prefix added to original configuration variable names
|
||||
_INITPRE = '_OSX_SUPPORT_INITIAL_'
|
||||
|
||||
|
||||
def _find_executable(executable, path=None):
|
||||
"""Tries to find 'executable' in the directories listed in 'path'.
|
||||
|
||||
A string listing directories separated by 'os.pathsep'; defaults to
|
||||
os.environ['PATH']. Returns the complete filename or None if not found.
|
||||
"""
|
||||
if path is None:
|
||||
path = os.environ['PATH']
|
||||
|
||||
paths = path.split(os.pathsep)
|
||||
base, ext = os.path.splitext(executable)
|
||||
|
||||
if (sys.platform == 'win32') and (ext != '.exe'):
|
||||
executable = executable + '.exe'
|
||||
|
||||
if not os.path.isfile(executable):
|
||||
for p in paths:
|
||||
f = os.path.join(p, executable)
|
||||
if os.path.isfile(f):
|
||||
# the file exists, we have a shot at spawn working
|
||||
return f
|
||||
return None
|
||||
else:
|
||||
return executable
|
||||
|
||||
|
||||
def _read_output(commandstring, capture_stderr=False):
|
||||
"""Output from successful command execution or None"""
|
||||
# Similar to os.popen(commandstring, "r").read(),
|
||||
# but without actually using os.popen because that
|
||||
# function is not usable during python bootstrap.
|
||||
# tempfile is also not available then.
|
||||
import contextlib
|
||||
try:
|
||||
import tempfile
|
||||
fp = tempfile.NamedTemporaryFile()
|
||||
except ImportError:
|
||||
fp = open("/tmp/_osx_support.%s"%(
|
||||
os.getpid(),), "w+b")
|
||||
|
||||
with contextlib.closing(fp) as fp:
|
||||
if capture_stderr:
|
||||
cmd = "%s >'%s' 2>&1" % (commandstring, fp.name)
|
||||
else:
|
||||
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
|
||||
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
|
||||
|
||||
|
||||
def _find_build_tool(toolname):
|
||||
"""Find a build tool on current path or using xcrun"""
|
||||
return (_find_executable(toolname)
|
||||
or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
|
||||
or ''
|
||||
)
|
||||
|
||||
_SYSTEM_VERSION = None
|
||||
|
||||
def _get_system_version():
|
||||
"""Return the OS X system version as a string"""
|
||||
# Reading this plist is a documented way to get the system
|
||||
# version (see the documentation for the Gestalt Manager)
|
||||
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
|
||||
# the build of Python itself (distutils is used to build standard library
|
||||
# extensions).
|
||||
|
||||
global _SYSTEM_VERSION
|
||||
|
||||
if _SYSTEM_VERSION is None:
|
||||
_SYSTEM_VERSION = ''
|
||||
try:
|
||||
f = open('/System/Library/CoreServices/SystemVersion.plist')
|
||||
except OSError:
|
||||
# We're on a plain darwin box, fall back to the default
|
||||
# behaviour.
|
||||
pass
|
||||
else:
|
||||
try:
|
||||
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
|
||||
r'<string>(.*?)</string>', f.read())
|
||||
finally:
|
||||
f.close()
|
||||
if m is not None:
|
||||
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
|
||||
# else: fall back to the default behaviour
|
||||
|
||||
return _SYSTEM_VERSION
|
||||
|
||||
_SYSTEM_VERSION_TUPLE = None
|
||||
def _get_system_version_tuple():
|
||||
"""
|
||||
Return the macOS system version as a tuple
|
||||
|
||||
The return value is safe to use to compare
|
||||
two version numbers.
|
||||
"""
|
||||
global _SYSTEM_VERSION_TUPLE
|
||||
if _SYSTEM_VERSION_TUPLE is None:
|
||||
osx_version = _get_system_version()
|
||||
if osx_version:
|
||||
try:
|
||||
_SYSTEM_VERSION_TUPLE = tuple(int(i) for i in osx_version.split('.'))
|
||||
except ValueError:
|
||||
_SYSTEM_VERSION_TUPLE = ()
|
||||
|
||||
return _SYSTEM_VERSION_TUPLE
|
||||
|
||||
|
||||
def _remove_original_values(_config_vars):
|
||||
"""Remove original unmodified values for testing"""
|
||||
# This is needed for higher-level cross-platform tests of get_platform.
|
||||
for k in list(_config_vars):
|
||||
if k.startswith(_INITPRE):
|
||||
del _config_vars[k]
|
||||
|
||||
def _save_modified_value(_config_vars, cv, newvalue):
|
||||
"""Save modified and original unmodified value of configuration var"""
|
||||
|
||||
oldvalue = _config_vars.get(cv, '')
|
||||
if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
|
||||
_config_vars[_INITPRE + cv] = oldvalue
|
||||
_config_vars[cv] = newvalue
|
||||
|
||||
|
||||
_cache_default_sysroot = None
|
||||
def _default_sysroot(cc):
|
||||
""" Returns the root of the default SDK for this system, or '/' """
|
||||
global _cache_default_sysroot
|
||||
|
||||
if _cache_default_sysroot is not None:
|
||||
return _cache_default_sysroot
|
||||
|
||||
contents = _read_output('%s -c -E -v - </dev/null' % (cc,), True)
|
||||
in_incdirs = False
|
||||
for line in contents.splitlines():
|
||||
if line.startswith("#include <...>"):
|
||||
in_incdirs = True
|
||||
elif line.startswith("End of search list"):
|
||||
in_incdirs = False
|
||||
elif in_incdirs:
|
||||
line = line.strip()
|
||||
if line == '/usr/include':
|
||||
_cache_default_sysroot = '/'
|
||||
elif line.endswith(".sdk/usr/include"):
|
||||
_cache_default_sysroot = line[:-12]
|
||||
if _cache_default_sysroot is None:
|
||||
_cache_default_sysroot = '/'
|
||||
|
||||
return _cache_default_sysroot
|
||||
|
||||
def _supports_universal_builds():
|
||||
"""Returns True if universal builds are supported on this system"""
|
||||
# As an approximation, we assume that if we are running on 10.4 or above,
|
||||
# then we are running with an Xcode environment that supports universal
|
||||
# builds, in particular -isysroot and -arch arguments to the compiler. This
|
||||
# is in support of allowing 10.4 universal builds to run on 10.3.x systems.
|
||||
|
||||
osx_version = _get_system_version_tuple()
|
||||
return bool(osx_version >= (10, 4)) if osx_version else False
|
||||
|
||||
def _supports_arm64_builds():
|
||||
"""Returns True if arm64 builds are supported on this system"""
|
||||
# There are two sets of systems supporting macOS/arm64 builds:
|
||||
# 1. macOS 11 and later, unconditionally
|
||||
# 2. macOS 10.15 with Xcode 12.2 or later
|
||||
# For now the second category is ignored.
|
||||
osx_version = _get_system_version_tuple()
|
||||
return osx_version >= (11, 0) if osx_version else False
|
||||
|
||||
|
||||
def _find_appropriate_compiler(_config_vars):
|
||||
"""Find appropriate C compiler for extension module builds"""
|
||||
|
||||
# Issue #13590:
|
||||
# The OSX location for the compiler varies between OSX
|
||||
# (or rather Xcode) releases. With older releases (up-to 10.5)
|
||||
# the compiler is in /usr/bin, with newer releases the compiler
|
||||
# can only be found inside Xcode.app if the "Command Line Tools"
|
||||
# are not installed.
|
||||
#
|
||||
# Furthermore, the compiler that can be used varies between
|
||||
# Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
|
||||
# as the compiler, after that 'clang' should be used because
|
||||
# gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
|
||||
# miscompiles Python.
|
||||
|
||||
# skip checks if the compiler was overridden with a CC env variable
|
||||
if 'CC' in os.environ:
|
||||
return _config_vars
|
||||
|
||||
# The CC config var might contain additional arguments.
|
||||
# Ignore them while searching.
|
||||
cc = oldcc = _config_vars['CC'].split()[0]
|
||||
if not _find_executable(cc):
|
||||
# Compiler is not found on the shell search PATH.
|
||||
# Now search for clang, first on PATH (if the Command LIne
|
||||
# Tools have been installed in / or if the user has provided
|
||||
# another location via CC). If not found, try using xcrun
|
||||
# to find an uninstalled clang (within a selected Xcode).
|
||||
|
||||
# NOTE: Cannot use subprocess here because of bootstrap
|
||||
# issues when building Python itself (and os.popen is
|
||||
# implemented on top of subprocess and is therefore not
|
||||
# usable as well)
|
||||
|
||||
cc = _find_build_tool('clang')
|
||||
|
||||
elif os.path.basename(cc).startswith('gcc'):
|
||||
# Compiler is GCC, check if it is LLVM-GCC
|
||||
data = _read_output("'%s' --version"
|
||||
% (cc.replace("'", "'\"'\"'"),))
|
||||
if data and 'llvm-gcc' in data:
|
||||
# Found LLVM-GCC, fall back to clang
|
||||
cc = _find_build_tool('clang')
|
||||
|
||||
if not cc:
|
||||
raise SystemError(
|
||||
"Cannot locate working compiler")
|
||||
|
||||
if cc != oldcc:
|
||||
# Found a replacement compiler.
|
||||
# Modify config vars using new compiler, if not already explicitly
|
||||
# overridden by an env variable, preserving additional arguments.
|
||||
for cv in _COMPILER_CONFIG_VARS:
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
cv_split = _config_vars[cv].split()
|
||||
cv_split[0] = cc if cv != 'CXX' else cc + '++'
|
||||
_save_modified_value(_config_vars, cv, ' '.join(cv_split))
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _remove_universal_flags(_config_vars):
|
||||
"""Remove all universal build arguments from config vars"""
|
||||
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
# Do not alter a config var explicitly overridden by env var
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
|
||||
flags = re.sub(r'-isysroot\s*\S+', ' ', flags)
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _remove_unsupported_archs(_config_vars):
|
||||
"""Remove any unsupported archs from config vars"""
|
||||
# Different Xcode releases support different sets for '-arch'
|
||||
# flags. In particular, Xcode 4.x no longer supports the
|
||||
# PPC architectures.
|
||||
#
|
||||
# This code automatically removes '-arch ppc' and '-arch ppc64'
|
||||
# when these are not supported. That makes it possible to
|
||||
# build extensions on OSX 10.7 and later with the prebuilt
|
||||
# 32-bit installer on the python.org website.
|
||||
|
||||
# skip checks if the compiler was overridden with a CC env variable
|
||||
if 'CC' in os.environ:
|
||||
return _config_vars
|
||||
|
||||
if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
|
||||
# NOTE: Cannot use subprocess here because of bootstrap
|
||||
# issues when building Python itself
|
||||
status = os.system(
|
||||
"""echo 'int main{};' | """
|
||||
"""'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
|
||||
%(_config_vars['CC'].replace("'", "'\"'\"'"),))
|
||||
if status:
|
||||
# The compile failed for some reason. Because of differences
|
||||
# across Xcode and compiler versions, there is no reliable way
|
||||
# to be sure why it failed. Assume here it was due to lack of
|
||||
# PPC support and remove the related '-arch' flags from each
|
||||
# config variables not explicitly overridden by an environment
|
||||
# variable. If the error was for some other reason, we hope the
|
||||
# failure will show up again when trying to compile an extension
|
||||
# module.
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _override_all_archs(_config_vars):
|
||||
"""Allow override of all archs with ARCHFLAGS env var"""
|
||||
# NOTE: This name was introduced by Apple in OSX 10.5 and
|
||||
# is used by several scripting languages distributed with
|
||||
# that OS release.
|
||||
if 'ARCHFLAGS' in os.environ:
|
||||
arch = os.environ['ARCHFLAGS']
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
if cv in _config_vars and '-arch' in _config_vars[cv]:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
|
||||
flags = flags + ' ' + arch
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def _check_for_unavailable_sdk(_config_vars):
|
||||
"""Remove references to any SDKs not available"""
|
||||
# If we're on OSX 10.5 or later and the user tries to
|
||||
# compile an extension using an SDK that is not present
|
||||
# on the current machine it is better to not use an SDK
|
||||
# than to fail. This is particularly important with
|
||||
# the standalone Command Line Tools alternative to a
|
||||
# full-blown Xcode install since the CLT packages do not
|
||||
# provide SDKs. If the SDK is not present, it is assumed
|
||||
# that the header files and dev libs have been installed
|
||||
# to /usr and /System/Library by either a standalone CLT
|
||||
# package or the CLT component within Xcode.
|
||||
cflags = _config_vars.get('CFLAGS', '')
|
||||
m = re.search(r'-isysroot\s*(\S+)', cflags)
|
||||
if m is not None:
|
||||
sdk = m.group(1)
|
||||
if not os.path.exists(sdk):
|
||||
for cv in _UNIVERSAL_CONFIG_VARS:
|
||||
# Do not alter a config var explicitly overridden by env var
|
||||
if cv in _config_vars and cv not in os.environ:
|
||||
flags = _config_vars[cv]
|
||||
flags = re.sub(r'-isysroot\s*\S+(?:\s|$)', ' ', flags)
|
||||
_save_modified_value(_config_vars, cv, flags)
|
||||
|
||||
return _config_vars
|
||||
|
||||
|
||||
def compiler_fixup(compiler_so, cc_args):
|
||||
"""
|
||||
This function will strip '-isysroot PATH' and '-arch ARCH' from the
|
||||
compile flags if the user has specified one them in extra_compile_flags.
|
||||
|
||||
This is needed because '-arch ARCH' adds another architecture to the
|
||||
build, without a way to remove an architecture. Furthermore GCC will
|
||||
barf if multiple '-isysroot' arguments are present.
|
||||
"""
    stripArch = stripSysroot = False

    compiler_so = list(compiler_so)

    if not _supports_universal_builds():
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        stripArch = stripSysroot = True
    else:
        stripArch = '-arch' in cc_args
        stripSysroot = any(arg for arg in cc_args if arg.startswith('-isysroot'))

    if stripArch or 'ARCHFLAGS' in os.environ:
        while True:
            try:
                index = compiler_so.index('-arch')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break

    elif not _supports_arm64_builds():
        # Look for "-arch arm64" and drop that
        for idx in reversed(range(len(compiler_so))):
            if compiler_so[idx] == '-arch' and compiler_so[idx+1] == "arm64":
                del compiler_so[idx:idx+2]

    if 'ARCHFLAGS' in os.environ and not stripArch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()

    if stripSysroot:
        while True:
            indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]
            if not indices:
                break
            index = indices[0]
            if compiler_so[index] == '-isysroot':
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            else:
                # It's '-isysroot/some/path' in one arg
                del compiler_so[index:index+1]

    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    argvar = cc_args
    indices = [i for i,x in enumerate(cc_args) if x.startswith('-isysroot')]
    if not indices:
        argvar = compiler_so
        indices = [i for i,x in enumerate(compiler_so) if x.startswith('-isysroot')]

    for idx in indices:
        if argvar[idx] == '-isysroot':
            sysroot = argvar[idx+1]
            break
        else:
            sysroot = argvar[idx][len('-isysroot'):]
            break

    if sysroot and not os.path.isdir(sysroot):
        from distutils import log
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                sysroot)
        log.warn("Please check your Xcode installation")

    return compiler_so

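# Illustrative sketch (not part of the original file; the flag values are
# made up): how compiler_fixup behaves when the caller supplies its own
# -arch/-isysroot flags in cc_args.
#
#   import _osx_support
#   compiler_so = ['clang', '-arch', 'x86_64', '-arch', 'arm64',
#                  '-isysroot', '/old/SDK', '-O2']
#   cc_args = ['-arch', 'arm64', '-isysroot/new/SDK', '-c', 'mod.c']
#   print(_osx_support.compiler_fixup(compiler_so, cc_args))
#   # -> ['clang', '-O2']: every '-arch X' pair and both '-isysroot'
#   # spellings are stripped (a warning is logged if /new/SDK is missing).
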
def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping
    containing name/value pairs parsed from the configured
    makefile used to build this interpreter.  Returns
    the mapping updated as needed to reflect the environment
    in which the interpreter is running; in the case of
    a Python from a binary installer, the installed
    environment may be very different from the build
    environment, i.e. different OS levels, different
    build tools, different available CPU architectures.

    This customization is performed whenever
    distutils.sysconfig.get_config_vars() is first
    called.  It may be used in environments where no
    compilers are present, i.e. when installing pure
    Python dists.  Customization of compiler paths
    and detection of unavailable archs is deferred
    until the first extension module build is
    requested (in distutils.sysconfig.customize_compiler).

    Currently called from distutils.sysconfig
    """

    if not _supports_universal_builds():
        # On Mac OS X before 10.4, check if -arch and -isysroot
        # are in CFLAGS or LDFLAGS and remove them if they are.
        # This is needed when building extensions on a 10.3 system
        # using a universal build of python.
        _remove_universal_flags(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    # Remove references to sdks that are not found
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars


def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first
    extension module build is requested
    in distutils.sysconfig.customize_compiler.
    """

    # Find a compiler to use for extension module builds
    _find_appropriate_compiler(_config_vars)

    # Remove ppc arch flags if not supported here
    _remove_unsupported_archs(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    return _config_vars


def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform()"""
    # called from get_platform() in sysconfig and distutils.util
    #
    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to. This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.

    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE+'CFLAGS',
                                  _config_vars.get('CFLAGS', ''))
        if macrelease:
            try:
                macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
            except ValueError:
                macrelease = (10, 0)
        else:
            # assume no universal support
            macrelease = (10, 0)

        if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
            # The universal build will build fat binaries, but not on
            # systems before 10.4

            machine = 'fat'

            archs = re.findall(r'-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('arm64', 'x86_64'):
                machine = 'universal2'
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                    "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant
            if sys.maxsize >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxsize >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
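A quick sketch of how the arch-to-machine mapping above plays out (illustrative only; the config values below are made up):

import _osx_support

cfg = {'MACOSX_DEPLOYMENT_TARGET': '11.0',
       'CFLAGS': '-arch arm64 -arch x86_64 -O2'}
# The two -arch flags collapse to the sorted tuple ('arm64', 'x86_64'),
# which maps to the 'universal2' machine value.
print(_osx_support.get_platform_osx(cfg, 'posix', '', 'arm64'))
# -> ('macosx', '11.0', 'universal2')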
147
Tool/Python39/Lib/_py_abc.py
Normal file
@ -0,0 +1,147 @@
from _weakrefset import WeakSet


def get_cache_token():
    """Returns the current ABC cache token.

    The token is an opaque object (supporting equality testing) identifying the
    current version of the ABC cache for virtual subclasses. The token changes
    with every call to ``register()`` on any ABC.
    """
    return ABCMeta._abc_invalidation_counter


class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything.  It forces the
    # negative cache to be cleared before its next use.
    # Note: this counter is private. Use `abc.get_cache_token()` for
    #       external code.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace, /, **kwargs):
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)
        # Compute set of abstract method names
        abstracts = {name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False)}
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC.

        Returns the subclass, to allow usage as a class decorator.
        """
        if not isinstance(subclass, type):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return subclass  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
        return subclass

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
        print(f"Inv. counter: {get_cache_token()}", file=file)
        for name in cls.__dict__:
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                if isinstance(value, WeakSet):
                    value = set(value)
                print(f"{name}: {value!r}", file=file)

    def _abc_registry_clear(cls):
        """Clear the registry (for debugging or testing)."""
        cls._abc_registry.clear()

    def _abc_caches_clear(cls):
        """Clear the caches (for debugging or testing)."""
        cls._abc_cache.clear()
        cls._abc_negative_cache.clear()

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking
        subclass = instance.__class__
        if subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        if subtype is subclass:
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subclass in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subclass)
        return any(cls.__subclasscheck__(c) for c in (subclass, subtype))

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        if not isinstance(subclass, type):
            raise TypeError('issubclass() arg 1 must be a class')
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
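A minimal sketch of the virtual-subclass mechanics implemented above (the class names are made up):

from _py_abc import ABCMeta

class Serializer(metaclass=ABCMeta):
    pass

@Serializer.register            # register() returns the class, so it
class JSONSerializer:           # doubles as a decorator
    pass

print(issubclass(JSONSerializer, Serializer))  # True, via the registry
print(Serializer in JSONSerializer.__mro__)    # False, no real inheritance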
6410
Tool/Python39/Lib/_pydecimal.py
Normal file
File diff suppressed because it is too large
2692
Tool/Python39/Lib/_pyio.py
Normal file
File diff suppressed because it is too large
103
Tool/Python39/Lib/_sitebuiltins.py
Normal file
@ -0,0 +1,103 @@
"""
The objects used by the site module to add custom builtins.
"""

# Those objects are almost immortal and they keep a reference to their module
# globals.  Defining them in the site module would keep too many references
# alive.
# Note this means this module should also avoid keeping things alive in its
# globals.

import sys

class Quitter(object):
    def __init__(self, name, eof):
        self.name = name
        self.eof = eof
    def __repr__(self):
        return 'Use %s() or %s to exit' % (self.name, self.eof)
    def __call__(self, code=None):
        # Shells like IDLE catch the SystemExit, but listen when their
        # stdin wrapper is closed.
        try:
            sys.stdin.close()
        except:
            pass
        raise SystemExit(code)


class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        import os
        self.__name = name
        self.__data = data
        self.__lines = None
        self.__filenames = [os.path.join(dir, filename)
                            for dir in dirs
                            for filename in files]

    def __setup(self):
        if self.__lines:
            return
        data = None
        for filename in self.__filenames:
            try:
                with open(filename, "r") as fp:
                    data = fp.read()
                break
            except OSError:
                pass
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break


class _Helper(object):
    """Define the builtin 'help'.

    This is a wrapper around pydoc.help that provides a helpful message
    when 'help' is typed at the Python interactive prompt.

    Calling help() at the Python prompt starts an interactive help session.
    Calling help(thing) prints help for the python object 'thing'.
    """

    def __repr__(self):
        return "Type help() for interactive help, " \
               "or help(object) for help about object."
    def __call__(self, *args, **kwds):
        import pydoc
        return pydoc.help(*args, **kwds)
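A sketch of how site.py wires these objects into builtins (the exact names and texts site.py uses vary by platform; the values below are placeholders):

import builtins
from _sitebuiltins import Quitter, _Printer, _Helper

builtins.quit = Quitter('quit', 'Ctrl-D (i.e. EOF)')
builtins.copyright = _Printer('copyright', 'Copyright (c) ...')
builtins.help = _Helper()

print(repr(builtins.quit))  # Use quit() or Ctrl-D (i.e. EOF) to exit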
579
Tool/Python39/Lib/_strptime.py
Normal file
@ -0,0 +1,579 @@
"""Strptime-related classes and functions.

CLASSES:
    LocaleTime -- Discovers and stores locale-specific time information
    TimeRE -- Creates regexes for pattern matching a string of text containing
                time information

FUNCTIONS:
    _getlang -- Figure out what language is being used for the locale
    strptime -- Calculates the time struct represented by the passed-in string

"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import (date as datetime_date,
                      timedelta as datetime_timedelta,
                      timezone as datetime_timezone)
from _thread import allocate_lock as _thread_allocate_lock

__all__ = []

def _getlang():
    # Figure out what the current language is set to.
    return locale.getlocale(locale.LC_TIME)

class LocaleTime(object):
    """Stores and handles locale-specific information related to time.

    ATTRIBUTES:
        f_weekday -- full weekday names (7-item list)
        a_weekday -- abbreviated weekday names (7-item list)
        f_month -- full month names (13-item list; dummy value in [0], which
                    is added by code)
        a_month -- abbreviated month names (13-item list, dummy value in
                    [0], which is added by code)
        am_pm -- AM/PM representation (2-item list)
        LC_date_time -- format string for date/time representation (string)
        LC_date -- format string for date representation (string)
        LC_time -- format string for time representation (string)
        timezone -- daylight- and non-daylight-savings timezone representation
                    (2-item list of sets)
        lang -- Language used by instance (2-item tuple)
    """

    def __init__(self):
        """Set all attributes.

        Order of methods called matters for dependency reasons.

        The locale language is set at the outset and then checked again before
        exiting.  This is to make sure that the attributes were not set with a
        mix of information from more than one locale.  This would most likely
        happen when using threads where one thread calls a locale-dependent
        function while another thread changes the locale while the function in
        the other thread is still running.  Proper coding would call for
        locks to prevent changing the locale while locale-dependent code is
        running.  The check here is done in case someone does not think about
        doing this.

        Only other possible issue is if someone changed the timezone and did
        not call time.tzset().  That is an issue for the programmer, though,
        since changing the timezone is worthless without that call.

        """
        self.lang = _getlang()
        self.__calc_weekday()
        self.__calc_month()
        self.__calc_am_pm()
        self.__calc_timezone()
        self.__calc_date_time()
        if _getlang() != self.lang:
            raise ValueError("locale changed during initialization")
        if time.tzname != self.tzname or time.daylight != self.daylight:
            raise ValueError("timezone changed during initialization")

    def __calc_weekday(self):
        # Set self.a_weekday and self.f_weekday using the calendar
        # module.
        a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
        f_weekday = [calendar.day_name[i].lower() for i in range(7)]
        self.a_weekday = a_weekday
        self.f_weekday = f_weekday

    def __calc_month(self):
        # Set self.f_month and self.a_month using the calendar module.
        a_month = [calendar.month_abbr[i].lower() for i in range(13)]
        f_month = [calendar.month_name[i].lower() for i in range(13)]
        self.a_month = a_month
        self.f_month = f_month

    def __calc_am_pm(self):
        # Set self.am_pm by using time.strftime().

        # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
        # magical; just happened to have used it everywhere else where a
        # static date was needed.
        am_pm = []
        for hour in (1, 22):
            time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
            am_pm.append(time.strftime("%p", time_tuple).lower())
        self.am_pm = am_pm

    def __calc_date_time(self):
        # Set self.date_time, self.date, & self.time by using
        # time.strftime().

        # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
        # overloaded numbers is minimized.  The order in which searches for
        # values within the format string is very important; it eliminates
        # possible ambiguity for what something represents.
        time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
        date_time = [None, None, None]
        date_time[0] = time.strftime("%c", time_tuple).lower()
        date_time[1] = time.strftime("%x", time_tuple).lower()
        date_time[2] = time.strftime("%X", time_tuple).lower()
        replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
                    (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
                    (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
                    ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
                    ('44', '%M'), ('55', '%S'), ('76', '%j'),
                    ('17', '%d'), ('03', '%m'), ('3', '%m'),
                    # '3' needed for when no leading zero.
                    ('2', '%w'), ('10', '%I')]
        replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
                                                for tz in tz_values])
        for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
            current_format = date_time[offset]
            for old, new in replacement_pairs:
                # Must deal with possible lack of locale info
                # manifesting itself as the empty string (e.g., Swedish's
                # lack of AM/PM info) or a platform returning a tuple of empty
                # strings (e.g., MacOS 9 having timezone as ('','')).
                if old:
                    current_format = current_format.replace(old, new)
            # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
            # 2005-01-03 occurs before the first Monday of the year.  Otherwise
            # %U is used.
            time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
            if '00' in time.strftime(directive, time_tuple):
                U_W = '%W'
            else:
                U_W = '%U'
            date_time[offset] = current_format.replace('11', U_W)
        self.LC_date_time = date_time[0]
        self.LC_date = date_time[1]
        self.LC_time = date_time[2]

    def __calc_timezone(self):
        # Set self.timezone by using time.tzname.
        # Do not worry about possibility of time.tzname[0] == time.tzname[1]
        # and time.daylight; handle that in strptime.
        try:
            time.tzset()
        except AttributeError:
            pass
        self.tzname = time.tzname
        self.daylight = time.daylight
        no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()})
        if self.daylight:
            has_saving = frozenset({self.tzname[1].lower()})
        else:
            has_saving = frozenset()
        self.timezone = (no_saving, has_saving)


class TimeRE(dict):
    """Handle conversion from format directives to regexes."""

    def __init__(self, locale_time=None):
        """Create keys/values.

        Order of execution is important for dependency reasons.

        """
        if locale_time:
            self.locale_time = locale_time
        else:
            self.locale_time = LocaleTime()
        base = super()
        base.__init__({
            # The " [1-9]" part of the regex is to make %c from ANSI C work
            'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
            'f': r"(?P<f>[0-9]{1,6})",
            'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
            'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
            'G': r"(?P<G>\d\d\d\d)",
            'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
            'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
            'M': r"(?P<M>[0-5]\d|\d)",
            'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
            'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
            'w': r"(?P<w>[0-6])",
            'u': r"(?P<u>[1-7])",
            'V': r"(?P<V>5[0-3]|0[1-9]|[1-4]\d|\d)",
            # W is set below by using 'U'
            'y': r"(?P<y>\d\d)",
            #XXX: Does 'Y' need to worry about having less or more than
            #     4 digits?
            'Y': r"(?P<Y>\d\d\d\d)",
            'z': r"(?P<z>[+-]\d\d:?[0-5]\d(:?[0-5]\d(\.\d{1,6})?)?|(?-i:Z))",
            'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
            'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
            'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
            'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
            'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
            'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
                                        for tz in tz_names),
                                'Z'),
            '%': '%'})
        base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
        base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
        base.__setitem__('x', self.pattern(self.locale_time.LC_date))
        base.__setitem__('X', self.pattern(self.locale_time.LC_time))

    def __seqToRE(self, to_convert, directive):
        """Convert a list to a regex string for matching a directive.

        Want possible matching values to be from longest to shortest.  This
        prevents the possibility of a match occurring for a value that is
        also a substring of a larger value that should have matched (e.g.,
        'abc' matching when 'abcdef' should have been the match).

        """
        to_convert = sorted(to_convert, key=len, reverse=True)
        for value in to_convert:
            if value != '':
                break
        else:
            return ''
        regex = '|'.join(re_escape(stuff) for stuff in to_convert)
        regex = '(?P<%s>%s' % (directive, regex)
        return '%s)' % regex

    def pattern(self, format):
        """Return regex pattern for the format string.

        Need to make sure that any characters that might be interpreted as
        regex syntax are escaped.

        """
        processed_format = ''
        # The sub() call escapes all characters that might be misconstrued
        # as regex syntax.  Cannot use re.escape since we have to deal with
        # format directives (%m, etc.).
        regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
        format = regex_chars.sub(r"\\\1", format)
        whitespace_replacement = re_compile(r'\s+')
        format = whitespace_replacement.sub(r'\\s+', format)
        while '%' in format:
            directive_index = format.index('%')+1
            processed_format = "%s%s%s" % (processed_format,
                                           format[:directive_index-1],
                                           self[format[directive_index]])
            format = format[directive_index+1:]
        return "%s%s" % (processed_format, format)

    def compile(self, format):
        """Return a compiled re object for the format string."""
        return re_compile(self.pattern(format), IGNORECASE)

_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}

def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
    """Calculate the Julian day based on the year, week of the year, and day of
    the week, with week_start_day representing whether the week of the year
    assumes the week starts on Sunday or Monday (6 or 0)."""
    first_weekday = datetime_date(year, 1, 1).weekday()
    # If we are dealing with the %U directive (week starts on Sunday), it's
    # easier to just shift the view to Sunday being the first day of the
    # week.
    if not week_starts_Mon:
        first_weekday = (first_weekday + 1) % 7
        day_of_week = (day_of_week + 1) % 7
    # Need to watch out for a week 0 (when the first day of the year is not
    # the same as that specified by %U or %W).
    week_0_length = (7 - first_weekday) % 7
    if week_of_year == 0:
        return 1 + day_of_week - first_weekday
    else:
        days_to_week = week_0_length + (7 * (week_of_year - 1))
        return 1 + days_to_week + day_of_week


def _calc_julian_from_V(iso_year, iso_week, iso_weekday):
    """Calculate the Julian day based on the ISO 8601 year, week, and weekday.
    ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
    ISO week days range from 1 (Monday) to 7 (Sunday).
    """
    correction = datetime_date(iso_year, 1, 4).isoweekday() + 3
    ordinal = (iso_week * 7) + iso_weekday - correction
    # ordinal may be negative or 0 now, which means the date is in the previous
    # calendar year
    if ordinal < 1:
        ordinal += datetime_date(iso_year, 1, 1).toordinal()
        iso_year -= 1
        ordinal -= datetime_date(iso_year, 1, 1).toordinal()
    return iso_year, ordinal


def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a 2-tuple consisting of a time struct and an int containing
    the number of microseconds based on the input string and the
    format string."""

    for index, arg in enumerate([data_string, format]):
        if not isinstance(arg, str):
            msg = "strptime() argument {} must be str, not {}"
            raise TypeError(msg.format(index, type(arg)))

    global _TimeRE_cache, _regex_cache
    with _cache_lock:
        locale_time = _TimeRE_cache.locale_time
        if (_getlang() != locale_time.lang or
            time.tzname != locale_time.tzname or
            time.daylight != locale_time.daylight):
            _TimeRE_cache = TimeRE()
            _regex_cache.clear()
            locale_time = _TimeRE_cache.locale_time
        if len(_regex_cache) > _CACHE_MAX_SIZE:
            _regex_cache.clear()
        format_regex = _regex_cache.get(format)
        if not format_regex:
            try:
                format_regex = _TimeRE_cache.compile(format)
            # KeyError raised when a bad format is found; can be specified as
            # \\, in which case it was a stray % but with a space after it
            except KeyError as err:
                bad_directive = err.args[0]
                if bad_directive == "\\":
                    bad_directive = "%"
                del err
                raise ValueError("'%s' is a bad directive in format '%s'" %
                                    (bad_directive, format)) from None
            # IndexError only occurs when the format string is "%"
            except IndexError:
                raise ValueError("stray %% in format '%s'" % format) from None
            _regex_cache[format] = format_regex
    found = format_regex.match(data_string)
    if not found:
        raise ValueError("time data %r does not match format %r" %
                         (data_string, format))
    if len(data_string) != found.end():
        raise ValueError("unconverted data remains: %s" %
                          data_string[found.end():])

    iso_year = year = None
    month = day = 1
    hour = minute = second = fraction = 0
    tz = -1
    gmtoff = None
    gmtoff_fraction = 0
    # Default to -1 to signify that values not known; not critical to have,
    # though
    iso_week = week_of_year = None
    week_of_year_start = None
    # weekday and julian defaulted to None so as to signal need to calculate
    # values
    weekday = julian = None
    found_dict = found.groupdict()
    for group_key in found_dict.keys():
        # Directives not explicitly handled below:
        #   c, x, X
        #      handled by making out of other directives
        #   U, W
        #      worthless without day of the week
        if group_key == 'y':
            year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            #value in the range of [00, 68] is in the century 2000, while
            #[69,99] is in the century 1900
            if year <= 68:
                year += 2000
            else:
                year += 1900
        elif group_key == 'Y':
            year = int(found_dict['Y'])
        elif group_key == 'G':
            iso_year = int(found_dict['G'])
        elif group_key == 'm':
            month = int(found_dict['m'])
        elif group_key == 'B':
            month = locale_time.f_month.index(found_dict['B'].lower())
        elif group_key == 'b':
            month = locale_time.a_month.index(found_dict['b'].lower())
        elif group_key == 'd':
            day = int(found_dict['d'])
        elif group_key == 'H':
            hour = int(found_dict['H'])
        elif group_key == 'I':
            hour = int(found_dict['I'])
            ampm = found_dict.get('p', '').lower()
            # If there was no AM/PM indicator, we'll treat this like AM
            if ampm in ('', locale_time.am_pm[0]):
                # We're in AM so the hour is correct unless we're
                # looking at 12 midnight.
                # 12 midnight == 12 AM == hour 0
                if hour == 12:
                    hour = 0
            elif ampm == locale_time.am_pm[1]:
                # We're in PM so we need to add 12 to the hour unless
                # we're looking at 12 noon.
                # 12 noon == 12 PM == hour 12
                if hour != 12:
                    hour += 12
        elif group_key == 'M':
            minute = int(found_dict['M'])
        elif group_key == 'S':
            second = int(found_dict['S'])
        elif group_key == 'f':
            s = found_dict['f']
            # Pad to always return microseconds.
            s += "0" * (6 - len(s))
            fraction = int(s)
        elif group_key == 'A':
            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
        elif group_key == 'a':
            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
        elif group_key == 'w':
            weekday = int(found_dict['w'])
            if weekday == 0:
                weekday = 6
            else:
                weekday -= 1
        elif group_key == 'u':
            weekday = int(found_dict['u'])
            weekday -= 1
        elif group_key == 'j':
            julian = int(found_dict['j'])
        elif group_key in ('U', 'W'):
            week_of_year = int(found_dict[group_key])
            if group_key == 'U':
                # U starts week on Sunday.
                week_of_year_start = 6
            else:
                # W starts week on Monday.
                week_of_year_start = 0
        elif group_key == 'V':
            iso_week = int(found_dict['V'])
        elif group_key == 'z':
            z = found_dict['z']
            if z == 'Z':
                gmtoff = 0
            else:
                if z[3] == ':':
                    z = z[:3] + z[4:]
                    if len(z) > 5:
                        if z[5] != ':':
                            msg = f"Inconsistent use of : in {found_dict['z']}"
                            raise ValueError(msg)
                        z = z[:5] + z[6:]
                hours = int(z[1:3])
                minutes = int(z[3:5])
                seconds = int(z[5:7] or 0)
                gmtoff = (hours * 60 * 60) + (minutes * 60) + seconds
                gmtoff_remainder = z[8:]
                # Pad to always return microseconds.
                gmtoff_remainder_padding = "0" * (6 - len(gmtoff_remainder))
                gmtoff_fraction = int(gmtoff_remainder + gmtoff_remainder_padding)
                if z.startswith("-"):
                    gmtoff = -gmtoff
                    gmtoff_fraction = -gmtoff_fraction
        elif group_key == 'Z':
            # Since -1 is default value only need to worry about setting tz if
            # it can be something other than -1.
            found_zone = found_dict['Z'].lower()
            for value, tz_values in enumerate(locale_time.timezone):
                if found_zone in tz_values:
                    # Deal with bad locale setup where timezone names are the
                    # same and yet time.daylight is true; too ambiguous to
                    # be able to tell what timezone has daylight savings
                    if (time.tzname[0] == time.tzname[1] and
                       time.daylight and found_zone not in ("utc", "gmt")):
                        break
                    else:
                        tz = value
                        break
    # Deal with the cases where ambiguities arise
    # don't assume default values for ISO week/year
    if year is None and iso_year is not None:
        if iso_week is None or weekday is None:
            raise ValueError("ISO year directive '%G' must be used with "
                             "the ISO week directive '%V' and a weekday "
                             "directive ('%A', '%a', '%w', or '%u').")
        if julian is not None:
            raise ValueError("Day of the year directive '%j' is not "
                             "compatible with ISO year directive '%G'. "
                             "Use '%Y' instead.")
    elif week_of_year is None and iso_week is not None:
        if weekday is None:
            raise ValueError("ISO week directive '%V' must be used with "
                             "the ISO year directive '%G' and a weekday "
                             "directive ('%A', '%a', '%w', or '%u').")
        else:
            raise ValueError("ISO week directive '%V' is incompatible with "
                             "the year directive '%Y'. Use the ISO year '%G' "
                             "instead.")

    leap_year_fix = False
    if year is None and month == 2 and day == 29:
        year = 1904  # 1904 is first leap year of 20th century
        leap_year_fix = True
    elif year is None:
        year = 1900


    # If we know the week of the year and what day of that week, we can figure
    # out the Julian day of the year.
    if julian is None and weekday is not None:
        if week_of_year is not None:
            week_starts_Mon = True if week_of_year_start == 0 else False
            julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                              week_starts_Mon)
        elif iso_year is not None and iso_week is not None:
            year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1)
        if julian is not None and julian <= 0:
            year -= 1
            yday = 366 if calendar.isleap(year) else 365
            julian += yday

    if julian is None:
        # Cannot pre-calculate datetime_date() since can change in Julian
        # calculation and thus could have different value for the day of
        # the week calculation.
        # Need to add 1 to result since first day of the year is 1, not 0.
        julian = datetime_date(year, month, day).toordinal() - \
                  datetime_date(year, 1, 1).toordinal() + 1
    else:  # Assume that if they bothered to include Julian day (or if it was
           # calculated above with year/week/weekday) it will be accurate.
        datetime_result = datetime_date.fromordinal(
                            (julian - 1) +
                            datetime_date(year, 1, 1).toordinal())
        year = datetime_result.year
        month = datetime_result.month
        day = datetime_result.day
    if weekday is None:
        weekday = datetime_date(year, month, day).weekday()
    # Add timezone info
    tzname = found_dict.get("Z")

    if leap_year_fix:
        # the caller didn't supply a year but asked for Feb 29th. We couldn't
        # use the default of 1900 for computations. We set it back to ensure
        # that February 29th is smaller than March 1st.
        year = 1900

    return (year, month, day,
            hour, minute, second,
            weekday, julian, tz, tzname, gmtoff), fraction, gmtoff_fraction

def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a time struct based on the input string and the
    format string."""
    tt = _strptime(data_string, format)[0]
    return time.struct_time(tt[:time._STRUCT_TM_ITEMS])

def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a class cls instance based on the input string and the
    format string."""
    tt, fraction, gmtoff_fraction = _strptime(data_string, format)
    tzname, gmtoff = tt[-2:]
    args = tt[:6] + (fraction,)
    if gmtoff is not None:
        tzdelta = datetime_timedelta(seconds=gmtoff, microseconds=gmtoff_fraction)
        if tzname:
            tz = datetime_timezone(tzdelta, tzname)
        else:
            tz = datetime_timezone(tzdelta)
        args += (tz,)

    return cls(*args)
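Both time.strptime and datetime.strptime delegate to this module; a small sketch of the parsing rules enforced above:

import time
from datetime import datetime

# The 'magic date' used throughout this module: day-of-year 76.
tt = time.strptime("17 Mar 1999 22:44:55", "%d %b %Y %H:%M:%S")
print(tt.tm_yday)                                # 76

# %G (ISO year) is only accepted together with %V and a weekday directive:
d = datetime.strptime("2021-W01-4", "%G-W%V-%u")
print(d.date())                                  # 2021-01-07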
242
Tool/Python39/Lib/_threading_local.py
Normal file
@ -0,0 +1,242 @@
"""Thread-local objects.

(Note that this module provides a Python version of the threading.local
 class.  Depending on the version of Python you're using, there may be a
 faster one available.  You should always import the `local` class from
 `threading`.)

Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:

  >>> mydata = local()
  >>> mydata.number = 42
  >>> mydata.number
  42

You can also access the local-object's dictionary:

  >>> mydata.__dict__
  {'number': 42}
  >>> mydata.__dict__.setdefault('widgets', [])
  []
  >>> mydata.widgets
  []

What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:

  >>> log = []
  >>> def f():
  ...     items = sorted(mydata.__dict__.items())
  ...     log.append(items)
  ...     mydata.number = 11
  ...     log.append(mydata.number)

  >>> import threading
  >>> thread = threading.Thread(target=f)
  >>> thread.start()
  >>> thread.join()
  >>> log
  [[], 11]

we get different data.  Furthermore, changes made in the other thread
don't affect data seen in this thread:

  >>> mydata.number
  42

Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read.  For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.

You can create custom local objects by subclassing the local class:

  >>> class MyLocal(local):
  ...     number = 2
  ...     def __init__(self, /, **kw):
  ...         self.__dict__.update(kw)
  ...     def squared(self):
  ...         return self.number ** 2

This can be useful to support default values, methods and
initialization.  Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread.  This
is necessary to initialize each thread's dictionary.

Now if we create a local object:

  >>> mydata = MyLocal(color='red')

Now we have a default number:

  >>> mydata.number
  2

an initial color:

  >>> mydata.color
  'red'
  >>> del mydata.color

And a method that operates on the data:

  >>> mydata.squared()
  4

As before, we can access the data in a separate thread:

  >>> log = []
  >>> thread = threading.Thread(target=f)
  >>> thread.start()
  >>> thread.join()
  >>> log
  [[('color', 'red')], 11]

without affecting this thread's data:

  >>> mydata.number
  2
  >>> mydata.color
  Traceback (most recent call last):
  ...
  AttributeError: 'MyLocal' object has no attribute 'color'

Note that subclasses can define slots, but they are not thread
local. They are shared across threads:

  >>> class MyLocal(local):
  ...     __slots__ = 'number'

  >>> mydata = MyLocal()
  >>> mydata.number = 42
  >>> mydata.color = 'red'

So, the separate thread:

  >>> thread = threading.Thread(target=f)
  >>> thread.start()
  >>> thread.join()

affects what we see:

  >>> mydata.number
  11

>>> del mydata
"""

from weakref import ref
from contextlib import contextmanager

__all__ = ["local"]

# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module.  This creates potential problems
# with circular imports.  For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems).  Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.

class _localimpl:
    """A class managing thread-local dicts"""
    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'

    def __init__(self):
        # The key used in the Thread objects' attribute dicts.
        # We keep it a string for speed but make it unlikely to clash with
        # a "real" attribute.
        self.key = '_threading_local._localimpl.' + str(id(self))
        # { id(Thread) -> (ref(Thread), thread-local dict) }
        self.dicts = {}

    def get_dict(self):
        """Return the dict for the current thread. Raises KeyError if none
        defined."""
        thread = current_thread()
        return self.dicts[id(thread)][1]

    def create_dict(self):
        """Create a new dict for the current thread, and return it."""
        localdict = {}
        key = self.key
        thread = current_thread()
        idt = id(thread)
        def local_deleted(_, key=key):
            # When the localimpl is deleted, remove the thread attribute.
            thread = wrthread()
            if thread is not None:
                del thread.__dict__[key]
        def thread_deleted(_, idt=idt):
            # When the thread is deleted, remove the local dict.
            # Note that this is suboptimal if the thread object gets
            # caught in a reference loop. We would like to be called
            # as soon as the OS-level thread ends instead.
            local = wrlocal()
            if local is not None:
                dct = local.dicts.pop(idt)
        wrlocal = ref(self, local_deleted)
        wrthread = ref(thread, thread_deleted)
        thread.__dict__[key] = wrlocal
        self.dicts[idt] = wrthread, localdict
        return localdict


@contextmanager
def _patch(self):
    impl = object.__getattribute__(self, '_local__impl')
    try:
        dct = impl.get_dict()
    except KeyError:
        dct = impl.create_dict()
        args, kw = impl.localargs
        self.__init__(*args, **kw)
    with impl.locallock:
        object.__setattr__(self, '__dict__', dct)
        yield


class local:
    __slots__ = '_local__impl', '__dict__'

    def __new__(cls, /, *args, **kw):
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")
        self = object.__new__(cls)
        impl = _localimpl()
        impl.localargs = (args, kw)
        impl.locallock = RLock()
        object.__setattr__(self, '_local__impl', impl)
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        impl.create_dict()
        return self

    def __getattribute__(self, name):
        with _patch(self):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__delattr__(self, name)


from threading import current_thread, RLock
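A minimal sketch of the per-thread isolation provided by local():

import threading
from _threading_local import local   # normally imported from threading

mydata = local()
mydata.number = 42

def worker():
    mydata.number = 11               # rebinding is invisible to other threads
    print('worker sees', mydata.number)

t = threading.Thread(target=worker)
t.start(); t.join()
print('main sees', mydata.number)    # still 42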
206
Tool/Python39/Lib/_weakrefset.py
Normal file
@ -0,0 +1,206 @@
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.

from _weakref import ref
from types import GenericAlias

__all__ = ['WeakSet']


class _IterationGuard:
    # This context manager registers itself in the current iterators of the
    # weak container, such as to delay all removals until the context manager
    # exits.
    # This technique should be relatively thread-safe (since sets are).

    def __init__(self, weakcontainer):
        # Don't create cycles
        self.weakcontainer = ref(weakcontainer)

    def __enter__(self):
        w = self.weakcontainer()
        if w is not None:
            w._iterating.add(self)
        return self

    def __exit__(self, e, t, b):
        w = self.weakcontainer()
        if w is not None:
            s = w._iterating
            s.remove(self)
            if not s:
                w._commit_removals()


class WeakSet:
    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)

    def _commit_removals(self):
        pop = self._pending_removals.pop
        discard = self.data.discard
        while True:
            try:
                item = pop()
            except IndexError:
                return
            discard(item)

    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    # Caveat: the iterator will keep a strong reference to
                    # `item` until it is resumed or closed.
                    yield item

    def __len__(self):
        return len(self.data) - len(self._pending_removals)

    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            return False
        return wr in self.data

    def __reduce__(self):
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))

    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))

    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet') from None
            item = itemref()
            if item is not None:
                return item

    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))

    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))

    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)

    def __ior__(self, other):
        self.update(other)
        return self

    def difference(self, other):
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = difference

    def difference_update(self, other):
        self.__isub__(other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self

    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection

    def intersection_update(self, other):
        self.__iand__(other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self

    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset

    def __lt__(self, other):
        return self.data < set(map(ref, other))

    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset

    def __gt__(self, other):
        return self.data > set(map(ref, other))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(map(ref, other))

    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference

    def symmetric_difference_update(self, other):
        self.__ixor__(other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self

    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union

    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0

    def __repr__(self):
        return repr(self.data)

    __class_getitem__ = classmethod(GenericAlias)
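A short sketch of the automatic cleanup WeakSet provides (on CPython, reference counting collects the object immediately):

from _weakrefset import WeakSet      # normally exposed as weakref.WeakSet

class Token:
    pass

s = WeakSet()
t = Token()
s.add(t)
print(len(s))   # 1
del t           # last strong reference gone; the removal callback fires
print(len(s))   # 0 on CPython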
150
Tool/Python39/Lib/abc.py
Normal file
@ -0,0 +1,150 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) according to PEP 3119."""


def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract methods are overridden.
    The abstract methods can be called using any of the normal
    'super' call mechanisms.  abstractmethod() may be used to declare
    abstract methods for properties and descriptors.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    funcobj.__isabstractmethod__ = True
    return funcobj


class abstractclassmethod(classmethod):
    """A decorator indicating abstract classmethods.

    Deprecated, use 'classmethod' with 'abstractmethod' instead:

        class C(ABC):
            @classmethod
            @abstractmethod
            def my_abstract_classmethod(cls, ...):
                ...

    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        callable.__isabstractmethod__ = True
        super().__init__(callable)


class abstractstaticmethod(staticmethod):
    """A decorator indicating abstract staticmethods.

    Deprecated, use 'staticmethod' with 'abstractmethod' instead:

        class C(ABC):
            @staticmethod
            @abstractmethod
            def my_abstract_staticmethod(...):
                ...

    """

    __isabstractmethod__ = True

    def __init__(self, callable):
        callable.__isabstractmethod__ = True
        super().__init__(callable)


class abstractproperty(property):
    """A decorator indicating abstract properties.

    Deprecated, use 'property' with 'abstractmethod' instead:

        class C(ABC):
            @property
            @abstractmethod
            def my_abstract_property(self):
                ...

    """

    __isabstractmethod__ = True


try:
    from _abc import (get_cache_token, _abc_init, _abc_register,
                      _abc_instancecheck, _abc_subclasscheck, _get_dump,
                      _reset_registry, _reset_caches)
except ImportError:
    from _py_abc import ABCMeta, get_cache_token
    ABCMeta.__module__ = 'abc'
else:
    class ABCMeta(type):
        """Metaclass for defining Abstract Base Classes (ABCs).

        Use this metaclass to create an ABC.  An ABC can be subclassed
        directly, and then acts as a mix-in class.  You can also register
        unrelated concrete classes (even built-in classes) and unrelated
        ABCs as 'virtual subclasses' -- these and their descendants will
        be considered subclasses of the registering ABC by the built-in
        issubclass() function, but the registering ABC won't show up in
        their MRO (Method Resolution Order) nor will method
        implementations defined by the registering ABC be callable (not
        even via super()).
        """
        def __new__(mcls, name, bases, namespace, **kwargs):
            cls = super().__new__(mcls, name, bases, namespace, **kwargs)
            _abc_init(cls)
            return cls

        def register(cls, subclass):
            """Register a virtual subclass of an ABC.

            Returns the subclass, to allow usage as a class decorator.
            """
            return _abc_register(cls, subclass)

        def __instancecheck__(cls, instance):
            """Override for isinstance(instance, cls)."""
            return _abc_instancecheck(cls, instance)

        def __subclasscheck__(cls, subclass):
            """Override for issubclass(subclass, cls)."""
            return _abc_subclasscheck(cls, subclass)

        def _dump_registry(cls, file=None):
            """Debug helper to print the ABC registry."""
            print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
            print(f"Inv. counter: {get_cache_token()}", file=file)
            (_abc_registry, _abc_cache, _abc_negative_cache,
             _abc_negative_cache_version) = _get_dump(cls)
            print(f"_abc_registry: {_abc_registry!r}", file=file)
            print(f"_abc_cache: {_abc_cache!r}", file=file)
            print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file)
            print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}",
                  file=file)

        def _abc_registry_clear(cls):
            """Clear the registry (for debugging or testing)."""
            _reset_registry(cls)

        def _abc_caches_clear(cls):
            """Clear the caches (for debugging or testing)."""
            _reset_caches(cls)


class ABC(metaclass=ABCMeta):
    """Helper class that provides a standard way to create an ABC using
    inheritance.
    """
    __slots__ = ()

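For reference, a minimal sketch of how the decorators above are meant to be combined; the class names here are illustrative and not part of the commit:

    from abc import ABC, abstractmethod

    class Codec(ABC):                          # hypothetical example class
        @abstractmethod
        def encode(self, data: bytes) -> bytes:
            ...

    class Identity(Codec):
        def encode(self, data: bytes) -> bytes:
            return data                        # all abstract methods overridden

    # Codec() raises TypeError; Identity() instantiates fine.
    assert isinstance(Identity(), Codec)
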
947
Tool/Python39/Lib/aifc.py
Normal file
@ -0,0 +1,947 @@
"""Stuff to parse AIFF-C and AIFF files.

Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.

An AIFF-C file has the following structure.

  +-----------------+
  | FORM            |
  +-----------------+
  | <size>          |
  +----+------------+
  |    | AIFC       |
  |    +------------+
  |    | <chunks>   |
  |    |    .       |
  |    |    .       |
  |    |    .       |
  +----+------------+

An AIFF file has the string "AIFF" instead of "AIFC".

A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data.  The size field does not include
the size of the 8 byte header.

The following chunk types are recognized.

  FVER
      <version number of AIFF-C defining document> (AIFF-C only).
  MARK
      <# of markers> (2 bytes)
      list of markers:
          <marker ID> (2 bytes, must be > 0)
          <position> (4 bytes)
          <marker name> ("pstring")
  COMM
      <# of channels> (2 bytes)
      <# of sound frames> (4 bytes)
      <size of the samples> (2 bytes)
      <sampling frequency> (10 bytes, IEEE 80-bit extended
          floating point)
      in AIFF-C files only:
      <compression type> (4 bytes)
      <human-readable version of compression type> ("pstring")
  SSND
      <offset> (4 bytes, not used by this program)
      <blocksize> (4 bytes, not used by this program)
      <sound data>

A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.

Usage.

Reading AIFF files:
  f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.

This returns an instance of a class with the following public methods:
  getnchannels()  -- returns number of audio channels (1 for
             mono, 2 for stereo)
  getsampwidth()  -- returns sample width in bytes
  getframerate()  -- returns sampling frequency
  getnframes()    -- returns number of audio frames
  getcomptype()   -- returns compression type ('NONE' for AIFF files)
  getcompname()   -- returns human-readable version of
             compression type ('not compressed' for AIFF files)
  getparams()     -- returns a namedtuple consisting of all of the
             above in the above order
  getmarkers()    -- get the list of marks in the audio file or None
             if there are no marks
  getmark(id)     -- get mark with the specified id (raises an error
             if the mark does not exist)
  readframes(n)   -- returns at most n frames of audio
  rewind()        -- rewind to the beginning of the audio stream
  setpos(pos)     -- seek to the specified position
  tell()          -- return the current position
  close()         -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.

Writing AIFF files:
  f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().

This returns an instance of a class with the following public methods:
  aiff()          -- create an AIFF file (AIFF-C default)
  aifc()          -- create an AIFF-C file
  setnchannels(n) -- set the number of channels
  setsampwidth(n) -- set the sample width
  setframerate(n) -- set the frame rate
  setnframes(n)   -- set the number of frames
  setcomptype(type, name)
          -- set the compression type and the
             human-readable compression type
  setparams(tuple)
          -- set all parameters at once
  setmark(id, pos, name)
          -- add specified mark to the list of marks
  tell()          -- return current position in output file (useful
             in combination with setmark())
  writeframesraw(data)
          -- write audio frames without patching up the
             file header
  writeframes(data)
          -- write audio frames and patch up the file header
  close()         -- patch up the file header and close the
             output file
You should set the parameters before the first writeframesraw or
writeframes.  The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes(b'') or
close() to patch up the sizes in the header.
Marks can be added anytime.  If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.

When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written.  This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""

import struct
import builtins
import warnings

__all__ = ["Error", "open"]

class Error(Exception):
    pass

_AIFC_version = 0xA2805140     # Version 1 of AIFF-C

def _read_long(file):
    try:
        return struct.unpack('>l', file.read(4))[0]
    except struct.error:
        raise EOFError from None

def _read_ulong(file):
    try:
        return struct.unpack('>L', file.read(4))[0]
    except struct.error:
        raise EOFError from None

def _read_short(file):
    try:
        return struct.unpack('>h', file.read(2))[0]
    except struct.error:
        raise EOFError from None

def _read_ushort(file):
    try:
        return struct.unpack('>H', file.read(2))[0]
    except struct.error:
        raise EOFError from None

def _read_string(file):
    length = ord(file.read(1))
    if length == 0:
        data = b''
    else:
        data = file.read(length)
    if length & 1 == 0:
        dummy = file.read(1)
    return data

_HUGE_VAL = 1.79769313486231e+308  # See <limits.h>

def _read_float(f): # 10 bytes
    expon = _read_short(f) # 2 bytes
    sign = 1
    if expon < 0:
        sign = -1
        expon = expon + 0x8000
    himant = _read_ulong(f) # 4 bytes
    lomant = _read_ulong(f) # 4 bytes
    if expon == himant == lomant == 0:
        f = 0.0
    elif expon == 0x7FFF:
        f = _HUGE_VAL
    else:
        expon = expon - 16383
        f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
    return sign * f

def _write_short(f, x):
    f.write(struct.pack('>h', x))

def _write_ushort(f, x):
    f.write(struct.pack('>H', x))

def _write_long(f, x):
    f.write(struct.pack('>l', x))

def _write_ulong(f, x):
    f.write(struct.pack('>L', x))

def _write_string(f, s):
    if len(s) > 255:
        raise ValueError("string exceeds maximum pstring length")
    f.write(struct.pack('B', len(s)))
    f.write(s)
    if len(s) & 1 == 0:
        f.write(b'\x00')

def _write_float(f, x):
    import math
    if x < 0:
        sign = 0x8000
        x = x * -1
    else:
        sign = 0
    if x == 0:
        expon = 0
        himant = 0
        lomant = 0
    else:
        fmant, expon = math.frexp(x)
        if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
            expon = sign|0x7FFF
            himant = 0
            lomant = 0
        else:                   # Finite
            expon = expon + 16382
            if expon < 0:       # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            himant = int(fsmant)
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = int(fsmant)
    _write_ushort(f, expon)
    _write_ulong(f, himant)
    _write_ulong(f, lomant)

from chunk import Chunk
from collections import namedtuple

_aifc_params = namedtuple('_aifc_params',
                          'nchannels sampwidth framerate nframes comptype compname')

_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)'
_aifc_params.sampwidth.__doc__ = 'Sample width in bytes'
_aifc_params.framerate.__doc__ = 'Sampling frequency'
_aifc_params.nframes.__doc__ = 'Number of audio frames'
_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)'
_aifc_params.compname.__doc__ = ("""\
A human-readable version of the compression type
('not compressed' for AIFF files)""")


class Aifc_read:
    # Variables used in this class:
    #
    # These variables are available to the user through appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #       set through the __init__() method
    # _nchannels -- the number of audio channels
    #       available through the getnchannels() method
    # _nframes -- the number of audio frames
    #       available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #       available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #       available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #       available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #       available through the getcomptype() method
    # _markers -- the marks in the audio file
    #       available through the getmarkers() and getmark()
    #       methods
    # _soundpos -- the position in the audio stream
    #       available through the tell() method, set through the
    #       setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #       file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file

    _file = None  # Set here since __del__ checks it

    def initfp(self, file):
        self._version = 0
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = file
        chunk = Chunk(file)
        if chunk.getname() != b'FORM':
            raise Error('file does not start with FORM id')
        formdata = chunk.read(4)
        if formdata == b'AIFF':
            self._aifc = 0
        elif formdata == b'AIFC':
            self._aifc = 1
        else:
            raise Error('not an AIFF or AIFF-C file')
        self._comm_chunk_read = 0
        self._ssnd_chunk = None
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == b'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == b'SSND':
                self._ssnd_chunk = chunk
                dummy = chunk.read(8)
                self._ssnd_seek_needed = 0
            elif chunkname == b'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == b'MARK':
                self._readmark(chunk)
            chunk.skip()
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error('COMM chunk and/or SSND chunk missing')

    def __init__(self, f):
        if isinstance(f, str):
            file_object = builtins.open(f, 'rb')
            try:
                self.initfp(file_object)
            except:
                file_object.close()
                raise
        else:
            # assume it is an open file object already
            self.initfp(f)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def getfp(self):
        return self._file

    def rewind(self):
        self._ssnd_seek_needed = 1
        self._soundpos = 0

    def close(self):
        file = self._file
        if file is not None:
            self._file = None
            file.close()

    def tell(self):
        return self._soundpos

    def getnchannels(self):
        return self._nchannels

    def getnframes(self):
        return self._nframes

    def getsampwidth(self):
        return self._sampwidth

    def getframerate(self):
        return self._framerate

    def getcomptype(self):
        return self._comptype

    def getcompname(self):
        return self._compname

##  def getversion(self):
##      return self._version

    def getparams(self):
        return _aifc_params(self.getnchannels(), self.getsampwidth(),
                            self.getframerate(), self.getnframes(),
                            self.getcomptype(), self.getcompname())

    def getmarkers(self):
        if len(self._markers) == 0:
            return None
        return self._markers

    def getmark(self, id):
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def setpos(self, pos):
        if pos < 0 or pos > self._nframes:
            raise Error('position not in range')
        self._soundpos = pos
        self._ssnd_seek_needed = 1

    def readframes(self, nframes):
        if self._ssnd_seek_needed:
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return b''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        self._soundpos = self._soundpos + len(data) // (self._nchannels
                                                        * self._sampwidth)
        return data

    #
    # Internal methods.
    #

    def _alaw2lin(self, data):
        import audioop
        return audioop.alaw2lin(data, 2)

    def _ulaw2lin(self, data):
        import audioop
        return audioop.ulaw2lin(data, 2)

    def _adpcm2lin(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate)
        return data

    def _read_comm_chunk(self, chunk):
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        self._sampwidth = (_read_short(chunk) + 7) // 8
        self._framerate = int(_read_float(chunk))
        if self._sampwidth <= 0:
            raise Error('bad sample width')
        if self._nchannels <= 0:
            raise Error('bad # of channels')
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                warnings.warn('Warning: bad COMM chunk size')
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != b'NONE':
                if self._comptype == b'G722':
                    self._convert = self._adpcm2lin
                elif self._comptype in (b'ulaw', b'ULAW'):
                    self._convert = self._ulaw2lin
                elif self._comptype in (b'alaw', b'ALAW'):
                    self._convert = self._alaw2lin
                else:
                    raise Error('unsupported compression type')
                self._sampwidth = 2
        else:
            self._comptype = b'NONE'
            self._compname = b'not compressed'

    def _readmark(self, chunk):
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            w = ('Warning: MARK chunk contains only %s marker%s instead of %s' %
                 (len(self._markers), '' if len(self._markers) == 1 else 's',
                  nmarkers))
            warnings.warn(w)

class Aifc_write:
    # Variables used in this class:
    #
    # These variables are user settable through appropriate methods
    # of this class:
    # _file -- the open file with methods write(), close(), tell(), seek()
    #       set through the __init__() method
    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
    #       set through the setcomptype() or setparams() method
    # _compname -- the human-readable AIFF-C compression type
    #       set through the setcomptype() or setparams() method
    # _nchannels -- the number of audio channels
    #       set through the setnchannels() or setparams() method
    # _sampwidth -- the number of bytes per audio sample
    #       set through the setsampwidth() or setparams() method
    # _framerate -- the sampling frequency
    #       set through the setframerate() or setparams() method
    # _nframes -- the number of audio frames written to the header
    #       set through the setnframes() or setparams() method
    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
    #       set through the aifc() method, reset through the
    #       aiff() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _comp -- the compressor from builtin module cl
    # _nframeswritten -- the number of audio frames actually written
    # _datalength -- the size of the audio samples written to the header
    # _datawritten -- the size of the audio samples actually written

    _file = None  # Set here since __del__ checks it

    def __init__(self, f):
        if isinstance(f, str):
            file_object = builtins.open(f, 'wb')
            try:
                self.initfp(file_object)
            except:
                file_object.close()
                raise

            # treat .aiff file extensions as non-compressed audio
            if f.endswith('.aiff'):
                self._aifc = 0
        else:
            # assume it is an open file object already
            self.initfp(f)

    def initfp(self, file):
        self._file = file
        self._version = _AIFC_version
        self._comptype = b'NONE'
        self._compname = b'not compressed'
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def aiff(self):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 0

    def aifc(self):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 1

    def setnchannels(self, nchannels):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if nchannels < 1:
            raise Error('bad # of channels')
        self._nchannels = nchannels

    def getnchannels(self):
        if not self._nchannels:
            raise Error('number of channels not set')
        return self._nchannels

    def setsampwidth(self, sampwidth):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if sampwidth < 1 or sampwidth > 4:
            raise Error('bad sample width')
        self._sampwidth = sampwidth

    def getsampwidth(self):
        if not self._sampwidth:
            raise Error('sample width not set')
        return self._sampwidth

    def setframerate(self, framerate):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if framerate <= 0:
            raise Error('bad frame rate')
        self._framerate = framerate

    def getframerate(self):
        if not self._framerate:
            raise Error('frame rate not set')
        return self._framerate

    def setnframes(self, nframes):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._nframes = nframes

    def getnframes(self):
        return self._nframeswritten

    def setcomptype(self, comptype, compname):
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self._comptype = comptype
        self._compname = compname

    def getcomptype(self):
        return self._comptype

    def getcompname(self):
        return self._compname

##  def setversion(self, version):
##      if self._nframeswritten:
##          raise Error, 'cannot change parameters after starting to write'
##      self._version = version

    def setparams(self, params):
        nchannels, sampwidth, framerate, nframes, comptype, compname = params
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)

    def getparams(self):
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error('not all parameters set')
        return _aifc_params(self._nchannels, self._sampwidth, self._framerate,
                            self._nframes, self._comptype, self._compname)

    def setmark(self, id, pos, name):
        if id <= 0:
            raise Error('marker ID must be > 0')
        if pos < 0:
            raise Error('marker position must be >= 0')
        if not isinstance(name, bytes):
            raise Error('marker name must be bytes')
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))

    def getmark(self, id):
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def getmarkers(self):
        if len(self._markers) == 0:
            return None
        return self._markers

    def tell(self):
        return self._nframeswritten

    def writeframesraw(self, data):
        if not isinstance(data, (bytes, bytearray)):
            data = memoryview(data).cast('B')
        self._ensure_header_written(len(data))
        nframes = len(data) // (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)

    def writeframes(self, data):
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
                self._datalength != self._datawritten:
            self._patchheader()

    def close(self):
        if self._file is None:
            return
        try:
            self._ensure_header_written(0)
            if self._datawritten & 1:
                # quick pad to even size
                self._file.write(b'\x00')
                self._datawritten = self._datawritten + 1
            self._writemarkers()
            if self._nframeswritten != self._nframes or \
                    self._datalength != self._datawritten or \
                    self._marklength:
                self._patchheader()
        finally:
            # Prevent ref cycles
            self._convert = None
            f = self._file
            self._file = None
            f.close()

    #
    # Internal methods.
    #

    def _lin2alaw(self, data):
        import audioop
        return audioop.lin2alaw(data, 2)

    def _lin2ulaw(self, data):
        import audioop
        return audioop.lin2ulaw(data, 2)

    def _lin2adpcm(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate)
        return data

    def _ensure_header_written(self, datasize):
        if not self._nframeswritten:
            if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error('sample width must be 2 when compressing '
                                'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)')
            if not self._nchannels:
                raise Error('# channels not specified')
            if not self._sampwidth:
                raise Error('sample width not specified')
            if not self._framerate:
                raise Error('sampling rate not specified')
            self._write_header(datasize)

    def _init_compression(self):
        if self._comptype == b'G722':
            self._convert = self._lin2adpcm
        elif self._comptype in (b'ulaw', b'ULAW'):
            self._convert = self._lin2ulaw
        elif self._comptype in (b'alaw', b'ALAW'):
            self._convert = self._lin2alaw

    def _write_header(self, initlength):
        if self._aifc and self._comptype != b'NONE':
            self._init_compression()
        self._file.write(b'FORM')
        if not self._nframes:
            self._nframes = initlength // (self._nchannels * self._sampwidth)
        self._datalength = self._nframes * self._nchannels * self._sampwidth
        if self._datalength & 1:
            self._datalength = self._datalength + 1
        if self._aifc:
            if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'):
                self._datalength = self._datalength // 2
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
            elif self._comptype == b'G722':
                self._datalength = (self._datalength + 3) // 4
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
        try:
            self._form_length_pos = self._file.tell()
        except (AttributeError, OSError):
            self._form_length_pos = None
        commlength = self._write_form_length(self._datalength)
        if self._aifc:
            self._file.write(b'AIFC')
            self._file.write(b'FVER')
            _write_ulong(self._file, 4)
            _write_ulong(self._file, self._version)
        else:
            self._file.write(b'AIFF')
        self._file.write(b'COMM')
        _write_ulong(self._file, commlength)
        _write_short(self._file, self._nchannels)
        if self._form_length_pos is not None:
            self._nframes_pos = self._file.tell()
        _write_ulong(self._file, self._nframes)
        if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
            _write_short(self._file, 8)
        else:
            _write_short(self._file, self._sampwidth * 8)
        _write_float(self._file, self._framerate)
        if self._aifc:
            self._file.write(self._comptype)
            _write_string(self._file, self._compname)
        self._file.write(b'SSND')
        if self._form_length_pos is not None:
            self._ssnd_length_pos = self._file.tell()
        _write_ulong(self._file, self._datalength + 8)
        _write_ulong(self._file, 0)
        _write_ulong(self._file, 0)

    def _write_form_length(self, datalength):
        if self._aifc:
            commlength = 18 + 5 + len(self._compname)
            if commlength & 1:
                commlength = commlength + 1
            verslength = 12
        else:
            commlength = 18
            verslength = 0
        _write_ulong(self._file, 4 + verslength + self._marklength + \
                     8 + commlength + 16 + datalength)
        return commlength

    def _patchheader(self):
        curpos = self._file.tell()
        if self._datawritten & 1:
            datalength = self._datawritten + 1
            self._file.write(b'\x00')
        else:
            datalength = self._datawritten
        if datalength == self._datalength and \
                self._nframes == self._nframeswritten and \
                self._marklength == 0:
            self._file.seek(curpos, 0)
            return
        self._file.seek(self._form_length_pos, 0)
        dummy = self._write_form_length(datalength)
        self._file.seek(self._nframes_pos, 0)
        _write_ulong(self._file, self._nframeswritten)
        self._file.seek(self._ssnd_length_pos, 0)
        _write_ulong(self._file, datalength + 8)
        self._file.seek(curpos, 0)
        self._nframes = self._nframeswritten
        self._datalength = datalength

    def _writemarkers(self):
        if len(self._markers) == 0:
            return
        self._file.write(b'MARK')
        length = 2
        for marker in self._markers:
            id, pos, name = marker
            length = length + len(name) + 1 + 6
            if len(name) & 1 == 0:
                length = length + 1
        _write_ulong(self._file, length)
        self._marklength = length + 8
        _write_short(self._file, len(self._markers))
        for marker in self._markers:
            id, pos, name = marker
            _write_short(self._file, id)
            _write_ulong(self._file, pos)
            _write_string(self._file, name)

def open(f, mode=None):
    if mode is None:
        if hasattr(f, 'mode'):
            mode = f.mode
        else:
            mode = 'rb'
    if mode in ('r', 'rb'):
        return Aifc_read(f)
    elif mode in ('w', 'wb'):
        return Aifc_write(f)
    else:
        raise Error("mode must be 'r', 'rb', 'w', or 'wb'")


if __name__ == '__main__':
    import sys
    if not sys.argv[1:]:
        sys.argv.append('/usr/demos/data/audio/bach.aiff')
    fn = sys.argv[1]
    with open(fn, 'r') as f:
        print("Reading", fn)
        print("nchannels =", f.getnchannels())
        print("nframes   =", f.getnframes())
        print("sampwidth =", f.getsampwidth())
        print("framerate =", f.getframerate())
        print("comptype  =", f.getcomptype())
        print("compname  =", f.getcompname())
        if sys.argv[2:]:
            gn = sys.argv[2]
            print("Writing", gn)
            with open(gn, 'w') as g:
                g.setparams(f.getparams())
                while 1:
                    data = f.readframes(1024)
                    if not data:
                        break
                    g.writeframes(data)
                print("Done.")

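A short usage sketch of the read/write API documented above; the file names are placeholders, not files shipped with this commit:

    import aifc

    # copy the first second of input.aiff into output.aiff
    with aifc.open('input.aiff', 'r') as src, aifc.open('output.aiff', 'w') as dst:
        dst.setparams(src.getparams())
        dst.writeframes(src.readframes(src.getframerate()))
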
17
Tool/Python39/Lib/antigravity.py
Normal file
@ -0,0 +1,17 @@

import webbrowser
import hashlib

webbrowser.open("https://xkcd.com/353/")

def geohash(latitude, longitude, datedow):
    '''Compute geohash() using the Munroe algorithm.

    >>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
    37.857713 -122.544543

    '''
    # https://xkcd.com/426/
    h = hashlib.md5(datedow, usedforsecurity=False).hexdigest()
    p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
    print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))

2581
Tool/Python39/Lib/argparse.py
Normal file
File diff suppressed because it is too large
1600
Tool/Python39/Lib/ast.py
Normal file
File diff suppressed because it is too large
307
Tool/Python39/Lib/asynchat.py
Normal file
@ -0,0 +1,307 @@
# -*- Mode: Python; tab-width: 4 -*-
#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
#       Author: Sam Rushing <rushing@nightmare.com>

# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

r"""A class supporting chat-style (command/response) protocols.

This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).

The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.

for example:
Say you build an async nntp client using this class.  At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting.  Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import asyncore
from collections import deque


class async_chat(asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults

    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    # we don't want to enable the use of encoding by default, because that is a
    # sign of an application bug that we don't want to pass silently

    use_encoding = 0
    encoding = 'latin-1'

    def __init__(self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = b''

        # we use a list here rather than io.BytesIO for a few reasons...
        # del lst[:] is faster than bio.truncate(0)
        # lst = [] is faster than bio.truncate(0)
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__(self, sock, map)

    def collect_incoming_data(self, data):
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        self.incoming.append(data)

    def _get_data(self):
        d = b''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator(self, term):
        """Set the input delimiter.

        Can be a fixed string of any length, an integer, or None.
        """
        if isinstance(term, str) and self.use_encoding:
            term = bytes(term, self.encoding)
        elif isinstance(term, int) and term < 0:
            raise ValueError('the number of received bytes must be positive')
        self.terminator = term

    def get_terminator(self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read(self):

        try:
            data = self.recv(self.ac_in_buffer_size)
        except BlockingIOError:
            return
        except OSError:
            self.handle_error()
            return

        if isinstance(data, str) and self.use_encoding:
            data = bytes(data, self.encoding)  # fixed: was bytes(str, ...), which would raise
        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data(self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data(self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data(self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string
                        # (source of subtle bugs)
                        self.collect_incoming_data(self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator
                    # is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end(self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data(self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data(self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def handle_write(self):
        self.initiate_send()

    def handle_close(self):
        self.close()

    def push(self, data):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)'
                            % type(data))  # fixed: message was passed as a second arg, never formatted
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in range(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer(self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable(self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.

        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable(self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done(self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                data = first[:obs]
            except TypeError:
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            if isinstance(data, str) and self.use_encoding:
                data = bytes(data, self.encoding)

            # send the data
            try:
                num_sent = self.send(data)
            except OSError:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers(self):
        # Emergencies only!
        self.ac_in_buffer = b''
        del self.incoming[:]
        self.producer_fifo.clear()


class simple_producer:

    def __init__(self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more(self):
        if len(self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = b''
            return result


# Given 'haystack', see if any prefix of 'needle' is at its end.  This
# assumes an exact match has already been checked.  Return the number of
# characters matched.
# for example:
# f_p_a_e("qwerty\r", "\r\n") => 1
# f_p_a_e("qwertydkjf", "\r\n") => 0
# f_p_a_e("qwerty\r\n", "\r\n") => <undefined>

# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python:   28961/s
# old python:   18307/s
#         re:   12820/s
#      regex:   14035/s

def find_prefix_at_end(haystack, needle):
    l = len(needle) - 1
    while l and not haystack.endswith(needle[:l]):
        l -= 1
    return l

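A minimal subclass sketch showing the two required overrides; the echo behaviour and class name are illustrative only:

    class LineHandler(async_chat):          # hypothetical example
        def __init__(self, sock):
            super().__init__(sock)
            self.set_terminator(b'\r\n')    # single-line protocol
            self.buffer = []

        def collect_incoming_data(self, data):
            self.buffer.append(data)        # accumulate until the terminator

        def found_terminator(self):
            line = b''.join(self.buffer)
            self.buffer = []
            self.push(line + b'\r\n')       # echo the completed line back
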
47
Tool/Python39/Lib/asyncio/__init__.py
Normal file
@ -0,0 +1,47 @@
"""The asyncio package, tracking PEP 3156."""

# flake8: noqa

import sys

# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .exceptions import *
from .futures import *
from .locks import *
from .protocols import *
from .runners import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .threads import *
from .transports import *

# Exposed for _asynciomodule.c to implement now deprecated
# Task.all_tasks() method.  This function will be removed in 3.9.
from .tasks import _all_tasks_compat  # NoQA

__all__ = (base_events.__all__ +
           coroutines.__all__ +
           events.__all__ +
           exceptions.__all__ +
           futures.__all__ +
           locks.__all__ +
           protocols.__all__ +
           runners.__all__ +
           queues.__all__ +
           streams.__all__ +
           subprocess.__all__ +
           tasks.__all__ +
           threads.__all__ +
           transports.__all__)

if sys.platform == 'win32':  # pragma: no cover
    from .windows_events import *
    __all__ += windows_events.__all__
else:
    from .unix_events import *  # pragma: no cover
    __all__ += unix_events.__all__

125
Tool/Python39/Lib/asyncio/__main__.py
Normal file
@ -0,0 +1,125 @@
import ast
import asyncio
import code
import concurrent.futures
import inspect
import sys
import threading
import types
import warnings

from . import futures


class AsyncIOInteractiveConsole(code.InteractiveConsole):

    def __init__(self, locals, loop):
        super().__init__(locals)
        self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT

        self.loop = loop

    def runcode(self, code):
        future = concurrent.futures.Future()

        def callback():
            global repl_future
            global repl_future_interrupted

            repl_future = None
            repl_future_interrupted = False

            func = types.FunctionType(code, self.locals)
            try:
                coro = func()
            except SystemExit:
                raise
            except KeyboardInterrupt as ex:
                repl_future_interrupted = True
                future.set_exception(ex)
                return
            except BaseException as ex:
                future.set_exception(ex)
                return

            if not inspect.iscoroutine(coro):
                future.set_result(coro)
                return

            try:
                repl_future = self.loop.create_task(coro)
                futures._chain_future(repl_future, future)
            except BaseException as exc:
                future.set_exception(exc)

        loop.call_soon_threadsafe(callback)

        try:
            return future.result()
        except SystemExit:
            raise
        except BaseException:
            if repl_future_interrupted:
                self.write("\nKeyboardInterrupt\n")
            else:
                self.showtraceback()


class REPLThread(threading.Thread):

    def run(self):
        try:
            banner = (
                f'asyncio REPL {sys.version} on {sys.platform}\n'
                f'Use "await" directly instead of "asyncio.run()".\n'
                f'Type "help", "copyright", "credits" or "license" '
                f'for more information.\n'
                f'{getattr(sys, "ps1", ">>> ")}import asyncio'
            )

            console.interact(
                banner=banner,
                exitmsg='exiting asyncio REPL...')
        finally:
            warnings.filterwarnings(
                'ignore',
                message=r'^coroutine .* was never awaited$',
                category=RuntimeWarning)

            loop.call_soon_threadsafe(loop.stop)


if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    repl_locals = {'asyncio': asyncio}
    for key in {'__name__', '__package__',
                '__loader__', '__spec__',
                '__builtins__', '__file__'}:
        repl_locals[key] = locals()[key]

    console = AsyncIOInteractiveConsole(repl_locals, loop)

    repl_future = None
    repl_future_interrupted = False

    try:
        import readline  # NoQA
    except ImportError:
        pass

    repl_thread = REPLThread()
    repl_thread.daemon = True
    repl_thread.start()

    while True:
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            if repl_future and not repl_future.done():
                repl_future.cancel()
                repl_future_interrupted = True
            continue
        else:
            break

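The console above hinges on the PyCF_ALLOW_TOP_LEVEL_AWAIT compile flag; a standalone sketch of the same mechanism (the code string is a made-up example):

    import ast, asyncio

    # compiling with this flag permits 'await' outside a coroutine;
    # eval() then returns a coroutine object that the loop can run.
    code = compile("await asyncio.sleep(0)", "<input>", "eval",
                   flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
    asyncio.run(eval(code, {"asyncio": asyncio}))
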
1915
Tool/Python39/Lib/asyncio/base_events.py
Normal file
File diff suppressed because it is too large
80
Tool/Python39/Lib/asyncio/base_futures.py
Normal file
@ -0,0 +1,80 @@
__all__ = ()

import reprlib
from _thread import get_ident

from . import format_helpers

# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'


def isfuture(obj):
    """Check for a Future.

    This returns True when obj is a Future instance or is advertising
    itself as duck-type compatible by setting _asyncio_future_blocking.
    See comment in Future for more details.
    """
    return (hasattr(obj.__class__, '_asyncio_future_blocking') and
            obj._asyncio_future_blocking is not None)


def _format_callbacks(cb):
    """helper function for Future.__repr__"""
    size = len(cb)
    if not size:
        cb = ''

    def format_cb(callback):
        return format_helpers._format_callback_source(callback, ())

    if size == 1:
        cb = format_cb(cb[0][0])
    elif size == 2:
        cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
    elif size > 2:
        cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
                                        size - 2,
                                        format_cb(cb[-1][0]))
    return f'cb=[{cb}]'


# bpo-42183: _repr_running is needed for repr protection
# when a Future or Task result contains itself directly or indirectly.
# The logic is borrowed from @reprlib.recursive_repr decorator.
# Unfortunately, the direct decorator usage is impossible because of
# AttributeError: '_asyncio.Task' object has no attribute '__module__' error.
#
# After fixing this thing we can return to the decorator based approach.
_repr_running = set()


def _future_repr_info(future):
    # (Future) -> str
    """helper function for Future.__repr__"""
    info = [future._state.lower()]
    if future._state == _FINISHED:
        if future._exception is not None:
            info.append(f'exception={future._exception!r}')
        else:
            key = id(future), get_ident()
            if key in _repr_running:
                result = '...'
            else:
                _repr_running.add(key)
                try:
                    # use reprlib to limit the length of the output, especially
                    # for very long strings
                    result = reprlib.repr(future._result)
                finally:
                    _repr_running.discard(key)
            info.append(f'result={result}')
    if future._callbacks:
        info.append(_format_callbacks(future._callbacks))
    if future._source_traceback:
        frame = future._source_traceback[-1]
        info.append(f'created at {frame[0]}:{frame[1]}')
    return info

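The duck-typing check in isfuture() keys off a single class attribute; a quick illustration, with FakeFuture invented for the example:

    import asyncio

    class FakeFuture:
        _asyncio_future_blocking = False    # advertises Future compatibility

    assert asyncio.isfuture(FakeFuture())   # True: attribute present, not None
    assert not asyncio.isfuture(object())   # True: plain objects are rejected
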
285
Tool/Python39/Lib/asyncio/base_subprocess.py
Normal file
@ -0,0 +1,285 @@
import collections
import subprocess
import warnings

from . import protocols
from . import transports
from .log import logger


class BaseSubprocessTransport(transports.SubprocessTransport):

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 waiter=None, extra=None, **kwargs):
        super().__init__(extra)
        self._closed = False
        self._protocol = protocol
        self._loop = loop
        self._proc = None
        self._pid = None
        self._returncode = None
        self._exit_waiters = []
        self._pending_calls = collections.deque()
        self._pipes = {}
        self._finished = False

        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None

        # Create the child process: set the _proc attribute
        try:
            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                        stderr=stderr, bufsize=bufsize, **kwargs)
        except:
            self.close()
            raise

        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc

        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)

        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append(f'pid={self._pid}')
        if self._returncode is not None:
            info.append(f'returncode={self._returncode}')
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')

        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append(f'stdin={stdin.pipe}')

        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            info.append(f'stdout=stderr={stdout.pipe}')
        else:
            if stdout is not None:
                info.append(f'stdout={stdout.pipe}')
            if stderr is not None:
                info.append(f'stderr={stderr.pipe}')

        return '<{}>'.format(' '.join(info))

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        raise NotImplementedError

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closed

    def close(self):
        if self._closed:
            return
        self._closed = True

        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()

        if (self._proc is not None and
                # has the child process finished?
                self._returncode is None and
                # the child process has finished, but the
                # transport hasn't been notified yet?
                self._proc.poll() is None):

            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)

            try:
                self._proc.kill()
            except ProcessLookupError:
                pass

        # Don't clear the _proc reference yet: _post_init() may still run

    def __del__(self, _warn=warnings.warn):
        if not self._closed:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self.close()

    def get_pid(self):
        return self._pid

    def get_returncode(self):
        return self._returncode

    def get_pipe_transport(self, fd):
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def _check_proc(self):
        if self._proc is None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        self._check_proc()
        self._proc.send_signal(signal)

    def terminate(self):
        self._check_proc()
        self._proc.terminate()

    def kill(self):
        self._check_proc()
        self._proc.kill()

    async def _connect_pipes(self, waiter):
        try:
            proc = self._proc
            loop = self._loop

            if proc.stdin is not None:
                _, pipe = await loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin)
                self._pipes[0] = pipe

            if proc.stdout is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout)
                self._pipes[1] = pipe

            if proc.stderr is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr)
                self._pipes[2] = pipe

            assert self._pending_calls is not None

            loop.call_soon(self._protocol.connection_made, self)
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            self._pending_calls = None
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r', self, returncode)
        self._returncode = returncode
        if self._proc.returncode is None:
            # asyncio uses a child watcher: copy the status into the Popen
            # object. On Python 3.6, it is required to avoid a ResourceWarning.
            self._proc.returncode = returncode
        self._call(self._protocol.process_exited)
        self._try_finish()

        # wake up futures waiting for wait()
        for waiter in self._exit_waiters:
            if not waiter.cancelled():
                waiter.set_result(returncode)
        self._exit_waiters = None

    async def _wait(self):
        """Wait until the process exit and return the process return code.

        This method is a coroutine."""
        if self._returncode is not None:
            return self._returncode

        waiter = self._loop.create_future()
        self._exit_waiters.append(waiter)
        return await waiter

    def _try_finish(self):
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._loop = None
            self._proc = None
            self._protocol = None


class WriteSubprocessPipeProto(protocols.BaseProtocol):

    def __init__(self, proc, fd):
        self.proc = proc
        self.fd = fd
        self.pipe = None
        self.disconnected = False

    def connection_made(self, transport):
        self.pipe = transport

    def __repr__(self):
        return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'

    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        self.proc._protocol.resume_writing()


class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):

    def data_received(self, data):
        self.proc._pipe_data_received(self.fd, data)
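BaseSubprocessTransport is the machinery behind loop.subprocess_exec() and subprocess_shell(): the transport owns the Popen object while Write/ReadSubprocessPipeProto forward pipe events to the user's SubprocessProtocol. A minimal sketch of the public API this file backs (the 'echo' command is only illustrative):

import asyncio

class EchoProtocol(asyncio.SubprocessProtocol):
    def __init__(self, exited):
        self.exited = exited

    def pipe_data_received(self, fd, data):
        # fd 1 is stdout, fd 2 is stderr -- matching self._pipes above
        print(fd, data.decode().rstrip())

    def process_exited(self):
        self.exited.set_result(True)

async def main():
    loop = asyncio.get_running_loop()
    exited = loop.create_future()
    transport, protocol = await loop.subprocess_exec(
        lambda: EchoProtocol(exited), 'echo', 'hello')
    await exited            # resolved from process_exited()
    transport.close()

asyncio.run(main())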
85
Tool/Python39/Lib/asyncio/base_tasks.py
Normal file
@@ -0,0 +1,85 @@
import linecache
import traceback

from . import base_futures
from . import coroutines


def _task_repr_info(task):
    info = base_futures._future_repr_info(task)

    if task._must_cancel:
        # replace status
        info[0] = 'cancelling'

    info.insert(1, 'name=%r' % task.get_name())

    coro = coroutines._format_coroutine(task._coro)
    info.insert(2, f'coro=<{coro}>')

    if task._fut_waiter is not None:
        info.insert(3, f'wait_for={task._fut_waiter!r}')
    return info


def _task_get_stack(task, limit):
    frames = []
    if hasattr(task._coro, 'cr_frame'):
        # case 1: 'async def' coroutines
        f = task._coro.cr_frame
    elif hasattr(task._coro, 'gi_frame'):
        # case 2: legacy coroutines
        f = task._coro.gi_frame
    elif hasattr(task._coro, 'ag_frame'):
        # case 3: async generators
        f = task._coro.ag_frame
    else:
        # case 4: unknown objects
        f = None
    if f is not None:
        while f is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(f)
            f = f.f_back
        frames.reverse()
    elif task._exception is not None:
        tb = task._exception.__traceback__
        while tb is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(tb.tb_frame)
            tb = tb.tb_next
    return frames


def _task_print_stack(task, limit, file):
    extracted_list = []
    checked = set()
    for f in task.get_stack(limit=limit):
        lineno = f.f_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        if filename not in checked:
            checked.add(filename)
            linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        extracted_list.append((filename, lineno, name, line))

    exc = task._exception
    if not extracted_list:
        print(f'No stack for {task!r}', file=file)
    elif exc is not None:
        print(f'Traceback for {task!r} (most recent call last):', file=file)
    else:
        print(f'Stack for {task!r} (most recent call last):', file=file)

    traceback.print_list(extracted_list, file=file)
    if exc is not None:
        for line in traceback.format_exception_only(exc.__class__, exc):
            print(line, file=file, end='')
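These helpers implement the public Task.get_stack() and Task.print_stack(). A short sketch of those entry points; the long sleep only keeps the task suspended so it has a frame to report:

import asyncio

async def worker():
    await asyncio.sleep(3600)

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)   # let the task start and suspend
    task.print_stack()       # delegates to _task_print_stack() above
    task.cancel()

asyncio.run(main())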
27
Tool/Python39/Lib/asyncio/constants.py
Normal file
@@ -0,0 +1,27 @@
import enum

# After the connection is lost, log warnings after this many write()s.
LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5

# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1

# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
# (see extract_stack() in format_helpers.py).
DEBUG_STACK_DEPTH = 10

# Number of seconds to wait for SSL handshake to complete
# The default timeout matches that of Nginx.
SSL_HANDSHAKE_TIMEOUT = 60.0

# Used in sendfile fallback code. We use fallback for platforms
# that don't support sendfile, or for TLS connections.
SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256

# The enum should be here to break circular dependencies between
# base_events and sslproto
class _SendfileMode(enum.Enum):
    UNSUPPORTED = enum.auto()
    TRY_NATIVE = enum.auto()
    FALLBACK = enum.auto()
269
Tool/Python39/Lib/asyncio/coroutines.py
Normal file
@@ -0,0 +1,269 @@
__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine'

import collections.abc
import functools
import inspect
import os
import sys
import traceback
import types
import warnings

from . import base_futures
from . import constants
from . import format_helpers
from .log import logger


def _is_debug_mode():
    # If you set _DEBUG to true, @coroutine will wrap the resulting
    # generator objects in a CoroWrapper instance (defined below). That
    # instance will log a message when the generator is never iterated
    # over, which may happen when you forget to use "await" or "yield from"
    # with a coroutine call.
    # Note that the value of the _DEBUG flag is taken
    # when the decorator is used, so to be of any use it must be set
    # before you define your coroutines. A downside of using this feature
    # is that tracebacks show entries for the CoroWrapper.__next__ method
    # when _DEBUG is true.
    return sys.flags.dev_mode or (not sys.flags.ignore_environment and
                                  bool(os.environ.get('PYTHONASYNCIODEBUG')))


_DEBUG = _is_debug_mode()


class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.

    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        self._source_traceback = format_helpers.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += f', created at {frame[0]}:{frame[1]}'

        return f'<{self.__class__.__name__} {coro_repr}>'

    def __iter__(self):
        return self

    def __next__(self):
        return self.gen.send(None)

    def send(self, value):
        return self.gen.send(value)

    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    def __await__(self):
        return self

    @property
    def gi_yieldfrom(self):
        return self.gen.gi_yieldfrom

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is not None and frame.f_lasti == -1:
            msg = f'{self!r} was never yielded from'
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += (f'\nCoroutine object created at '
                        f'(most recent call last, truncated to '
                        f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
                msg += tb.rstrip()
            logger.error(msg)


def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead',
                  DeprecationWarning,
                  stacklevel=2)
    if inspect.iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            else:
                # If 'res' is an awaitable, run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, collections.abc.Awaitable):
                        res = yield from await_meth()
            return res

    coro = types.coroutine(coro)
    if not _DEBUG:
        wrapper = coro
    else:
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial) may lack __qualname__.
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper


# A marker for iscoroutinefunction.
_is_coroutine = object()


def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    return (inspect.iscoroutinefunction(func) or
            getattr(func, '_is_coroutine', None) is _is_coroutine)


# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
                    collections.abc.Coroutine, CoroWrapper)
_iscoroutine_typecache = set()


def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    if type(obj) in _iscoroutine_typecache:
        return True

    if isinstance(obj, _COROUTINE_TYPES):
        # Just in case we don't want to cache more than 100
        # positive types. That shouldn't ever happen, unless
        # someone is stressing the system on purpose.
        if len(_iscoroutine_typecache) < 100:
            _iscoroutine_typecache.add(type(obj))
        return True
    else:
        return False


def _format_coroutine(coro):
    assert iscoroutine(coro)

    is_corowrapper = isinstance(coro, CoroWrapper)

    def get_name(coro):
        # Coroutines compiled with Cython sometimes don't have
        # proper __qualname__ or __name__. While that is a bug
        # in Cython, asyncio shouldn't crash with an AttributeError
        # in its __repr__ functions.
        if is_corowrapper:
            return format_helpers._format_callback(coro.func, (), {})

        if hasattr(coro, '__qualname__') and coro.__qualname__:
            coro_name = coro.__qualname__
        elif hasattr(coro, '__name__') and coro.__name__:
            coro_name = coro.__name__
        else:
            # Stop masking Cython bugs, expose them in a friendly way.
            coro_name = f'<{type(coro).__name__} without __name__>'
        return f'{coro_name}()'

    def is_running(coro):
        try:
            return coro.cr_running
        except AttributeError:
            try:
                return coro.gi_running
            except AttributeError:
                return False

    coro_code = None
    if hasattr(coro, 'cr_code') and coro.cr_code:
        coro_code = coro.cr_code
    elif hasattr(coro, 'gi_code') and coro.gi_code:
        coro_code = coro.gi_code

    coro_name = get_name(coro)

    if not coro_code:
        # Built-in types might not have __qualname__ or __name__.
        if is_running(coro):
            return f'{coro_name} running'
        else:
            return coro_name

    coro_frame = None
    if hasattr(coro, 'gi_frame') and coro.gi_frame:
        coro_frame = coro.gi_frame
    elif hasattr(coro, 'cr_frame') and coro.cr_frame:
        coro_frame = coro.cr_frame

    # If Cython's coroutine has a fake code object without proper
    # co_filename -- expose that.
    filename = coro_code.co_filename or '<empty co_filename>'

    lineno = 0
    if (is_corowrapper and
            coro.func is not None and
            not inspect.isgeneratorfunction(coro.func)):
        source = format_helpers._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
        else:
            coro_repr = f'{coro_name} running, defined at {filename}:{lineno}'

    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = f'{coro_name} running at {filename}:{lineno}'

    else:
        lineno = coro_code.co_firstlineno
        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'

    return coro_repr
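The module distinguishes coroutine functions from coroutine objects, and the type cache makes the object check cheap on repeated hits. A small sketch of the two predicates on a native coroutine (the function name is illustrative):

import asyncio

async def fetch():
    return 42

print(asyncio.iscoroutinefunction(fetch))   # True: an 'async def' function
coro = fetch()
print(asyncio.iscoroutine(coro))            # True: a coroutine object
coro.close()                                # avoid a 'never awaited' warning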
795
Tool/Python39/Lib/asyncio/events.py
Normal file
@@ -0,0 +1,795 @@
"""Event loop and event loop policy."""

__all__ = (
    'AbstractEventLoopPolicy',
    'AbstractEventLoop', 'AbstractServer',
    'Handle', 'TimerHandle',
    'get_event_loop_policy', 'set_event_loop_policy',
    'get_event_loop', 'set_event_loop', 'new_event_loop',
    'get_child_watcher', 'set_child_watcher',
    '_set_running_loop', 'get_running_loop',
    '_get_running_loop',
)

import contextvars
import os
import socket
import subprocess
import sys
import threading

from . import format_helpers


class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '_repr', '__weakref__',
                 '_context')

    def __init__(self, callback, args, loop, context=None):
        if context is None:
            context = contextvars.copy_context()
        self._context = context
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))
        else:
            self._source_traceback = None

    def _repr_info(self):
        info = [self.__class__.__name__]
        if self._cancelled:
            info.append('cancelled')
        if self._callback is not None:
            info.append(format_helpers._format_callback_source(
                self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append(f'created at {frame[0]}:{frame[1]}')
        return info

    def __repr__(self):
        if self._repr is not None:
            return self._repr
        info = self._repr_info()
        return '<{}>'.format(' '.join(info))

    def cancel(self):
        if not self._cancelled:
            self._cancelled = True
            if self._loop.get_debug():
                # Keep a representation in debug mode to keep the callback and
                # parameters. For example, to log the warning
                # "Executing <Handle...> took 2.5 seconds"
                self._repr = repr(self)
            self._callback = None
            self._args = None

    def cancelled(self):
        return self._cancelled

    def _run(self):
        try:
            self._context.run(self._callback, *self._args)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            cb = format_helpers._format_callback_source(
                self._callback, self._args)
            msg = f'Exception in callback {cb}'
            context = {
                'message': msg,
                'exception': exc,
                'handle': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.


class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop, context=None):
        assert when is not None
        super().__init__(callback, args, loop, context)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False

    def _repr_info(self):
        info = super()._repr_info()
        pos = 2 if self._cancelled else 1
        info.insert(pos, f'when={self._when}')
        return info

    def __hash__(self):
        return hash(self._when)

    def __lt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when or self.__eq__(other)
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when or self.__eq__(other)
        return NotImplemented

    def __eq__(self, other):
        if isinstance(other, TimerHandle):
            return (self._when == other._when and
                    self._callback == other._callback and
                    self._args == other._args and
                    self._cancelled == other._cancelled)
        return NotImplemented

    def cancel(self):
        if not self._cancelled:
            self._loop._timer_handle_cancelled(self)
        super().cancel()

    def when(self):
        """Return a scheduled callback time.

        The time is an absolute timestamp, using the same time
        reference as loop.time().
        """
        return self._when

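call_later() and call_at() return TimerHandle objects; the rich comparisons above exist so the loop can keep them in a heap ordered by _when. A sketch of the cancellation path Handle.cancel() supports (the 10-second delay is illustrative):

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    handle = loop.call_later(10, print, 'never runs')  # a TimerHandle
    print(handle.when() - loop.time())                 # roughly 10 seconds
    handle.cancel()                                    # drops callback and args
    print(handle.cancelled())                          # True

asyncio.run(main())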
class AbstractServer:
    """Abstract server returned by create_server()."""

    def close(self):
        """Stop serving. This leaves existing connections open."""
        raise NotImplementedError

    def get_loop(self):
        """Get the event loop the Server object is attached to."""
        raise NotImplementedError

    def is_serving(self):
        """Return True if the server is accepting connections."""
        raise NotImplementedError

    async def start_serving(self):
        """Start accepting connections.

        This method is idempotent, so it can be called when
        the server is already serving.
        """
        raise NotImplementedError

    async def serve_forever(self):
        """Start accepting connections until the coroutine is cancelled.

        The server is closed when the coroutine is cancelled.
        """
        raise NotImplementedError

    async def wait_closed(self):
        """Coroutine to wait until service is closed."""
        raise NotImplementedError

    async def __aenter__(self):
        return self

    async def __aexit__(self, *exc):
        self.close()
        await self.wait_closed()


class AbstractEventLoop:
    """Abstract event loop."""

    # Running and stopping the event loop.

    def run_forever(self):
        """Run the event loop until stop() is called."""
        raise NotImplementedError

    def run_until_complete(self, future):
        """Run the event loop until a Future is done.

        Return the Future's result, or raise its exception.
        """
        raise NotImplementedError

    def stop(self):
        """Stop the event loop as soon as reasonable.

        Exactly how soon that is may depend on the implementation, but
        no more I/O callbacks should be scheduled.
        """
        raise NotImplementedError

    def is_running(self):
        """Return whether the event loop is currently running."""
        raise NotImplementedError

    def is_closed(self):
        """Returns True if the event loop was closed."""
        raise NotImplementedError

    def close(self):
        """Close the loop.

        The loop should not be running.

        This is idempotent and irreversible.

        No other methods should be called after this one.
        """
        raise NotImplementedError

    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        raise NotImplementedError

    async def shutdown_default_executor(self):
        """Schedule the shutdown of the default executor."""
        raise NotImplementedError

    # Methods scheduling callbacks. All these return Handles.

    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        raise NotImplementedError

    def call_soon(self, callback, *args, context=None):
        return self.call_later(0, callback, *args, context=context)

    def call_later(self, delay, callback, *args, context=None):
        raise NotImplementedError

    def call_at(self, when, callback, *args, context=None):
        raise NotImplementedError

    def time(self):
        raise NotImplementedError

    def create_future(self):
        raise NotImplementedError

    # Method scheduling a coroutine object: create a task.

    def create_task(self, coro, *, name=None):
        raise NotImplementedError

    # Methods for interacting with threads.

    def call_soon_threadsafe(self, callback, *args, context=None):
        raise NotImplementedError

    def run_in_executor(self, executor, func, *args):
        raise NotImplementedError

    def set_default_executor(self, executor):
        raise NotImplementedError

    # Network I/O methods returning Futures.

    async def getaddrinfo(self, host, port, *,
                          family=0, type=0, proto=0, flags=0):
        raise NotImplementedError

    async def getnameinfo(self, sockaddr, flags=0):
        raise NotImplementedError

    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0, proto=0,
            flags=0, sock=None, local_addr=None,
            server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        raise NotImplementedError

    async def create_server(
            self, protocol_factory, host=None, port=None,
            *, family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE, sock=None, backlog=100,
            ssl=None, reuse_address=None, reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """A coroutine which creates a TCP server bound to host and port.

        The return value is a Server object which can be used to stop
        the service.

        If host is an empty string or None all interfaces are assumed
        and a list of multiple sockets will be returned (most likely
        one for IPv4 and another one for IPv6). The host parameter can also be
        a sequence (e.g. list) of hosts to bind to.

        family can be set to either AF_INET or AF_INET6 to force the
        socket to use IPv4 or IPv6. If not set it will be determined
        from host (defaults to AF_UNSPEC).

        flags is a bitmask for getaddrinfo().

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows.

        ssl_handshake_timeout is the time in seconds that an SSL server
        will wait for completion of the SSL handshake before aborting the
        connection. Default is 60s.

        start_serving set to True (default) causes the created server
        to start accepting connections immediately. When set to False,
        the user should await Server.start_serving() or Server.serve_forever()
        to make the server start accepting connections.
        """
        raise NotImplementedError

    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file through a transport.

        Return the number of bytes sent.
        """
        raise NotImplementedError

    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade a transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        raise NotImplementedError

    async def create_unix_connection(
            self, protocol_factory, path=None, *,
            ssl=None, sock=None,
            server_hostname=None,
            ssl_handshake_timeout=None):
        raise NotImplementedError

    async def create_unix_server(
            self, protocol_factory, path=None, *,
            sock=None, backlog=100, ssl=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """A coroutine which creates a UNIX Domain Socket server.

        The return value is a Server object, which can be used to stop
        the service.

        path is a str, representing a file system path to bind the
        server socket to.

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        ssl_handshake_timeout is the time in seconds that an SSL server
        will wait for the SSL handshake to complete (defaults to 60s).

        start_serving set to True (default) causes the created server
        to start accepting connections immediately. When set to False,
        the user should await Server.start_serving() or Server.serve_forever()
        to make the server start accepting connections.
        """
        raise NotImplementedError

    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=None, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """A coroutine which creates a datagram endpoint.

        This method will try to establish the endpoint in the background.
        When successful, the coroutine returns a (transport, protocol) pair.

        protocol_factory must be a callable returning a protocol instance.

        socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on
        host (or family if specified), socket type SOCK_DGRAM.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified it will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows and some UNIXes. If the
        :py:data:`~socket.SO_REUSEPORT` constant is not defined then this
        capability is unsupported.

        allow_broadcast tells the kernel to allow this endpoint to send
        messages to the broadcast address.

        sock can optionally be specified in order to use a preexisting
        socket object.
        """
        raise NotImplementedError

    # Pipes and subprocesses.

    async def connect_read_pipe(self, protocol_factory, pipe):
        """Register read pipe in event loop. Set the pipe to non-blocking mode.

        protocol_factory should instantiate object with Protocol interface.
        pipe is a file-like object.
        Return pair (transport, protocol), where transport supports the
        ReadTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is: we need to own the pipe and close it when the
        # transport finishes. We can get complicated errors if we pass
        # f.fileno(), close the fd in the pipe transport, then close f
        # (and vice versa).
        raise NotImplementedError

    async def connect_write_pipe(self, protocol_factory, pipe):
        """Register write pipe in event loop.

        protocol_factory should instantiate object with BaseProtocol interface.
        Pipe is file-like object already switched to nonblocking.
        Return pair (transport, protocol), where transport support
        WriteTransport interface."""
        # The reason to accept a file-like object instead of just a file
        # descriptor is: we need to own the pipe and close it when the
        # transport finishes. We can get complicated errors if we pass
        # f.fileno(), close the fd in the pipe transport, then close f
        # (and vice versa).
        raise NotImplementedError

    async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               **kwargs):
        raise NotImplementedError

    async def subprocess_exec(self, protocol_factory, *args,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              **kwargs):
        raise NotImplementedError

    # Ready-based callback registration methods.
    # The add_*() methods return None.
    # The remove_*() methods return True if something was removed,
    # False if there was nothing to delete.

    def add_reader(self, fd, callback, *args):
        raise NotImplementedError

    def remove_reader(self, fd):
        raise NotImplementedError

    def add_writer(self, fd, callback, *args):
        raise NotImplementedError

    def remove_writer(self, fd):
        raise NotImplementedError

    # Completion based I/O methods returning Futures.

    async def sock_recv(self, sock, nbytes):
        raise NotImplementedError

    async def sock_recv_into(self, sock, buf):
        raise NotImplementedError

    async def sock_sendall(self, sock, data):
        raise NotImplementedError

    async def sock_connect(self, sock, address):
        raise NotImplementedError

    async def sock_accept(self, sock):
        raise NotImplementedError

    async def sock_sendfile(self, sock, file, offset=0, count=None,
                            *, fallback=None):
        raise NotImplementedError

    # Signal handling.

    def add_signal_handler(self, sig, callback, *args):
        raise NotImplementedError

    def remove_signal_handler(self, sig):
        raise NotImplementedError

    # Task factory.

    def set_task_factory(self, factory):
        raise NotImplementedError

    def get_task_factory(self):
        raise NotImplementedError

    # Error handlers.

    def get_exception_handler(self):
        raise NotImplementedError

    def set_exception_handler(self, handler):
        raise NotImplementedError

    def default_exception_handler(self, context):
        raise NotImplementedError

    def call_exception_handler(self, context):
        raise NotImplementedError

    # Debug flag management.

    def get_debug(self):
        raise NotImplementedError

    def set_debug(self, enabled):
        raise NotImplementedError


class AbstractEventLoopPolicy:
    """Abstract policy for accessing the event loop."""

    def get_event_loop(self):
        """Get the event loop for the current context.

        Returns an event loop object implementing the BaseEventLoop interface,
        or raises an exception in case no event loop has been set for the
        current context and the current policy does not specify to create one.

        It should never return None."""
        raise NotImplementedError

    def set_event_loop(self, loop):
        """Set the event loop for the current context to loop."""
        raise NotImplementedError

    def new_event_loop(self):
        """Create and return a new event loop object according to this
        policy's rules. If there's need to set this loop as the event loop for
        the current context, set_event_loop must be called explicitly."""
        raise NotImplementedError

    # Child processes handling (Unix only).

    def get_child_watcher(self):
        "Get the watcher for child processes."
        raise NotImplementedError

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""
        raise NotImplementedError


class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
    """Default policy implementation for accessing the event loop.

    In this policy, each thread has its own event loop. However, we
    only automatically create an event loop by default for the main
    thread; other threads by default have no event loop.

    Other policies may have different rules (e.g. a single global
    event loop, or automatically creating an event loop per thread, or
    using some other notion of context to which an event loop is
    associated).
    """

    _loop_factory = None

    class _Local(threading.local):
        _loop = None
        _set_called = False

    def __init__(self):
        self._local = self._Local()

    def get_event_loop(self):
        """Get the event loop for the current context.

        Returns an instance of EventLoop or raises an exception.
        """
        if (self._local._loop is None and
                not self._local._set_called and
                threading.current_thread() is threading.main_thread()):
            self.set_event_loop(self.new_event_loop())

        if self._local._loop is None:
            raise RuntimeError('There is no current event loop in thread %r.'
                               % threading.current_thread().name)

        return self._local._loop

    def set_event_loop(self, loop):
        """Set the event loop."""
        self._local._set_called = True
        assert loop is None or isinstance(loop, AbstractEventLoop)
        self._local._loop = loop

    def new_event_loop(self):
        """Create a new event loop.

        You must call set_event_loop() to make this the current event
        loop.
        """
        return self._loop_factory()

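BaseDefaultEventLoopPolicy stores one loop per thread via threading.local, and only the main thread gets one created implicitly; _loop_factory is the hook subclasses fill in. A sketch of a custom policy built on it (forcing the selector loop here is purely illustrative):

import asyncio

class SelectorPolicy(asyncio.DefaultEventLoopPolicy):
    # _loop_factory is the extension point BaseDefaultEventLoopPolicy leaves open
    _loop_factory = asyncio.SelectorEventLoop

asyncio.set_event_loop_policy(SelectorPolicy())
loop = asyncio.new_event_loop()   # built by the factory above
print(type(loop).__name__)        # SelectorEventLoop
loop.close()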
# Event loop policy. The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context). The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None

# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()


# A TLS for the running event loop, used by _get_running_loop.
class _RunningLoop(threading.local):
    loop_pid = (None, None)


_running_loop = _RunningLoop()


def get_running_loop():
    """Return the running event loop. Raise a RuntimeError if there is none.

    This function is thread-specific.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    loop = _get_running_loop()
    if loop is None:
        raise RuntimeError('no running event loop')
    return loop


def _get_running_loop():
    """Return the running event loop or None.

    This is a low-level function intended to be used by event loops.
    This function is thread-specific.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    running_loop, pid = _running_loop.loop_pid
    if running_loop is not None and pid == os.getpid():
        return running_loop


def _set_running_loop(loop):
    """Set the running event loop.

    This is a low-level function intended to be used by event loops.
    This function is thread-specific.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    _running_loop.loop_pid = (loop, os.getpid())


def _init_event_loop_policy():
    global _event_loop_policy
    with _lock:
        if _event_loop_policy is None:  # pragma: no branch
            from . import DefaultEventLoopPolicy
            _event_loop_policy = DefaultEventLoopPolicy()


def get_event_loop_policy():
    """Get the current event loop policy."""
    if _event_loop_policy is None:
        _init_event_loop_policy()
    return _event_loop_policy


def set_event_loop_policy(policy):
    """Set the current event loop policy.

    If policy is None, the default policy is restored."""
    global _event_loop_policy
    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
    _event_loop_policy = policy


def get_event_loop():
    """Return an asyncio event loop.

    When called from a coroutine or a callback (e.g. scheduled with call_soon
    or similar API), this function will always return the running event loop.

    If there is no running event loop set, the function will return
    the result of `get_event_loop_policy().get_event_loop()` call.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    current_loop = _get_running_loop()
    if current_loop is not None:
        return current_loop
    return get_event_loop_policy().get_event_loop()


def set_event_loop(loop):
    """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
    get_event_loop_policy().set_event_loop(loop)


def new_event_loop():
    """Equivalent to calling get_event_loop_policy().new_event_loop()."""
    return get_event_loop_policy().new_event_loop()


def get_child_watcher():
    """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
    return get_event_loop_policy().get_child_watcher()


def set_child_watcher(watcher):
    """Equivalent to calling
    get_event_loop_policy().set_child_watcher(watcher)."""
    return get_event_loop_policy().set_child_watcher(watcher)


# Alias pure-Python implementations for testing purposes.
_py__get_running_loop = _get_running_loop
_py__set_running_loop = _set_running_loop
_py_get_running_loop = get_running_loop
_py_get_event_loop = get_event_loop


try:
    # get_event_loop() is one of the most frequently called
    # functions in asyncio. Pure Python implementation is
    # about 4 times slower than C-accelerated.
    from _asyncio import (_get_running_loop, _set_running_loop,
                          get_running_loop, get_event_loop)
except ImportError:
    pass
else:
    # Alias C implementations for testing purposes.
    _c__get_running_loop = _get_running_loop
    _c__set_running_loop = _set_running_loop
    _c_get_running_loop = get_running_loop
    _c_get_event_loop = get_event_loop
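get_running_loop() raises outside a running loop, while get_event_loop() may fall back to the policy and create one; both names are rebound to the C versions when _asyncio imports. A sketch of the difference:

import asyncio

async def inside():
    # Inside a coroutine both calls return the same running loop.
    assert asyncio.get_running_loop() is asyncio.get_event_loop()

try:
    asyncio.get_running_loop()   # no loop is running at module level
except RuntimeError as exc:
    print(exc)                   # 'no running event loop'

asyncio.run(inside())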
58
Tool/Python39/Lib/asyncio/exceptions.py
Normal file
@@ -0,0 +1,58 @@
"""asyncio exceptions."""


__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError',
           'IncompleteReadError', 'LimitOverrunError',
           'SendfileNotAvailableError')


class CancelledError(BaseException):
    """The Future or Task was cancelled."""


class TimeoutError(Exception):
    """The operation exceeded the given deadline."""


class InvalidStateError(Exception):
    """The operation is not allowed in this state."""


class SendfileNotAvailableError(RuntimeError):
    """Sendfile syscall is not available.

    Raised if the OS does not support the sendfile syscall for the given
    socket or file type.
    """


class IncompleteReadError(EOFError):
    """
    Incomplete read error. Attributes:

    - partial: read bytes string before the end of stream was reached
    - expected: total number of expected bytes (or None if unknown)
    """
    def __init__(self, partial, expected):
        r_expected = 'undefined' if expected is None else repr(expected)
        super().__init__(f'{len(partial)} bytes read on a total of '
                         f'{r_expected} expected bytes')
        self.partial = partial
        self.expected = expected

    def __reduce__(self):
        return type(self), (self.partial, self.expected)


class LimitOverrunError(Exception):
    """Reached the buffer limit while looking for a separator.

    Attributes:
    - consumed: total number of bytes to be consumed.
    """
    def __init__(self, message, consumed):
        super().__init__(message)
        self.consumed = consumed

    def __reduce__(self):
        return type(self), (self.args[0], self.consumed)
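IncompleteReadError carries the partial payload, which stream code relies on to recover whatever did arrive. A sketch using the stream API that raises it (the byte counts are illustrative):

import asyncio

async def main():
    reader = asyncio.StreamReader()
    reader.feed_data(b'abc')
    reader.feed_eof()             # only 3 of the 10 requested bytes arrive
    try:
        await reader.readexactly(10)
    except asyncio.IncompleteReadError as exc:
        print(exc.partial, exc.expected)   # b'abc' 10

asyncio.run(main())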
76
Tool/Python39/Lib/asyncio/format_helpers.py
Normal file
@@ -0,0 +1,76 @@
import functools
import inspect
import reprlib
import sys
import traceback

from . import constants


def _get_function_source(func):
    func = inspect.unwrap(func)
    if inspect.isfunction(func):
        code = func.__code__
        return (code.co_filename, code.co_firstlineno)
    if isinstance(func, functools.partial):
        return _get_function_source(func.func)
    if isinstance(func, functools.partialmethod):
        return _get_function_source(func.func)
    return None


def _format_callback_source(func, args):
    func_repr = _format_callback(func, args, None)
    source = _get_function_source(func)
    if source:
        func_repr += f' at {source[0]}:{source[1]}'
    return func_repr


def _format_args_and_kwargs(args, kwargs):
    """Format function arguments and keyword arguments.

    Special case for a single parameter: ('hello',) is formatted as ('hello').
    """
    # use reprlib to limit the length of the output
    items = []
    if args:
        items.extend(reprlib.repr(arg) for arg in args)
    if kwargs:
        items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
    return '({})'.format(', '.join(items))


def _format_callback(func, args, kwargs, suffix=''):
    if isinstance(func, functools.partial):
        suffix = _format_args_and_kwargs(args, kwargs) + suffix
        return _format_callback(func.func, func.args, func.keywords, suffix)

    if hasattr(func, '__qualname__') and func.__qualname__:
        func_repr = func.__qualname__
    elif hasattr(func, '__name__') and func.__name__:
        func_repr = func.__name__
    else:
        func_repr = repr(func)

    func_repr += _format_args_and_kwargs(args, kwargs)
    if suffix:
        func_repr += suffix
    return func_repr


def extract_stack(f=None, limit=None):
    """Replacement for traceback.extract_stack() that only does the
    necessary work for asyncio debug mode.
    """
    if f is None:
        f = sys._getframe().f_back
    if limit is None:
        # Limit the amount of work to a reasonable amount, as extract_stack()
        # can be called for each coroutine and future in debug mode.
        limit = constants.DEBUG_STACK_DEPTH
    stack = traceback.StackSummary.extract(traceback.walk_stack(f),
                                           limit=limit,
                                           lookup_lines=False)
    stack.reverse()
    return stack
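These are private helpers, but their output shape is easy to see by formatting a functools.partial the way Handle reprs do; calling the underscore-prefixed functions directly here is purely illustrative:

import functools
from asyncio import format_helpers

def on_data(fd, data):
    pass

cb = functools.partial(on_data, 1)
# Produces something like "on_data(1)('payload') at <file>:<line>"
print(format_helpers._format_callback_source(cb, ('payload',)))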
423
Tool/Python39/Lib/asyncio/futures.py
Normal file
@@ -0,0 +1,423 @@
"""A Future class similar to the one in PEP 3148."""

__all__ = (
    'Future', 'wrap_future', 'isfuture',
)

import concurrent.futures
import contextvars
import logging
import sys

from . import base_futures
from . import events
from . import exceptions
from . import format_helpers


isfuture = base_futures.isfuture


_PENDING = base_futures._PENDING
_CANCELLED = base_futures._CANCELLED
_FINISHED = base_futures._FINISHED


STACK_DEBUG = logging.DEBUG - 1  # heavy-duty debugging


class Future:
    """This class is *almost* compatible with concurrent.futures.Future.

    Differences:

    - This class is not thread-safe.

    - result() and exception() do not take a timeout argument and
      raise an exception when the future isn't done yet.

    - Callbacks registered with add_done_callback() are always called
      via the event loop's call_soon().

    - This class is not compatible with the wait() and as_completed()
      methods in the concurrent.futures package.

    (In Python 3.4 or later we may be able to unify the implementations.)
    """

    # Class variables serving as defaults for instance variables.
    _state = _PENDING
    _result = None
    _exception = None
    _loop = None
    _source_traceback = None
    _cancel_message = None
    # A saved CancelledError for later chaining as an exception context.
    _cancelled_exc = None

    # This field is used for a dual purpose:
    # - Its presence is a marker to declare that a class implements
    #   the Future protocol (i.e. is intended to be duck-type compatible).
    #   The value must also be not-None, to enable a subclass to declare
    #   that it is not compatible by setting this to None.
    # - It is set by __iter__() below so that Task._step() can tell
    #   the difference between
    #   `await Future()` or `yield from Future()` (correct) vs.
    #   `yield Future()` (incorrect).
    _asyncio_future_blocking = False

    __log_traceback = False

    def __init__(self, *, loop=None):
        """Initialize the future.

        The optional loop argument allows explicitly setting the event
        loop object used by the future. If it's not provided, the future uses
        the default event loop.
        """
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._callbacks = []
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))

    _repr_info = base_futures._future_repr_info

    def __repr__(self):
        return '<{} {}>'.format(self.__class__.__name__,
                                ' '.join(self._repr_info()))

    def __del__(self):
        if not self.__log_traceback:
            # set_exception() was not called, or result() or exception()
            # has consumed the exception
            return
        exc = self._exception
        context = {
            'message':
                f'{self.__class__.__name__} exception was never retrieved',
            'exception': exc,
            'future': self,
        }
        if self._source_traceback:
            context['source_traceback'] = self._source_traceback
        self._loop.call_exception_handler(context)

    def __class_getitem__(cls, type):
        return cls

    @property
    def _log_traceback(self):
        return self.__log_traceback

    @_log_traceback.setter
    def _log_traceback(self, val):
        if bool(val):
            raise ValueError('_log_traceback can only be set to False')
        self.__log_traceback = False

    def get_loop(self):
        """Return the event loop the Future is bound to."""
        loop = self._loop
        if loop is None:
            raise RuntimeError("Future object is not initialized.")
        return loop

    def _make_cancelled_error(self):
        """Create the CancelledError to raise if the Future is cancelled.

        This should only be called once when handling a cancellation since
        it erases the saved context exception value.
        """
        if self._cancel_message is None:
            exc = exceptions.CancelledError()
        else:
            exc = exceptions.CancelledError(self._cancel_message)
        exc.__context__ = self._cancelled_exc
        # Remove the reference since we don't need this anymore.
        self._cancelled_exc = None
        return exc

    def cancel(self, msg=None):
        """Cancel the future and schedule callbacks.

        If the future is already done or cancelled, return False. Otherwise,
        change the future's state to cancelled, schedule the callbacks and
        return True.
        """
        self.__log_traceback = False
        if self._state != _PENDING:
            return False
        self._state = _CANCELLED
        self._cancel_message = msg
        self.__schedule_callbacks()
        return True

    def __schedule_callbacks(self):
        """Internal: Ask the event loop to call all callbacks.

        The callbacks are scheduled to be called as soon as possible. Also
        clears the callback list.
        """
        callbacks = self._callbacks[:]
        if not callbacks:
            return

        self._callbacks[:] = []
        for callback, ctx in callbacks:
            self._loop.call_soon(callback, self, context=ctx)

    def cancelled(self):
        """Return True if the future was cancelled."""
        return self._state == _CANCELLED

    # Don't implement running(); see http://bugs.python.org/issue18699

    def done(self):
        """Return True if the future is done.

        Done means either that a result / exception are available, or that the
        future was cancelled.
        """
        return self._state != _PENDING

    def result(self):
        """Return the result this future represents.

        If the future has been cancelled, raises CancelledError. If the
        future's result isn't yet available, raises InvalidStateError. If
        the future is done and has an exception set, this exception is raised.
        """
        if self._state == _CANCELLED:
            exc = self._make_cancelled_error()
            raise exc
        if self._state != _FINISHED:
            raise exceptions.InvalidStateError('Result is not ready.')
        self.__log_traceback = False
        if self._exception is not None:
            raise self._exception
        return self._result

    def exception(self):
        """Return the exception that was set on this future.

        The exception (or None if no exception was set) is returned only if
        the future is done. If the future has been cancelled, raises
        CancelledError. If the future isn't done yet, raises
        InvalidStateError.
        """
        if self._state == _CANCELLED:
            exc = self._make_cancelled_error()
            raise exc
        if self._state != _FINISHED:
            raise exceptions.InvalidStateError('Exception is not set.')
        self.__log_traceback = False
        return self._exception

    def add_done_callback(self, fn, *, context=None):
        """Add a callback to be run when the future becomes done.

        The callback is called with a single argument - the future object. If
        the future is already done when this is called, the callback is
        scheduled with call_soon.
        """
        if self._state != _PENDING:
            self._loop.call_soon(fn, self, context=context)
        else:
            if context is None:
                context = contextvars.copy_context()
            self._callbacks.append((fn, context))

    # New method not in PEP 3148.

    def remove_done_callback(self, fn):
        """Remove all instances of a callback from the "call when done" list.

        Returns the number of callbacks removed.
        """
        filtered_callbacks = [(f, ctx)
                              for (f, ctx) in self._callbacks
                              if f != fn]
        removed_count = len(self._callbacks) - len(filtered_callbacks)
        if removed_count:
            self._callbacks[:] = filtered_callbacks
        return removed_count

    # So-called internal methods (note: no set_running_or_notify_cancel()).

    def set_result(self, result):
        """Mark the future done and set its result.
|
||||
|
||||
If the future is already done when this method is called, raises
|
||||
InvalidStateError.
|
||||
"""
|
||||
if self._state != _PENDING:
|
||||
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
|
||||
self._result = result
|
||||
self._state = _FINISHED
|
||||
self.__schedule_callbacks()
|
||||
|
||||
def set_exception(self, exception):
|
||||
"""Mark the future done and set an exception.
|
||||
|
||||
If the future is already done when this method is called, raises
|
||||
InvalidStateError.
|
||||
"""
|
||||
if self._state != _PENDING:
|
||||
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
|
||||
if isinstance(exception, type):
|
||||
exception = exception()
|
||||
if type(exception) is StopIteration:
|
||||
raise TypeError("StopIteration interacts badly with generators "
|
||||
"and cannot be raised into a Future")
|
||||
self._exception = exception
|
||||
self._state = _FINISHED
|
||||
self.__schedule_callbacks()
|
||||
self.__log_traceback = True
|
||||
|
||||
def __await__(self):
|
||||
if not self.done():
|
||||
self._asyncio_future_blocking = True
|
||||
yield self # This tells Task to wait for completion.
|
||||
if not self.done():
|
||||
raise RuntimeError("await wasn't used with future")
|
||||
return self.result() # May raise too.
|
||||
|
||||
__iter__ = __await__ # make compatible with 'yield from'.
|
||||
|
||||
|
||||
# Needed for testing purposes.
|
||||
_PyFuture = Future
|
||||
|
||||
|
||||
def _get_loop(fut):
|
||||
# Tries to call Future.get_loop() if it's available.
|
||||
# Otherwise fallbacks to using the old '_loop' property.
|
||||
try:
|
||||
get_loop = fut.get_loop
|
||||
except AttributeError:
|
||||
pass
|
||||
else:
|
||||
return get_loop()
|
||||
return fut._loop
|
||||
|
||||
|
||||
def _set_result_unless_cancelled(fut, result):
|
||||
"""Helper setting the result only if the future was not cancelled."""
|
||||
if fut.cancelled():
|
||||
return
|
||||
fut.set_result(result)
|
||||
|
||||
|
||||
def _convert_future_exc(exc):
|
||||
exc_class = type(exc)
|
||||
if exc_class is concurrent.futures.CancelledError:
|
||||
return exceptions.CancelledError(*exc.args)
|
||||
elif exc_class is concurrent.futures.TimeoutError:
|
||||
return exceptions.TimeoutError(*exc.args)
|
||||
elif exc_class is concurrent.futures.InvalidStateError:
|
||||
return exceptions.InvalidStateError(*exc.args)
|
||||
else:
|
||||
return exc
|
||||
|
||||
|
||||
def _set_concurrent_future_state(concurrent, source):
|
||||
"""Copy state from a future to a concurrent.futures.Future."""
|
||||
assert source.done()
|
||||
if source.cancelled():
|
||||
concurrent.cancel()
|
||||
if not concurrent.set_running_or_notify_cancel():
|
||||
return
|
||||
exception = source.exception()
|
||||
if exception is not None:
|
||||
concurrent.set_exception(_convert_future_exc(exception))
|
||||
else:
|
||||
result = source.result()
|
||||
concurrent.set_result(result)
|
||||
|
||||
|
||||
def _copy_future_state(source, dest):
|
||||
"""Internal helper to copy state from another Future.
|
||||
|
||||
The other Future may be a concurrent.futures.Future.
|
||||
"""
|
||||
assert source.done()
|
||||
if dest.cancelled():
|
||||
return
|
||||
assert not dest.done()
|
||||
if source.cancelled():
|
||||
dest.cancel()
|
||||
else:
|
||||
exception = source.exception()
|
||||
if exception is not None:
|
||||
dest.set_exception(_convert_future_exc(exception))
|
||||
else:
|
||||
result = source.result()
|
||||
dest.set_result(result)
|
||||
|
||||
|
||||
def _chain_future(source, destination):
|
||||
"""Chain two futures so that when one completes, so does the other.
|
||||
|
||||
The result (or exception) of source will be copied to destination.
|
||||
If destination is cancelled, source gets cancelled too.
|
||||
Compatible with both asyncio.Future and concurrent.futures.Future.
|
||||
"""
|
||||
if not isfuture(source) and not isinstance(source,
|
||||
concurrent.futures.Future):
|
||||
raise TypeError('A future is required for source argument')
|
||||
if not isfuture(destination) and not isinstance(destination,
|
||||
concurrent.futures.Future):
|
||||
raise TypeError('A future is required for destination argument')
|
||||
source_loop = _get_loop(source) if isfuture(source) else None
|
||||
dest_loop = _get_loop(destination) if isfuture(destination) else None
|
||||
|
||||
def _set_state(future, other):
|
||||
if isfuture(future):
|
||||
_copy_future_state(other, future)
|
||||
else:
|
||||
_set_concurrent_future_state(future, other)
|
||||
|
||||
def _call_check_cancel(destination):
|
||||
if destination.cancelled():
|
||||
if source_loop is None or source_loop is dest_loop:
|
||||
source.cancel()
|
||||
else:
|
||||
source_loop.call_soon_threadsafe(source.cancel)
|
||||
|
||||
def _call_set_state(source):
|
||||
if (destination.cancelled() and
|
||||
dest_loop is not None and dest_loop.is_closed()):
|
||||
return
|
||||
if dest_loop is None or dest_loop is source_loop:
|
||||
_set_state(destination, source)
|
||||
else:
|
||||
dest_loop.call_soon_threadsafe(_set_state, destination, source)
|
||||
|
||||
destination.add_done_callback(_call_check_cancel)
|
||||
source.add_done_callback(_call_set_state)
|
||||
|
||||
|
||||
def wrap_future(future, *, loop=None):
|
||||
"""Wrap concurrent.futures.Future object."""
|
||||
if isfuture(future):
|
||||
return future
|
||||
assert isinstance(future, concurrent.futures.Future), \
|
||||
f'concurrent.futures.Future is expected, got {future!r}'
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
new_future = loop.create_future()
|
||||
_chain_future(future, new_future)
|
||||
return new_future
|
||||
|
||||
|
||||
try:
|
||||
import _asyncio
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
# _CFuture is needed for tests.
|
||||
Future = _CFuture = _asyncio.Future
|
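The helpers above are easiest to see end to end through wrap_future(): it adapts a concurrent.futures.Future (e.g. from a thread pool) so it can be awaited, with _chain_future() propagating the result or a cancellation in either direction. A minimal usage sketch follows; it is an editorial illustration, not part of the vendored file, and the blocking_work name is made up:

import asyncio
import concurrent.futures


def blocking_work():
    # Placeholder for CPU- or IO-bound work done off the event loop.
    return 42


async def main():
    loop = asyncio.get_running_loop()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        cf = pool.submit(blocking_work)  # a concurrent.futures.Future
        result = await asyncio.wrap_future(cf, loop=loop)  # now awaitable
        print(result)  # prints 42

asyncio.run(main())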
451
Tool/Python39/Lib/asyncio/locks.py
Normal file
@ -0,0 +1,451 @@
"""Synchronization primitives."""
|
||||
|
||||
__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore')
|
||||
|
||||
import collections
|
||||
import warnings
|
||||
|
||||
from . import events
|
||||
from . import exceptions
|
||||
|
||||
|
||||
class _ContextManagerMixin:
|
||||
async def __aenter__(self):
|
||||
await self.acquire()
|
||||
# We have no use for the "as ..." clause in the with
|
||||
# statement for locks.
|
||||
return None
|
||||
|
||||
async def __aexit__(self, exc_type, exc, tb):
|
||||
self.release()
|
||||
|
||||
|
||||
class Lock(_ContextManagerMixin):
|
||||
"""Primitive lock objects.
|
||||
|
||||
A primitive lock is a synchronization primitive that is not owned
|
||||
by a particular coroutine when locked. A primitive lock is in one
|
||||
of two states, 'locked' or 'unlocked'.
|
||||
|
||||
It is created in the unlocked state. It has two basic methods,
|
||||
acquire() and release(). When the state is unlocked, acquire()
|
||||
changes the state to locked and returns immediately. When the
|
||||
state is locked, acquire() blocks until a call to release() in
|
||||
another coroutine changes it to unlocked, then the acquire() call
|
||||
resets it to locked and returns. The release() method should only
|
||||
be called in the locked state; it changes the state to unlocked
|
||||
and returns immediately. If an attempt is made to release an
|
||||
unlocked lock, a RuntimeError will be raised.
|
||||
|
||||
When more than one coroutine is blocked in acquire() waiting for
|
||||
the state to turn to unlocked, only one coroutine proceeds when a
|
||||
release() call resets the state to unlocked; first coroutine which
|
||||
is blocked in acquire() is being processed.
|
||||
|
||||
acquire() is a coroutine and should be called with 'await'.
|
||||
|
||||
Locks also support the asynchronous context management protocol.
|
||||
'async with lock' statement should be used.
|
||||
|
||||
Usage:
|
||||
|
||||
lock = Lock()
|
||||
...
|
||||
await lock.acquire()
|
||||
try:
|
||||
...
|
||||
finally:
|
||||
lock.release()
|
||||
|
||||
Context manager usage:
|
||||
|
||||
lock = Lock()
|
||||
...
|
||||
async with lock:
|
||||
...
|
||||
|
||||
Lock objects can be tested for locking state:
|
||||
|
||||
if not lock.locked():
|
||||
await lock.acquire()
|
||||
else:
|
||||
# lock is acquired
|
||||
...
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, *, loop=None):
|
||||
self._waiters = None
|
||||
self._locked = False
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'locked' if self._locked else 'unlocked'
|
||||
if self._waiters:
|
||||
extra = f'{extra}, waiters:{len(self._waiters)}'
|
||||
return f'<{res[1:-1]} [{extra}]>'
|
||||
|
||||
def locked(self):
|
||||
"""Return True if lock is acquired."""
|
||||
return self._locked
|
||||
|
||||
async def acquire(self):
|
||||
"""Acquire a lock.
|
||||
|
||||
This method blocks until the lock is unlocked, then sets it to
|
||||
locked and returns True.
|
||||
"""
|
||||
if (not self._locked and (self._waiters is None or
|
||||
all(w.cancelled() for w in self._waiters))):
|
||||
self._locked = True
|
||||
return True
|
||||
|
||||
if self._waiters is None:
|
||||
self._waiters = collections.deque()
|
||||
fut = self._loop.create_future()
|
||||
self._waiters.append(fut)
|
||||
|
||||
# Finally block should be called before the CancelledError
|
||||
# handling as we don't want CancelledError to call
|
||||
# _wake_up_first() and attempt to wake up itself.
|
||||
try:
|
||||
try:
|
||||
await fut
|
||||
finally:
|
||||
self._waiters.remove(fut)
|
||||
except exceptions.CancelledError:
|
||||
if not self._locked:
|
||||
self._wake_up_first()
|
||||
raise
|
||||
|
||||
self._locked = True
|
||||
return True
|
||||
|
||||
def release(self):
|
||||
"""Release a lock.
|
||||
|
||||
When the lock is locked, reset it to unlocked, and return.
|
||||
If any other coroutines are blocked waiting for the lock to become
|
||||
unlocked, allow exactly one of them to proceed.
|
||||
|
||||
When invoked on an unlocked lock, a RuntimeError is raised.
|
||||
|
||||
There is no return value.
|
||||
"""
|
||||
if self._locked:
|
||||
self._locked = False
|
||||
self._wake_up_first()
|
||||
else:
|
||||
raise RuntimeError('Lock is not acquired.')
|
||||
|
||||
def _wake_up_first(self):
|
||||
"""Wake up the first waiter if it isn't done."""
|
||||
if not self._waiters:
|
||||
return
|
||||
try:
|
||||
fut = next(iter(self._waiters))
|
||||
except StopIteration:
|
||||
return
|
||||
|
||||
# .done() necessarily means that a waiter will wake up later on and
|
||||
# either take the lock, or, if it was cancelled and lock wasn't
|
||||
# taken already, will hit this again and wake up a new waiter.
|
||||
if not fut.done():
|
||||
fut.set_result(True)
|
||||
|
||||
|
||||
class Event:
|
||||
"""Asynchronous equivalent to threading.Event.
|
||||
|
||||
Class implementing event objects. An event manages a flag that can be set
|
||||
to true with the set() method and reset to false with the clear() method.
|
||||
The wait() method blocks until the flag is true. The flag is initially
|
||||
false.
|
||||
"""
|
||||
|
||||
def __init__(self, *, loop=None):
|
||||
self._waiters = collections.deque()
|
||||
self._value = False
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'set' if self._value else 'unset'
|
||||
if self._waiters:
|
||||
extra = f'{extra}, waiters:{len(self._waiters)}'
|
||||
return f'<{res[1:-1]} [{extra}]>'
|
||||
|
||||
def is_set(self):
|
||||
"""Return True if and only if the internal flag is true."""
|
||||
return self._value
|
||||
|
||||
def set(self):
|
||||
"""Set the internal flag to true. All coroutines waiting for it to
|
||||
become true are awakened. Coroutine that call wait() once the flag is
|
||||
true will not block at all.
|
||||
"""
|
||||
if not self._value:
|
||||
self._value = True
|
||||
|
||||
for fut in self._waiters:
|
||||
if not fut.done():
|
||||
fut.set_result(True)
|
||||
|
||||
def clear(self):
|
||||
"""Reset the internal flag to false. Subsequently, coroutines calling
|
||||
wait() will block until set() is called to set the internal flag
|
||||
to true again."""
|
||||
self._value = False
|
||||
|
||||
async def wait(self):
|
||||
"""Block until the internal flag is true.
|
||||
|
||||
If the internal flag is true on entry, return True
|
||||
immediately. Otherwise, block until another coroutine calls
|
||||
set() to set the flag to true, then return True.
|
||||
"""
|
||||
if self._value:
|
||||
return True
|
||||
|
||||
fut = self._loop.create_future()
|
||||
self._waiters.append(fut)
|
||||
try:
|
||||
await fut
|
||||
return True
|
||||
finally:
|
||||
self._waiters.remove(fut)
|
||||
|
||||
|
||||
class Condition(_ContextManagerMixin):
|
||||
"""Asynchronous equivalent to threading.Condition.
|
||||
|
||||
This class implements condition variable objects. A condition variable
|
||||
allows one or more coroutines to wait until they are notified by another
|
||||
coroutine.
|
||||
|
||||
A new Lock object is created and used as the underlying lock.
|
||||
"""
|
||||
|
||||
def __init__(self, lock=None, *, loop=None):
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
if lock is None:
|
||||
lock = Lock(loop=loop)
|
||||
elif lock._loop is not self._loop:
|
||||
raise ValueError("loop argument must agree with lock")
|
||||
|
||||
self._lock = lock
|
||||
# Export the lock's locked(), acquire() and release() methods.
|
||||
self.locked = lock.locked
|
||||
self.acquire = lock.acquire
|
||||
self.release = lock.release
|
||||
|
||||
self._waiters = collections.deque()
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'locked' if self.locked() else 'unlocked'
|
||||
if self._waiters:
|
||||
extra = f'{extra}, waiters:{len(self._waiters)}'
|
||||
return f'<{res[1:-1]} [{extra}]>'
|
||||
|
||||
async def wait(self):
|
||||
"""Wait until notified.
|
||||
|
||||
If the calling coroutine has not acquired the lock when this
|
||||
method is called, a RuntimeError is raised.
|
||||
|
||||
This method releases the underlying lock, and then blocks
|
||||
until it is awakened by a notify() or notify_all() call for
|
||||
the same condition variable in another coroutine. Once
|
||||
awakened, it re-acquires the lock and returns True.
|
||||
"""
|
||||
if not self.locked():
|
||||
raise RuntimeError('cannot wait on un-acquired lock')
|
||||
|
||||
self.release()
|
||||
try:
|
||||
fut = self._loop.create_future()
|
||||
self._waiters.append(fut)
|
||||
try:
|
||||
await fut
|
||||
return True
|
||||
finally:
|
||||
self._waiters.remove(fut)
|
||||
|
||||
finally:
|
||||
# Must reacquire lock even if wait is cancelled
|
||||
cancelled = False
|
||||
while True:
|
||||
try:
|
||||
await self.acquire()
|
||||
break
|
||||
except exceptions.CancelledError:
|
||||
cancelled = True
|
||||
|
||||
if cancelled:
|
||||
raise exceptions.CancelledError
|
||||
|
||||
async def wait_for(self, predicate):
|
||||
"""Wait until a predicate becomes true.
|
||||
|
||||
The predicate should be a callable which result will be
|
||||
interpreted as a boolean value. The final predicate value is
|
||||
the return value.
|
||||
"""
|
||||
result = predicate()
|
||||
while not result:
|
||||
await self.wait()
|
||||
result = predicate()
|
||||
return result
|
||||
|
||||
def notify(self, n=1):
|
||||
"""By default, wake up one coroutine waiting on this condition, if any.
|
||||
If the calling coroutine has not acquired the lock when this method
|
||||
is called, a RuntimeError is raised.
|
||||
|
||||
This method wakes up at most n of the coroutines waiting for the
|
||||
condition variable; it is a no-op if no coroutines are waiting.
|
||||
|
||||
Note: an awakened coroutine does not actually return from its
|
||||
wait() call until it can reacquire the lock. Since notify() does
|
||||
not release the lock, its caller should.
|
||||
"""
|
||||
if not self.locked():
|
||||
raise RuntimeError('cannot notify on un-acquired lock')
|
||||
|
||||
idx = 0
|
||||
for fut in self._waiters:
|
||||
if idx >= n:
|
||||
break
|
||||
|
||||
if not fut.done():
|
||||
idx += 1
|
||||
fut.set_result(False)
|
||||
|
||||
def notify_all(self):
|
||||
"""Wake up all threads waiting on this condition. This method acts
|
||||
like notify(), but wakes up all waiting threads instead of one. If the
|
||||
calling thread has not acquired the lock when this method is called,
|
||||
a RuntimeError is raised.
|
||||
"""
|
||||
self.notify(len(self._waiters))
|
||||
|
||||
|
||||
class Semaphore(_ContextManagerMixin):
|
||||
"""A Semaphore implementation.
|
||||
|
||||
A semaphore manages an internal counter which is decremented by each
|
||||
acquire() call and incremented by each release() call. The counter
|
||||
can never go below zero; when acquire() finds that it is zero, it blocks,
|
||||
waiting until some other thread calls release().
|
||||
|
||||
Semaphores also support the context management protocol.
|
||||
|
||||
The optional argument gives the initial value for the internal
|
||||
counter; it defaults to 1. If the value given is less than 0,
|
||||
ValueError is raised.
|
||||
"""
|
||||
|
||||
def __init__(self, value=1, *, loop=None):
|
||||
if value < 0:
|
||||
raise ValueError("Semaphore initial value must be >= 0")
|
||||
self._value = value
|
||||
self._waiters = collections.deque()
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
def __repr__(self):
|
||||
res = super().__repr__()
|
||||
extra = 'locked' if self.locked() else f'unlocked, value:{self._value}'
|
||||
if self._waiters:
|
||||
extra = f'{extra}, waiters:{len(self._waiters)}'
|
||||
return f'<{res[1:-1]} [{extra}]>'
|
||||
|
||||
def _wake_up_next(self):
|
||||
while self._waiters:
|
||||
waiter = self._waiters.popleft()
|
||||
if not waiter.done():
|
||||
waiter.set_result(None)
|
||||
return
|
||||
|
||||
def locked(self):
|
||||
"""Returns True if semaphore can not be acquired immediately."""
|
||||
return self._value == 0
|
||||
|
||||
async def acquire(self):
|
||||
"""Acquire a semaphore.
|
||||
|
||||
If the internal counter is larger than zero on entry,
|
||||
decrement it by one and return True immediately. If it is
|
||||
zero on entry, block, waiting until some other coroutine has
|
||||
called release() to make it larger than 0, and then return
|
||||
True.
|
||||
"""
|
||||
while self._value <= 0:
|
||||
fut = self._loop.create_future()
|
||||
self._waiters.append(fut)
|
||||
try:
|
||||
await fut
|
||||
except:
|
||||
# See the similar code in Queue.get.
|
||||
fut.cancel()
|
||||
if self._value > 0 and not fut.cancelled():
|
||||
self._wake_up_next()
|
||||
raise
|
||||
self._value -= 1
|
||||
return True
|
||||
|
||||
def release(self):
|
||||
"""Release a semaphore, incrementing the internal counter by one.
|
||||
When it was zero on entry and another coroutine is waiting for it to
|
||||
become larger than zero again, wake up that coroutine.
|
||||
"""
|
||||
self._value += 1
|
||||
self._wake_up_next()
|
||||
|
||||
|
||||
class BoundedSemaphore(Semaphore):
|
||||
"""A bounded semaphore implementation.
|
||||
|
||||
This raises ValueError in release() if it would increase the value
|
||||
above the initial value.
|
||||
"""
|
||||
|
||||
def __init__(self, value=1, *, loop=None):
|
||||
if loop:
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
self._bound_value = value
|
||||
super().__init__(value, loop=loop)
|
||||
|
||||
def release(self):
|
||||
if self._value >= self._bound_value:
|
||||
raise ValueError('BoundedSemaphore released too many times')
|
||||
super().release()
|
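A short usage sketch for the primitives above (an editorial illustration, not part of the vendored file). The objects are created inside the coroutine so that they bind to the already-running loop, which sidesteps the deprecated loop= argument the constructors warn about:

import asyncio


async def main():
    lock = asyncio.Lock()
    sem = asyncio.BoundedSemaphore(2)  # at most two workers at a time

    async def worker(i):
        async with sem:        # Semaphore supports 'async with'
            async with lock:   # so does Lock, via _ContextManagerMixin
                print(f"worker {i} holds the lock")
            await asyncio.sleep(0)  # yield to the other workers

    await asyncio.gather(*(worker(i) for i in range(4)))

asyncio.run(main())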
7
Tool/Python39/Lib/asyncio/log.py
Normal file
@ -0,0 +1,7 @@
"""Logging configuration."""
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
# Name the logger after the package.
|
||||
logger = logging.getLogger(__package__)
|
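Since the logger is named after the package, an application can turn on the event loop's diagnostics (slow-callback warnings, never-retrieved exceptions, and so on) by configuring the 'asyncio' logger. A minimal sketch, as an editorial illustration:

import logging

# Send asyncio's own messages to stderr at DEBUG level.
logging.basicConfig(level=logging.WARNING)
logging.getLogger('asyncio').setLevel(logging.DEBUG)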
868
Tool/Python39/Lib/asyncio/proactor_events.py
Normal file
@ -0,0 +1,868 @@
"""Event loop using a proactor and related classes.
|
||||
|
||||
A proactor is a "notify-on-completion" multiplexer. Currently a
|
||||
proactor is only implemented on Windows with IOCP.
|
||||
"""
|
||||
|
||||
__all__ = 'BaseProactorEventLoop',
|
||||
|
||||
import io
|
||||
import os
|
||||
import socket
|
||||
import warnings
|
||||
import signal
|
||||
import threading
|
||||
import collections
|
||||
|
||||
from . import base_events
|
||||
from . import constants
|
||||
from . import futures
|
||||
from . import exceptions
|
||||
from . import protocols
|
||||
from . import sslproto
|
||||
from . import transports
|
||||
from . import trsock
|
||||
from .log import logger
|
||||
|
||||
|
||||
def _set_socket_extra(transport, sock):
|
||||
transport._extra['socket'] = trsock.TransportSocket(sock)
|
||||
|
||||
try:
|
||||
transport._extra['sockname'] = sock.getsockname()
|
||||
except socket.error:
|
||||
if transport._loop.get_debug():
|
||||
logger.warning(
|
||||
"getsockname() failed on %r", sock, exc_info=True)
|
||||
|
||||
if 'peername' not in transport._extra:
|
||||
try:
|
||||
transport._extra['peername'] = sock.getpeername()
|
||||
except socket.error:
|
||||
# UDP sockets may not have a peer name
|
||||
transport._extra['peername'] = None
|
||||
|
||||
|
||||
class _ProactorBasePipeTransport(transports._FlowControlMixin,
|
||||
transports.BaseTransport):
|
||||
"""Base class for pipe and socket transports."""
|
||||
|
||||
def __init__(self, loop, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
super().__init__(extra, loop)
|
||||
self._set_extra(sock)
|
||||
self._sock = sock
|
||||
self.set_protocol(protocol)
|
||||
self._server = server
|
||||
self._buffer = None # None or bytearray.
|
||||
self._read_fut = None
|
||||
self._write_fut = None
|
||||
self._pending_write = 0
|
||||
self._conn_lost = 0
|
||||
self._closing = False # Set when close() called.
|
||||
self._eof_written = False
|
||||
if self._server is not None:
|
||||
self._server._attach()
|
||||
self._loop.call_soon(self._protocol.connection_made, self)
|
||||
if waiter is not None:
|
||||
# only wake up the waiter when connection_made() has been called
|
||||
self._loop.call_soon(futures._set_result_unless_cancelled,
|
||||
waiter, None)
|
||||
|
||||
def __repr__(self):
|
||||
info = [self.__class__.__name__]
|
||||
if self._sock is None:
|
||||
info.append('closed')
|
||||
elif self._closing:
|
||||
info.append('closing')
|
||||
if self._sock is not None:
|
||||
info.append(f'fd={self._sock.fileno()}')
|
||||
if self._read_fut is not None:
|
||||
info.append(f'read={self._read_fut!r}')
|
||||
if self._write_fut is not None:
|
||||
info.append(f'write={self._write_fut!r}')
|
||||
if self._buffer:
|
||||
info.append(f'write_bufsize={len(self._buffer)}')
|
||||
if self._eof_written:
|
||||
info.append('EOF written')
|
||||
return '<{}>'.format(' '.join(info))
|
||||
|
||||
def _set_extra(self, sock):
|
||||
self._extra['pipe'] = sock
|
||||
|
||||
def set_protocol(self, protocol):
|
||||
self._protocol = protocol
|
||||
|
||||
def get_protocol(self):
|
||||
return self._protocol
|
||||
|
||||
def is_closing(self):
|
||||
return self._closing
|
||||
|
||||
def close(self):
|
||||
if self._closing:
|
||||
return
|
||||
self._closing = True
|
||||
self._conn_lost += 1
|
||||
if not self._buffer and self._write_fut is None:
|
||||
self._loop.call_soon(self._call_connection_lost, None)
|
||||
if self._read_fut is not None:
|
||||
self._read_fut.cancel()
|
||||
self._read_fut = None
|
||||
|
||||
def __del__(self, _warn=warnings.warn):
|
||||
if self._sock is not None:
|
||||
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
|
||||
self.close()
|
||||
|
||||
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
|
||||
try:
|
||||
if isinstance(exc, OSError):
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r: %s", self, message, exc_info=True)
|
||||
else:
|
||||
self._loop.call_exception_handler({
|
||||
'message': message,
|
||||
'exception': exc,
|
||||
'transport': self,
|
||||
'protocol': self._protocol,
|
||||
})
|
||||
finally:
|
||||
self._force_close(exc)
|
||||
|
||||
def _force_close(self, exc):
|
||||
if self._empty_waiter is not None and not self._empty_waiter.done():
|
||||
if exc is None:
|
||||
self._empty_waiter.set_result(None)
|
||||
else:
|
||||
self._empty_waiter.set_exception(exc)
|
||||
if self._closing:
|
||||
return
|
||||
self._closing = True
|
||||
self._conn_lost += 1
|
||||
if self._write_fut:
|
||||
self._write_fut.cancel()
|
||||
self._write_fut = None
|
||||
if self._read_fut:
|
||||
self._read_fut.cancel()
|
||||
self._read_fut = None
|
||||
self._pending_write = 0
|
||||
self._buffer = None
|
||||
self._loop.call_soon(self._call_connection_lost, exc)
|
||||
|
||||
def _call_connection_lost(self, exc):
|
||||
try:
|
||||
self._protocol.connection_lost(exc)
|
||||
finally:
|
||||
# XXX If there is a pending overlapped read on the other
|
||||
# end then it may fail with ERROR_NETNAME_DELETED if we
|
||||
# just close our end. First calling shutdown() seems to
|
||||
# cure it, but maybe using DisconnectEx() would be better.
|
||||
if hasattr(self._sock, 'shutdown'):
|
||||
self._sock.shutdown(socket.SHUT_RDWR)
|
||||
self._sock.close()
|
||||
self._sock = None
|
||||
server = self._server
|
||||
if server is not None:
|
||||
server._detach()
|
||||
self._server = None
|
||||
|
||||
def get_write_buffer_size(self):
|
||||
size = self._pending_write
|
||||
if self._buffer is not None:
|
||||
size += len(self._buffer)
|
||||
return size
|
||||
|
||||
|
||||
class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
|
||||
transports.ReadTransport):
|
||||
"""Transport for read pipes."""
|
||||
|
||||
def __init__(self, loop, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
self._pending_data = None
|
||||
self._paused = True
|
||||
super().__init__(loop, sock, protocol, waiter, extra, server)
|
||||
|
||||
self._loop.call_soon(self._loop_reading)
|
||||
self._paused = False
|
||||
|
||||
def is_reading(self):
|
||||
return not self._paused and not self._closing
|
||||
|
||||
def pause_reading(self):
|
||||
if self._closing or self._paused:
|
||||
return
|
||||
self._paused = True
|
||||
|
||||
# bpo-33694: Don't cancel self._read_fut because cancelling an
|
||||
# overlapped WSASend() loss silently data with the current proactor
|
||||
# implementation.
|
||||
#
|
||||
# If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
|
||||
# completed (even if HasOverlappedIoCompleted() returns 0), but
|
||||
# Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
|
||||
# error. Once the overlapped is ignored, the IOCP loop will ignores the
|
||||
# completion I/O event and so not read the result of the overlapped
|
||||
# WSARecv().
|
||||
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r pauses reading", self)
|
||||
|
||||
def resume_reading(self):
|
||||
if self._closing or not self._paused:
|
||||
return
|
||||
|
||||
self._paused = False
|
||||
if self._read_fut is None:
|
||||
self._loop.call_soon(self._loop_reading, None)
|
||||
|
||||
data = self._pending_data
|
||||
self._pending_data = None
|
||||
if data is not None:
|
||||
# Call the protocol methode after calling _loop_reading(),
|
||||
# since the protocol can decide to pause reading again.
|
||||
self._loop.call_soon(self._data_received, data)
|
||||
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r resumes reading", self)
|
||||
|
||||
def _eof_received(self):
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r received EOF", self)
|
||||
|
||||
try:
|
||||
keep_open = self._protocol.eof_received()
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
raise
|
||||
except BaseException as exc:
|
||||
self._fatal_error(
|
||||
exc, 'Fatal error: protocol.eof_received() call failed.')
|
||||
return
|
||||
|
||||
if not keep_open:
|
||||
self.close()
|
||||
|
||||
def _data_received(self, data):
|
||||
if self._paused:
|
||||
# Don't call any protocol method while reading is paused.
|
||||
# The protocol will be called on resume_reading().
|
||||
assert self._pending_data is None
|
||||
self._pending_data = data
|
||||
return
|
||||
|
||||
if not data:
|
||||
self._eof_received()
|
||||
return
|
||||
|
||||
if isinstance(self._protocol, protocols.BufferedProtocol):
|
||||
try:
|
||||
protocols._feed_data_to_buffered_proto(self._protocol, data)
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
raise
|
||||
except BaseException as exc:
|
||||
self._fatal_error(exc,
|
||||
'Fatal error: protocol.buffer_updated() '
|
||||
'call failed.')
|
||||
return
|
||||
else:
|
||||
self._protocol.data_received(data)
|
||||
|
||||
def _loop_reading(self, fut=None):
|
||||
data = None
|
||||
try:
|
||||
if fut is not None:
|
||||
assert self._read_fut is fut or (self._read_fut is None and
|
||||
self._closing)
|
||||
self._read_fut = None
|
||||
if fut.done():
|
||||
# deliver data later in "finally" clause
|
||||
data = fut.result()
|
||||
else:
|
||||
# the future will be replaced by next proactor.recv call
|
||||
fut.cancel()
|
||||
|
||||
if self._closing:
|
||||
# since close() has been called we ignore any read data
|
||||
data = None
|
||||
return
|
||||
|
||||
if data == b'':
|
||||
# we got end-of-file so no need to reschedule a new read
|
||||
return
|
||||
|
||||
# bpo-33694: buffer_updated() has currently no fast path because of
|
||||
# a data loss issue caused by overlapped WSASend() cancellation.
|
||||
|
||||
if not self._paused:
|
||||
# reschedule a new read
|
||||
self._read_fut = self._loop._proactor.recv(self._sock, 32768)
|
||||
except ConnectionAbortedError as exc:
|
||||
if not self._closing:
|
||||
self._fatal_error(exc, 'Fatal read error on pipe transport')
|
||||
elif self._loop.get_debug():
|
||||
logger.debug("Read error on pipe transport while closing",
|
||||
exc_info=True)
|
||||
except ConnectionResetError as exc:
|
||||
self._force_close(exc)
|
||||
except OSError as exc:
|
||||
self._fatal_error(exc, 'Fatal read error on pipe transport')
|
||||
except exceptions.CancelledError:
|
||||
if not self._closing:
|
||||
raise
|
||||
else:
|
||||
if not self._paused:
|
||||
self._read_fut.add_done_callback(self._loop_reading)
|
||||
finally:
|
||||
if data is not None:
|
||||
self._data_received(data)
|
||||
|
||||
|
||||
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
|
||||
transports.WriteTransport):
|
||||
"""Transport for write pipes."""
|
||||
|
||||
_start_tls_compatible = True
|
||||
|
||||
def __init__(self, *args, **kw):
|
||||
super().__init__(*args, **kw)
|
||||
self._empty_waiter = None
|
||||
|
||||
def write(self, data):
|
||||
if not isinstance(data, (bytes, bytearray, memoryview)):
|
||||
raise TypeError(
|
||||
f"data argument must be a bytes-like object, "
|
||||
f"not {type(data).__name__}")
|
||||
if self._eof_written:
|
||||
raise RuntimeError('write_eof() already called')
|
||||
if self._empty_waiter is not None:
|
||||
raise RuntimeError('unable to write; sendfile is in progress')
|
||||
|
||||
if not data:
|
||||
return
|
||||
|
||||
if self._conn_lost:
|
||||
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
|
||||
logger.warning('socket.send() raised exception.')
|
||||
self._conn_lost += 1
|
||||
return
|
||||
|
||||
# Observable states:
|
||||
# 1. IDLE: _write_fut and _buffer both None
|
||||
# 2. WRITING: _write_fut set; _buffer None
|
||||
# 3. BACKED UP: _write_fut set; _buffer a bytearray
|
||||
# We always copy the data, so the caller can't modify it
|
||||
# while we're still waiting for the I/O to happen.
|
||||
if self._write_fut is None: # IDLE -> WRITING
|
||||
assert self._buffer is None
|
||||
# Pass a copy, except if it's already immutable.
|
||||
self._loop_writing(data=bytes(data))
|
||||
elif not self._buffer: # WRITING -> BACKED UP
|
||||
# Make a mutable copy which we can extend.
|
||||
self._buffer = bytearray(data)
|
||||
self._maybe_pause_protocol()
|
||||
else: # BACKED UP
|
||||
# Append to buffer (also copies).
|
||||
self._buffer.extend(data)
|
||||
self._maybe_pause_protocol()
|
||||
|
||||
def _loop_writing(self, f=None, data=None):
|
||||
try:
|
||||
if f is not None and self._write_fut is None and self._closing:
|
||||
# XXX most likely self._force_close() has been called, and
|
||||
# it has set self._write_fut to None.
|
||||
return
|
||||
assert f is self._write_fut
|
||||
self._write_fut = None
|
||||
self._pending_write = 0
|
||||
if f:
|
||||
f.result()
|
||||
if data is None:
|
||||
data = self._buffer
|
||||
self._buffer = None
|
||||
if not data:
|
||||
if self._closing:
|
||||
self._loop.call_soon(self._call_connection_lost, None)
|
||||
if self._eof_written:
|
||||
self._sock.shutdown(socket.SHUT_WR)
|
||||
# Now that we've reduced the buffer size, tell the
|
||||
# protocol to resume writing if it was paused. Note that
|
||||
# we do this last since the callback is called immediately
|
||||
# and it may add more data to the buffer (even causing the
|
||||
# protocol to be paused again).
|
||||
self._maybe_resume_protocol()
|
||||
else:
|
||||
self._write_fut = self._loop._proactor.send(self._sock, data)
|
||||
if not self._write_fut.done():
|
||||
assert self._pending_write == 0
|
||||
self._pending_write = len(data)
|
||||
self._write_fut.add_done_callback(self._loop_writing)
|
||||
self._maybe_pause_protocol()
|
||||
else:
|
||||
self._write_fut.add_done_callback(self._loop_writing)
|
||||
if self._empty_waiter is not None and self._write_fut is None:
|
||||
self._empty_waiter.set_result(None)
|
||||
except ConnectionResetError as exc:
|
||||
self._force_close(exc)
|
||||
except OSError as exc:
|
||||
self._fatal_error(exc, 'Fatal write error on pipe transport')
|
||||
|
||||
def can_write_eof(self):
|
||||
return True
|
||||
|
||||
def write_eof(self):
|
||||
self.close()
|
||||
|
||||
def abort(self):
|
||||
self._force_close(None)
|
||||
|
||||
def _make_empty_waiter(self):
|
||||
if self._empty_waiter is not None:
|
||||
raise RuntimeError("Empty waiter is already set")
|
||||
self._empty_waiter = self._loop.create_future()
|
||||
if self._write_fut is None:
|
||||
self._empty_waiter.set_result(None)
|
||||
return self._empty_waiter
|
||||
|
||||
def _reset_empty_waiter(self):
|
||||
self._empty_waiter = None
|
||||
|
||||
|
||||
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
|
||||
def __init__(self, *args, **kw):
|
||||
super().__init__(*args, **kw)
|
||||
self._read_fut = self._loop._proactor.recv(self._sock, 16)
|
||||
self._read_fut.add_done_callback(self._pipe_closed)
|
||||
|
||||
def _pipe_closed(self, fut):
|
||||
if fut.cancelled():
|
||||
# the transport has been closed
|
||||
return
|
||||
assert fut.result() == b''
|
||||
if self._closing:
|
||||
assert self._read_fut is None
|
||||
return
|
||||
assert fut is self._read_fut, (fut, self._read_fut)
|
||||
self._read_fut = None
|
||||
if self._write_fut is not None:
|
||||
self._force_close(BrokenPipeError())
|
||||
else:
|
||||
self.close()
|
||||
|
||||
|
||||
class _ProactorDatagramTransport(_ProactorBasePipeTransport):
|
||||
max_size = 256 * 1024
|
||||
def __init__(self, loop, sock, protocol, address=None,
|
||||
waiter=None, extra=None):
|
||||
self._address = address
|
||||
self._empty_waiter = None
|
||||
# We don't need to call _protocol.connection_made() since our base
|
||||
# constructor does it for us.
|
||||
super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)
|
||||
|
||||
# The base constructor sets _buffer = None, so we set it here
|
||||
self._buffer = collections.deque()
|
||||
self._loop.call_soon(self._loop_reading)
|
||||
|
||||
def _set_extra(self, sock):
|
||||
_set_socket_extra(self, sock)
|
||||
|
||||
def get_write_buffer_size(self):
|
||||
return sum(len(data) for data, _ in self._buffer)
|
||||
|
||||
def abort(self):
|
||||
self._force_close(None)
|
||||
|
||||
def sendto(self, data, addr=None):
|
||||
if not isinstance(data, (bytes, bytearray, memoryview)):
|
||||
raise TypeError('data argument must be bytes-like object (%r)',
|
||||
type(data))
|
||||
|
||||
if not data:
|
||||
return
|
||||
|
||||
if self._address is not None and addr not in (None, self._address):
|
||||
raise ValueError(
|
||||
f'Invalid address: must be None or {self._address}')
|
||||
|
||||
if self._conn_lost and self._address:
|
||||
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
|
||||
logger.warning('socket.sendto() raised exception.')
|
||||
self._conn_lost += 1
|
||||
return
|
||||
|
||||
# Ensure that what we buffer is immutable.
|
||||
self._buffer.append((bytes(data), addr))
|
||||
|
||||
if self._write_fut is None:
|
||||
# No current write operations are active, kick one off
|
||||
self._loop_writing()
|
||||
# else: A write operation is already kicked off
|
||||
|
||||
self._maybe_pause_protocol()
|
||||
|
||||
def _loop_writing(self, fut=None):
|
||||
try:
|
||||
if self._conn_lost:
|
||||
return
|
||||
|
||||
assert fut is self._write_fut
|
||||
self._write_fut = None
|
||||
if fut:
|
||||
# We are in a _loop_writing() done callback, get the result
|
||||
fut.result()
|
||||
|
||||
if not self._buffer or (self._conn_lost and self._address):
|
||||
# The connection has been closed
|
||||
if self._closing:
|
||||
self._loop.call_soon(self._call_connection_lost, None)
|
||||
return
|
||||
|
||||
data, addr = self._buffer.popleft()
|
||||
if self._address is not None:
|
||||
self._write_fut = self._loop._proactor.send(self._sock,
|
||||
data)
|
||||
else:
|
||||
self._write_fut = self._loop._proactor.sendto(self._sock,
|
||||
data,
|
||||
addr=addr)
|
||||
except OSError as exc:
|
||||
self._protocol.error_received(exc)
|
||||
except Exception as exc:
|
||||
self._fatal_error(exc, 'Fatal write error on datagram transport')
|
||||
else:
|
||||
self._write_fut.add_done_callback(self._loop_writing)
|
||||
self._maybe_resume_protocol()
|
||||
|
||||
def _loop_reading(self, fut=None):
|
||||
data = None
|
||||
try:
|
||||
if self._conn_lost:
|
||||
return
|
||||
|
||||
assert self._read_fut is fut or (self._read_fut is None and
|
||||
self._closing)
|
||||
|
||||
self._read_fut = None
|
||||
if fut is not None:
|
||||
res = fut.result()
|
||||
|
||||
if self._closing:
|
||||
# since close() has been called we ignore any read data
|
||||
data = None
|
||||
return
|
||||
|
||||
if self._address is not None:
|
||||
data, addr = res, self._address
|
||||
else:
|
||||
data, addr = res
|
||||
|
||||
if self._conn_lost:
|
||||
return
|
||||
if self._address is not None:
|
||||
self._read_fut = self._loop._proactor.recv(self._sock,
|
||||
self.max_size)
|
||||
else:
|
||||
self._read_fut = self._loop._proactor.recvfrom(self._sock,
|
||||
self.max_size)
|
||||
except OSError as exc:
|
||||
self._protocol.error_received(exc)
|
||||
except exceptions.CancelledError:
|
||||
if not self._closing:
|
||||
raise
|
||||
else:
|
||||
if self._read_fut is not None:
|
||||
self._read_fut.add_done_callback(self._loop_reading)
|
||||
finally:
|
||||
if data:
|
||||
self._protocol.datagram_received(data, addr)
|
||||
|
||||
|
||||
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
|
||||
_ProactorBaseWritePipeTransport,
|
||||
transports.Transport):
|
||||
"""Transport for duplex pipes."""
|
||||
|
||||
def can_write_eof(self):
|
||||
return False
|
||||
|
||||
def write_eof(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class _ProactorSocketTransport(_ProactorReadPipeTransport,
|
||||
_ProactorBaseWritePipeTransport,
|
||||
transports.Transport):
|
||||
"""Transport for connected sockets."""
|
||||
|
||||
_sendfile_compatible = constants._SendfileMode.TRY_NATIVE
|
||||
|
||||
def __init__(self, loop, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
super().__init__(loop, sock, protocol, waiter, extra, server)
|
||||
base_events._set_nodelay(sock)
|
||||
|
||||
def _set_extra(self, sock):
|
||||
_set_socket_extra(self, sock)
|
||||
|
||||
def can_write_eof(self):
|
||||
return True
|
||||
|
||||
def write_eof(self):
|
||||
if self._closing or self._eof_written:
|
||||
return
|
||||
self._eof_written = True
|
||||
if self._write_fut is None:
|
||||
self._sock.shutdown(socket.SHUT_WR)
|
||||
|
||||
|
||||
class BaseProactorEventLoop(base_events.BaseEventLoop):
|
||||
|
||||
def __init__(self, proactor):
|
||||
super().__init__()
|
||||
logger.debug('Using proactor: %s', proactor.__class__.__name__)
|
||||
self._proactor = proactor
|
||||
self._selector = proactor # convenient alias
|
||||
self._self_reading_future = None
|
||||
self._accept_futures = {} # socket file descriptor => Future
|
||||
proactor.set_loop(self)
|
||||
self._make_self_pipe()
|
||||
if threading.current_thread() is threading.main_thread():
|
||||
# wakeup fd can only be installed to a file descriptor from the main thread
|
||||
signal.set_wakeup_fd(self._csock.fileno())
|
||||
|
||||
def _make_socket_transport(self, sock, protocol, waiter=None,
|
||||
extra=None, server=None):
|
||||
return _ProactorSocketTransport(self, sock, protocol, waiter,
|
||||
extra, server)
|
||||
|
||||
def _make_ssl_transport(
|
||||
self, rawsock, protocol, sslcontext, waiter=None,
|
||||
*, server_side=False, server_hostname=None,
|
||||
extra=None, server=None,
|
||||
ssl_handshake_timeout=None):
|
||||
ssl_protocol = sslproto.SSLProtocol(
|
||||
self, protocol, sslcontext, waiter,
|
||||
server_side, server_hostname,
|
||||
ssl_handshake_timeout=ssl_handshake_timeout)
|
||||
_ProactorSocketTransport(self, rawsock, ssl_protocol,
|
||||
extra=extra, server=server)
|
||||
return ssl_protocol._app_transport
|
||||
|
||||
def _make_datagram_transport(self, sock, protocol,
|
||||
address=None, waiter=None, extra=None):
|
||||
return _ProactorDatagramTransport(self, sock, protocol, address,
|
||||
waiter, extra)
|
||||
|
||||
def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
|
||||
extra=None):
|
||||
return _ProactorDuplexPipeTransport(self,
|
||||
sock, protocol, waiter, extra)
|
||||
|
||||
def _make_read_pipe_transport(self, sock, protocol, waiter=None,
|
||||
extra=None):
|
||||
return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
|
||||
|
||||
def _make_write_pipe_transport(self, sock, protocol, waiter=None,
|
||||
extra=None):
|
||||
# We want connection_lost() to be called when other end closes
|
||||
return _ProactorWritePipeTransport(self,
|
||||
sock, protocol, waiter, extra)
|
||||
|
||||
def close(self):
|
||||
if self.is_running():
|
||||
raise RuntimeError("Cannot close a running event loop")
|
||||
if self.is_closed():
|
||||
return
|
||||
|
||||
if threading.current_thread() is threading.main_thread():
|
||||
signal.set_wakeup_fd(-1)
|
||||
# Call these methods before closing the event loop (before calling
|
||||
# BaseEventLoop.close), because they can schedule callbacks with
|
||||
# call_soon(), which is forbidden when the event loop is closed.
|
||||
self._stop_accept_futures()
|
||||
self._close_self_pipe()
|
||||
self._proactor.close()
|
||||
self._proactor = None
|
||||
self._selector = None
|
||||
|
||||
# Close the event loop
|
||||
super().close()
|
||||
|
||||
async def sock_recv(self, sock, n):
|
||||
return await self._proactor.recv(sock, n)
|
||||
|
||||
async def sock_recv_into(self, sock, buf):
|
||||
return await self._proactor.recv_into(sock, buf)
|
||||
|
||||
async def sock_sendall(self, sock, data):
|
||||
return await self._proactor.send(sock, data)
|
||||
|
||||
async def sock_connect(self, sock, address):
|
||||
return await self._proactor.connect(sock, address)
|
||||
|
||||
async def sock_accept(self, sock):
|
||||
return await self._proactor.accept(sock)
|
||||
|
||||
async def _sock_sendfile_native(self, sock, file, offset, count):
|
||||
try:
|
||||
fileno = file.fileno()
|
||||
except (AttributeError, io.UnsupportedOperation) as err:
|
||||
raise exceptions.SendfileNotAvailableError("not a regular file")
|
||||
try:
|
||||
fsize = os.fstat(fileno).st_size
|
||||
except OSError:
|
||||
raise exceptions.SendfileNotAvailableError("not a regular file")
|
||||
blocksize = count if count else fsize
|
||||
if not blocksize:
|
||||
return 0 # empty file
|
||||
|
||||
blocksize = min(blocksize, 0xffff_ffff)
|
||||
end_pos = min(offset + count, fsize) if count else fsize
|
||||
offset = min(offset, fsize)
|
||||
total_sent = 0
|
||||
try:
|
||||
while True:
|
||||
blocksize = min(end_pos - offset, blocksize)
|
||||
if blocksize <= 0:
|
||||
return total_sent
|
||||
await self._proactor.sendfile(sock, file, offset, blocksize)
|
||||
offset += blocksize
|
||||
total_sent += blocksize
|
||||
finally:
|
||||
if total_sent > 0:
|
||||
file.seek(offset)
|
||||
|
||||
async def _sendfile_native(self, transp, file, offset, count):
|
||||
resume_reading = transp.is_reading()
|
||||
transp.pause_reading()
|
||||
await transp._make_empty_waiter()
|
||||
try:
|
||||
return await self.sock_sendfile(transp._sock, file, offset, count,
|
||||
fallback=False)
|
||||
finally:
|
||||
transp._reset_empty_waiter()
|
||||
if resume_reading:
|
||||
transp.resume_reading()
|
||||
|
||||
def _close_self_pipe(self):
|
||||
if self._self_reading_future is not None:
|
||||
self._self_reading_future.cancel()
|
||||
self._self_reading_future = None
|
||||
self._ssock.close()
|
||||
self._ssock = None
|
||||
self._csock.close()
|
||||
self._csock = None
|
||||
self._internal_fds -= 1
|
||||
|
||||
def _make_self_pipe(self):
|
||||
# A self-socket, really. :-)
|
||||
self._ssock, self._csock = socket.socketpair()
|
||||
self._ssock.setblocking(False)
|
||||
self._csock.setblocking(False)
|
||||
self._internal_fds += 1
|
||||
|
||||
def _loop_self_reading(self, f=None):
|
||||
try:
|
||||
if f is not None:
|
||||
f.result() # may raise
|
||||
if self._self_reading_future is not f:
|
||||
# When we scheduled this Future, we assigned it to
|
||||
# _self_reading_future. If it's not there now, something has
|
||||
# tried to cancel the loop while this callback was still in the
|
||||
# queue (see windows_events.ProactorEventLoop.run_forever). In
|
||||
# that case stop here instead of continuing to schedule a new
|
||||
# iteration.
|
||||
return
|
||||
f = self._proactor.recv(self._ssock, 4096)
|
||||
except exceptions.CancelledError:
|
||||
# _close_self_pipe() has been called, stop waiting for data
|
||||
return
|
||||
except (SystemExit, KeyboardInterrupt):
|
||||
raise
|
||||
except BaseException as exc:
|
||||
self.call_exception_handler({
|
||||
'message': 'Error on reading from the event loop self pipe',
|
||||
'exception': exc,
|
||||
'loop': self,
|
||||
})
|
||||
else:
|
||||
self._self_reading_future = f
|
||||
f.add_done_callback(self._loop_self_reading)
|
||||
|
||||
def _write_to_self(self):
|
||||
# This may be called from a different thread, possibly after
|
||||
# _close_self_pipe() has been called or even while it is
|
||||
# running. Guard for self._csock being None or closed. When
|
||||
# a socket is closed, send() raises OSError (with errno set to
|
||||
# EBADF, but let's not rely on the exact error code).
|
||||
csock = self._csock
|
||||
if csock is None:
|
||||
return
|
||||
|
||||
try:
|
||||
csock.send(b'\0')
|
||||
except OSError:
|
||||
if self._debug:
|
||||
logger.debug("Fail to write a null byte into the "
|
||||
"self-pipe socket",
|
||||
exc_info=True)
|
||||
|
||||
def _start_serving(self, protocol_factory, sock,
|
||||
sslcontext=None, server=None, backlog=100,
|
||||
ssl_handshake_timeout=None):
|
||||
|
||||
def loop(f=None):
|
||||
try:
|
||||
if f is not None:
|
||||
conn, addr = f.result()
|
||||
if self._debug:
|
||||
logger.debug("%r got a new connection from %r: %r",
|
||||
server, addr, conn)
|
||||
protocol = protocol_factory()
|
||||
if sslcontext is not None:
|
||||
self._make_ssl_transport(
|
||||
conn, protocol, sslcontext, server_side=True,
|
||||
extra={'peername': addr}, server=server,
|
||||
ssl_handshake_timeout=ssl_handshake_timeout)
|
||||
else:
|
||||
self._make_socket_transport(
|
||||
conn, protocol,
|
||||
extra={'peername': addr}, server=server)
|
||||
if self.is_closed():
|
||||
return
|
||||
f = self._proactor.accept(sock)
|
||||
except OSError as exc:
|
||||
if sock.fileno() != -1:
|
||||
self.call_exception_handler({
|
||||
'message': 'Accept failed on a socket',
|
||||
'exception': exc,
|
||||
'socket': trsock.TransportSocket(sock),
|
||||
})
|
||||
sock.close()
|
||||
elif self._debug:
|
||||
logger.debug("Accept failed on socket %r",
|
||||
sock, exc_info=True)
|
||||
except exceptions.CancelledError:
|
||||
sock.close()
|
||||
else:
|
||||
self._accept_futures[sock.fileno()] = f
|
||||
f.add_done_callback(loop)
|
||||
|
||||
self.call_soon(loop)
|
||||
|
||||
def _process_events(self, event_list):
|
||||
# Events are processed in the IocpProactor._poll() method
|
||||
pass
|
||||
|
||||
def _stop_accept_futures(self):
|
||||
for future in self._accept_futures.values():
|
||||
future.cancel()
|
||||
self._accept_futures.clear()
|
||||
|
||||
def _stop_serving(self, sock):
|
||||
future = self._accept_futures.pop(sock.fileno(), None)
|
||||
if future:
|
||||
future.cancel()
|
||||
self._proactor._stop_serving(sock)
|
||||
sock.close()
|
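BaseProactorEventLoop is not used directly; on Windows it is subclassed by ProactorEventLoop, which pairs it with an IOCP proactor and has been the default loop since Python 3.8. A sketch of selecting it explicitly (editorial illustration, not part of the vendored file):

import asyncio
import sys


async def main():
    # On Windows this prints 'ProactorEventLoop'.
    print(type(asyncio.get_running_loop()).__name__)

if sys.platform == 'win32':
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

asyncio.run(main())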
220
Tool/Python39/Lib/asyncio/protocols.py
Normal file
@ -0,0 +1,220 @@
"""Abstract Protocol base classes."""
|
||||
|
||||
__all__ = (
|
||||
'BaseProtocol', 'Protocol', 'DatagramProtocol',
|
||||
'SubprocessProtocol', 'BufferedProtocol',
|
||||
)
|
||||
|
||||
|
||||
class BaseProtocol:
|
||||
"""Common base class for protocol interfaces.
|
||||
|
||||
Usually user implements protocols that derived from BaseProtocol
|
||||
like Protocol or ProcessProtocol.
|
||||
|
||||
The only case when BaseProtocol should be implemented directly is
|
||||
write-only transport like write pipe
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def connection_made(self, transport):
|
||||
"""Called when a connection is made.
|
||||
|
||||
The argument is the transport representing the pipe connection.
|
||||
To receive data, wait for data_received() calls.
|
||||
When the connection is closed, connection_lost() is called.
|
||||
"""
|
||||
|
||||
def connection_lost(self, exc):
|
||||
"""Called when the connection is lost or closed.
|
||||
|
||||
The argument is an exception object or None (the latter
|
||||
meaning a regular EOF is received or the connection was
|
||||
aborted or closed).
|
||||
"""
|
||||
|
||||
def pause_writing(self):
|
||||
"""Called when the transport's buffer goes over the high-water mark.
|
||||
|
||||
Pause and resume calls are paired -- pause_writing() is called
|
||||
once when the buffer goes strictly over the high-water mark
|
||||
(even if subsequent writes increases the buffer size even
|
||||
more), and eventually resume_writing() is called once when the
|
||||
buffer size reaches the low-water mark.
|
||||
|
||||
Note that if the buffer size equals the high-water mark,
|
||||
pause_writing() is not called -- it must go strictly over.
|
||||
Conversely, resume_writing() is called when the buffer size is
|
||||
equal or lower than the low-water mark. These end conditions
|
||||
are important to ensure that things go as expected when either
|
||||
mark is zero.
|
||||
|
||||
NOTE: This is the only Protocol callback that is not called
|
||||
through EventLoop.call_soon() -- if it were, it would have no
|
||||
effect when it's most needed (when the app keeps writing
|
||||
without yielding until pause_writing() is called).
|
||||
"""
|
||||
|
||||
def resume_writing(self):
|
||||
"""Called when the transport's buffer drains below the low-water mark.
|
||||
|
||||
See pause_writing() for details.
|
||||
"""
|
||||
|
||||
|
||||
class Protocol(BaseProtocol):
|
||||
"""Interface for stream protocol.
|
||||
|
||||
The user should implement this interface. They can inherit from
|
||||
this class but don't need to. The implementations here do
|
||||
nothing (they don't raise exceptions).
|
||||
|
||||
When the user wants to requests a transport, they pass a protocol
|
||||
factory to a utility function (e.g., EventLoop.create_connection()).
|
||||
|
||||
When the connection is made successfully, connection_made() is
|
||||
called with a suitable transport object. Then data_received()
|
||||
will be called 0 or more times with data (bytes) received from the
|
||||
transport; finally, connection_lost() will be called exactly once
|
||||
with either an exception object or None as an argument.
|
||||
|
||||
State machine of calls:
|
||||
|
||||
start -> CM [-> DR*] [-> ER?] -> CL -> end
|
||||
|
||||
* CM: connection_made()
|
||||
* DR: data_received()
|
||||
* ER: eof_received()
|
||||
* CL: connection_lost()
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def data_received(self, data):
|
||||
"""Called when some data is received.
|
||||
|
||||
The argument is a bytes object.
|
||||
"""
|
||||
|
||||
def eof_received(self):
|
||||
"""Called when the other end calls write_eof() or equivalent.
|
||||
|
||||
If this returns a false value (including None), the transport
|
||||
will close itself. If it returns a true value, closing the
|
||||
transport is up to the protocol.
|
||||
"""
|
||||
|
||||
|
||||
class BufferedProtocol(BaseProtocol):
|
||||
"""Interface for stream protocol with manual buffer control.
|
||||
|
||||
Important: this has been added to asyncio in Python 3.7
|
||||
*on a provisional basis*! Consider it as an experimental API that
|
||||
might be changed or removed in Python 3.8.
|
||||
|
||||
Event methods, such as `create_server` and `create_connection`,
|
||||
accept factories that return protocols that implement this interface.
|
||||
|
||||
The idea of BufferedProtocol is that it allows to manually allocate
|
||||
and control the receive buffer. Event loops can then use the buffer
|
||||
provided by the protocol to avoid unnecessary data copies. This
|
||||
can result in noticeable performance improvement for protocols that
|
||||
receive big amounts of data. Sophisticated protocols can allocate
|
||||
the buffer only once at creation time.
|
||||
|
||||
State machine of calls:
|
||||
|
||||
start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
|
||||
|
||||
* CM: connection_made()
|
||||
* GB: get_buffer()
|
||||
* BU: buffer_updated()
|
||||
* ER: eof_received()
|
||||
* CL: connection_lost()
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def get_buffer(self, sizehint):
|
||||
"""Called to allocate a new receive buffer.
|
||||
|
||||
*sizehint* is a recommended minimal size for the returned
|
||||
buffer. When set to -1, the buffer size can be arbitrary.
|
||||
|
||||
Must return an object that implements the
|
||||
:ref:`buffer protocol <bufferobjects>`.
|
||||
It is an error to return a zero-sized buffer.
|
||||
"""
|
||||
|
||||
def buffer_updated(self, nbytes):
|
||||
"""Called when the buffer was updated with the received data.
|
||||
|
||||
*nbytes* is the total number of bytes that were written to
|
||||
the buffer.
|
||||
"""
|
||||
|
||||
def eof_received(self):
|
||||
"""Called when the other end calls write_eof() or equivalent.
|
||||
|
||||
If this returns a false value (including None), the transport
|
||||
will close itself. If it returns a true value, closing the
|
||||
transport is up to the protocol.
|
||||
"""
|
||||
|
||||
|
||||
class DatagramProtocol(BaseProtocol):
|
||||
"""Interface for datagram protocol."""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def datagram_received(self, data, addr):
|
||||
"""Called when some datagram is received."""
|
||||
|
||||
def error_received(self, exc):
|
||||
"""Called when a send or receive operation raises an OSError.
|
||||
|
||||
(Other than BlockingIOError or InterruptedError.)
|
||||
"""
|
||||
|
||||
|
||||
class SubprocessProtocol(BaseProtocol):
|
||||
"""Interface for protocol for subprocess calls."""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def pipe_data_received(self, fd, data):
|
||||
"""Called when the subprocess writes data into stdout/stderr pipe.
|
||||
|
||||
fd is int file descriptor.
|
||||
data is bytes object.
|
||||
"""
|
||||
|
||||
def pipe_connection_lost(self, fd, exc):
|
||||
"""Called when a file descriptor associated with the child process is
|
||||
closed.
|
||||
|
||||
fd is the int file descriptor that was closed.
|
||||
"""
|
||||
|
||||
def process_exited(self):
|
||||
"""Called when subprocess has exited."""
|
||||
|
||||
|
||||
def _feed_data_to_buffered_proto(proto, data):
|
||||
data_len = len(data)
|
||||
while data_len:
|
||||
buf = proto.get_buffer(data_len)
|
||||
buf_len = len(buf)
|
||||
if not buf_len:
|
||||
raise RuntimeError('get_buffer() returned an empty buffer')
|
||||
|
||||
if buf_len >= data_len:
|
||||
buf[:data_len] = data
|
||||
proto.buffer_updated(data_len)
|
||||
return
|
||||
else:
|
||||
buf[:buf_len] = data[:buf_len]
|
||||
proto.buffer_updated(buf_len)
|
||||
data = data[buf_len:]
|
||||
data_len = len(data)
|
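The BufferedProtocol state machine above (CM [-> GB [-> BU?]]* [-> ER?] -> CL) can be exercised without an event loop through the module's own _feed_data_to_buffered_proto() helper. A minimal sketch, using a deliberately tiny buffer so the GB/BU cycle runs several times:

from asyncio.protocols import BufferedProtocol, _feed_data_to_buffered_proto

class SmallBuffer(BufferedProtocol):
    def __init__(self):
        self._buf = bytearray(4)       # tiny on purpose: forces several cycles
        self.received = bytearray()

    def get_buffer(self, sizehint):    # GB: hand the loop our buffer
        return self._buf

    def buffer_updated(self, nbytes):  # BU: first nbytes of the buffer are new
        self.received += self._buf[:nbytes]

proto = SmallBuffer()
_feed_data_to_buffered_proto(proto, b'hello buffered world')
assert bytes(proto.received) == b'hello buffered world'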
252
Tool/Python39/Lib/asyncio/queues.py
Normal file
@@ -0,0 +1,252 @@
__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')

import collections
import heapq
import warnings

from . import events
from . import locks


class QueueEmpty(Exception):
    """Raised when Queue.get_nowait() is called on an empty Queue."""
    pass


class QueueFull(Exception):
    """Raised when the Queue.put_nowait() method is called on a full Queue."""
    pass


class Queue:
    """A queue, useful for coordinating producer and consumer coroutines.

    If maxsize is less than or equal to zero, the queue size is infinite. If it
    is an integer greater than 0, then "await put()" will block when the
    queue reaches maxsize, until an item is removed by get().

    Unlike the standard library Queue, you can reliably know this Queue's size
    with qsize(), since your single-threaded asyncio application won't be
    interrupted between calling qsize() and doing an operation on the Queue.
    """

    def __init__(self, maxsize=0, *, loop=None):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
            warnings.warn("The loop argument is deprecated since Python 3.8, "
                          "and scheduled for removal in Python 3.10.",
                          DeprecationWarning, stacklevel=2)
        self._maxsize = maxsize

        # Futures.
        self._getters = collections.deque()
        # Futures.
        self._putters = collections.deque()
        self._unfinished_tasks = 0
        self._finished = locks.Event(loop=loop)
        self._finished.set()
        self._init(maxsize)

    # These three are overridable in subclasses.

    def _init(self, maxsize):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)

    # End of the overridable methods.

    def _wakeup_next(self, waiters):
        # Wake up the next waiter (if any) that isn't cancelled.
        while waiters:
            waiter = waiters.popleft()
            if not waiter.done():
                waiter.set_result(None)
                break

    def __repr__(self):
        return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'

    def __str__(self):
        return f'<{type(self).__name__} {self._format()}>'

    def __class_getitem__(cls, type):
        return cls

    def _format(self):
        result = f'maxsize={self._maxsize!r}'
        if getattr(self, '_queue', None):
            result += f' _queue={list(self._queue)!r}'
        if self._getters:
            result += f' _getters[{len(self._getters)}]'
        if self._putters:
            result += f' _putters[{len(self._putters)}]'
        if self._unfinished_tasks:
            result += f' tasks={self._unfinished_tasks}'
        return result

    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)

    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize

    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return not self._queue

    def full(self):
        """Return True if there are maxsize items in the queue.

        Note: if the Queue was initialized with maxsize=0 (the default),
        then full() is never True.
        """
        if self._maxsize <= 0:
            return False
        else:
            return self.qsize() >= self._maxsize

    async def put(self, item):
        """Put an item into the queue.

        Put an item into the queue. If the queue is full, wait until a free
        slot is available before adding item.
        """
        while self.full():
            putter = self._loop.create_future()
            self._putters.append(putter)
            try:
                await putter
            except:
                putter.cancel()  # Just in case putter is not done yet.
                try:
                    # Clean self._putters from canceled putters.
                    self._putters.remove(putter)
                except ValueError:
                    # The putter could be removed from self._putters by a
                    # previous get_nowait call.
                    pass
                if not self.full() and not putter.cancelled():
                    # We were woken up by get_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._putters)
                raise
        return self.put_nowait(item)

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise QueueFull.
        """
        if self.full():
            raise QueueFull
        self._put(item)
        self._unfinished_tasks += 1
        self._finished.clear()
        self._wakeup_next(self._getters)

    async def get(self):
        """Remove and return an item from the queue.

        If queue is empty, wait until an item is available.
        """
        while self.empty():
            getter = self._loop.create_future()
            self._getters.append(getter)
            try:
                await getter
            except:
                getter.cancel()  # Just in case getter is not done yet.
                try:
                    # Clean self._getters from canceled getters.
                    self._getters.remove(getter)
                except ValueError:
                    # The getter could be removed from self._getters by a
                    # previous put_nowait call.
                    pass
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call.  Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
        return self.get_nowait()

    def get_nowait(self):
        """Remove and return an item from the queue.

        Return an item if one is immediately available, else raise QueueEmpty.
        """
        if self.empty():
            raise QueueEmpty
        item = self._get()
        self._wakeup_next(self._putters)
        return item

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items have
        been processed (meaning that a task_done() call was received for every
        item that had been put() into the queue).

        Raises ValueError if called more times than there were items placed in
        the queue.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    async def join(self):
        """Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer calls task_done() to
        indicate that the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        if self._unfinished_tasks > 0:
            await self._finished.wait()


class PriorityQueue(Queue):
    """A subclass of Queue; retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item, heappush=heapq.heappush):
        heappush(self._queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self._queue)


class LifoQueue(Queue):
    """A subclass of Queue that retrieves most recently added entries first."""

    def _init(self, maxsize):
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        return self._queue.pop()
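A minimal sketch of the producer/consumer pattern the Queue docstring describes: put() blocks for backpressure once maxsize is reached, and join() unblocks only after a matching task_done() for every item:

import asyncio

async def producer(q):
    for i in range(5):
        await q.put(i)        # blocks while the queue already holds maxsize items

async def consumer(q):
    while True:
        item = await q.get()
        q.task_done()         # one task_done() per get(), or join() never returns

async def main():
    q = asyncio.Queue(maxsize=2)
    worker = asyncio.create_task(consumer(q))
    await producer(q)
    await q.join()            # resumes once _unfinished_tasks drops to zero
    worker.cancel()

asyncio.run(main())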
74
Tool/Python39/Lib/asyncio/runners.py
Normal file
@@ -0,0 +1,74 @@
__all__ = 'run',

from . import coroutines
from . import events
from . import tasks


def run(main, *, debug=None):
    """Execute the coroutine and return the result.

    This function runs the passed coroutine, taking care of
    managing the asyncio event loop and finalizing asynchronous
    generators.

    This function cannot be called when another asyncio event loop is
    running in the same thread.

    If debug is True, the event loop will be run in debug mode.

    This function always creates a new event loop and closes it at the end.
    It should be used as a main entry point for asyncio programs, and should
    ideally only be called once.

    Example:

        async def main():
            await asyncio.sleep(1)
            print('hello')

        asyncio.run(main())
    """
    if events._get_running_loop() is not None:
        raise RuntimeError(
            "asyncio.run() cannot be called from a running event loop")

    if not coroutines.iscoroutine(main):
        raise ValueError("a coroutine was expected, got {!r}".format(main))

    loop = events.new_event_loop()
    try:
        events.set_event_loop(loop)
        if debug is not None:
            loop.set_debug(debug)
        return loop.run_until_complete(main)
    finally:
        try:
            _cancel_all_tasks(loop)
            loop.run_until_complete(loop.shutdown_asyncgens())
            loop.run_until_complete(loop.shutdown_default_executor())
        finally:
            events.set_event_loop(None)
            loop.close()


def _cancel_all_tasks(loop):
    to_cancel = tasks.all_tasks(loop)
    if not to_cancel:
        return

    for task in to_cancel:
        task.cancel()

    loop.run_until_complete(
        tasks._gather(*to_cancel, loop=loop, return_exceptions=True))

    for task in to_cancel:
        if task.cancelled():
            continue
        if task.exception() is not None:
            loop.call_exception_handler({
                'message': 'unhandled exception during asyncio.run() shutdown',
                'exception': task.exception(),
                'task': task,
            })
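The shutdown path above (_cancel_all_tasks()) means any task still pending when the main coroutine returns is cancelled rather than leaked. A small runnable sketch of that behavior:

import asyncio

async def background():
    try:
        await asyncio.sleep(3600)
    except asyncio.CancelledError:
        print('background task cancelled at shutdown')
        raise                  # re-raise so the task ends up cancelled, not errored

async def main():
    asyncio.create_task(background())
    await asyncio.sleep(0)     # let the task start, then return immediately

asyncio.run(main())            # _cancel_all_tasks() cancels it on the way out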
1099
Tool/Python39/Lib/asyncio/selector_events.py
Normal file
File diff suppressed because it is too large
733
Tool/Python39/Lib/asyncio/sslproto.py
Normal file
@@ -0,0 +1,733 @@
import collections
import warnings
try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None

from . import constants
from . import protocols
from . import transports
from .log import logger


def _create_transport_context(server_side, server_hostname):
    if server_side:
        raise ValueError('Server side SSL needs a valid SSLContext')

    # Client side may pass ssl=True to use a default
    # context; in that case the sslcontext passed is None.
    # The default is secure for client connections.
    # Python 3.4+: use up-to-date strong settings.
    sslcontext = ssl.create_default_context()
    if not server_hostname:
        sslcontext.check_hostname = False
    return sslcontext


# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"
_DO_HANDSHAKE = "DO_HANDSHAKE"
_WRAPPED = "WRAPPED"
_SHUTDOWN = "SHUTDOWN"


class _SSLPipe(object):
    """An SSL "Pipe".

    An SSL pipe allows you to communicate with an SSL/TLS protocol instance
    through memory buffers. It can be used to implement a security layer for an
    existing connection where you don't have access to the connection's file
    descriptor, or for some reason you don't want to use it.

    An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
    data is passed through untransformed. In wrapped mode, application level
    data is encrypted to SSL record level data and vice versa. The SSL record
    level is the lowest level in the SSL protocol suite and is what travels
    as-is over the wire.

    An SslPipe initially is in "unwrapped" mode. To start SSL, call
    do_handshake(). To shutdown SSL again, call unwrap().
    """

    max_size = 256 * 1024  # Buffer size passed to read()

    def __init__(self, context, server_side, server_hostname=None):
        """
        The *context* argument specifies the ssl.SSLContext to use.

        The *server_side* argument indicates whether this is a server side or
        client side transport.

        The optional *server_hostname* argument can be used to specify the
        hostname you are connecting to. You may only specify this parameter if
        the _ssl module supports Server Name Indication (SNI).
        """
        self._context = context
        self._server_side = server_side
        self._server_hostname = server_hostname
        self._state = _UNWRAPPED
        self._incoming = ssl.MemoryBIO()
        self._outgoing = ssl.MemoryBIO()
        self._sslobj = None
        self._need_ssldata = False
        self._handshake_cb = None
        self._shutdown_cb = None

    @property
    def context(self):
        """The SSL context passed to the constructor."""
        return self._context

    @property
    def ssl_object(self):
        """The internal ssl.SSLObject instance.

        Return None if the pipe is not wrapped.
        """
        return self._sslobj

    @property
    def need_ssldata(self):
        """Whether more record level data is needed to complete a handshake
        that is currently in progress."""
        return self._need_ssldata

    @property
    def wrapped(self):
        """
        Whether a security layer is currently in effect.

        Return False during handshake.
        """
        return self._state == _WRAPPED

    def do_handshake(self, callback=None):
        """Start the SSL handshake.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the handshake is complete. The callback will be
        called with None if successful, else an exception instance.
        """
        if self._state != _UNWRAPPED:
            raise RuntimeError('handshake in progress or completed')
        self._sslobj = self._context.wrap_bio(
            self._incoming, self._outgoing,
            server_side=self._server_side,
            server_hostname=self._server_hostname)
        self._state = _DO_HANDSHAKE
        self._handshake_cb = callback
        ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
        assert len(appdata) == 0
        return ssldata

    def shutdown(self, callback=None):
        """Start the SSL shutdown sequence.

        Return a list of ssldata. A ssldata element is a list of buffers

        The optional *callback* argument can be used to install a callback that
        will be called when the shutdown is complete. The callback will be
        called without arguments.
        """
        if self._state == _UNWRAPPED:
            raise RuntimeError('no security layer present')
        if self._state == _SHUTDOWN:
            raise RuntimeError('shutdown in progress')
        assert self._state in (_WRAPPED, _DO_HANDSHAKE)
        self._state = _SHUTDOWN
        self._shutdown_cb = callback
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']
        return ssldata

    def feed_eof(self):
        """Send a potentially "ragged" EOF.

        This method will raise an SSL_ERROR_EOF exception if the EOF is
        unexpected.
        """
        self._incoming.write_eof()
        ssldata, appdata = self.feed_ssldata(b'')
        assert appdata == [] or appdata == [b'']

    def feed_ssldata(self, data, only_handshake=False):
        """Feed SSL record level data into the pipe.

        The data must be a bytes instance. It is OK to send an empty bytes
        instance. This can be used to get ssldata for a handshake initiated by
        this endpoint.

        Return a (ssldata, appdata) tuple. The ssldata element is a list of
        buffers containing SSL data that needs to be sent to the remote SSL.

        The appdata element is a list of buffers containing plaintext data that
        needs to be forwarded to the application. The appdata list may contain
        an empty buffer indicating an SSL "close_notify" alert. This alert must
        be acknowledged by calling shutdown().
        """
        if self._state == _UNWRAPPED:
            # If unwrapped, pass plaintext data straight through.
            if data:
                appdata = [data]
            else:
                appdata = []
            return ([], appdata)

        self._need_ssldata = False
        if data:
            self._incoming.write(data)

        ssldata = []
        appdata = []
        try:
            if self._state == _DO_HANDSHAKE:
                # Call do_handshake() until it doesn't raise anymore.
                self._sslobj.do_handshake()
                self._state = _WRAPPED
                if self._handshake_cb:
                    self._handshake_cb(None)
                if only_handshake:
                    return (ssldata, appdata)
                # Handshake done: execute the wrapped block

            if self._state == _WRAPPED:
                # Main state: read data from SSL until close_notify
                while True:
                    chunk = self._sslobj.read(self.max_size)
                    appdata.append(chunk)
                    if not chunk:  # close_notify
                        break

            elif self._state == _SHUTDOWN:
                # Call shutdown() until it doesn't raise anymore.
                self._sslobj.unwrap()
                self._sslobj = None
                self._state = _UNWRAPPED
                if self._shutdown_cb:
                    self._shutdown_cb()

            elif self._state == _UNWRAPPED:
                # Drain possible plaintext data after close_notify.
                appdata.append(self._incoming.read())
        except (ssl.SSLError, ssl.CertificateError) as exc:
            exc_errno = getattr(exc, 'errno', None)
            if exc_errno not in (
                    ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
                    ssl.SSL_ERROR_SYSCALL):
                if self._state == _DO_HANDSHAKE and self._handshake_cb:
                    self._handshake_cb(exc)
                raise
            self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

        # Check for record level data that needs to be sent back.
        # Happens for the initial handshake and renegotiations.
        if self._outgoing.pending:
            ssldata.append(self._outgoing.read())
        return (ssldata, appdata)

    def feed_appdata(self, data, offset=0):
        """Feed plaintext data into the pipe.

        Return an (ssldata, offset) tuple. The ssldata element is a list of
        buffers containing record level data that needs to be sent to the
        remote SSL instance. The offset is the number of plaintext bytes that
        were processed, which may be less than the length of data.

        NOTE: In case of short writes, this call MUST be retried with the SAME
        buffer passed into the *data* argument (i.e. the id() must be the
        same). This is an OpenSSL requirement. A further particularity is that
        a short write will always have offset == 0, because the _ssl module
        does not enable partial writes. And even though the offset is zero,
        there will still be encrypted data in ssldata.
        """
        assert 0 <= offset <= len(data)
        if self._state == _UNWRAPPED:
            # pass through data in unwrapped mode
            if offset < len(data):
                ssldata = [data[offset:]]
            else:
                ssldata = []
            return (ssldata, len(data))

        ssldata = []
        view = memoryview(data)
        while True:
            self._need_ssldata = False
            try:
                if offset < len(view):
                    offset += self._sslobj.write(view[offset:])
            except ssl.SSLError as exc:
                # It is not allowed to call write() after unwrap() until the
                # close_notify is acknowledged. We return the condition to the
                # caller as a short write.
                exc_errno = getattr(exc, 'errno', None)
                if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
                    exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ
                if exc_errno not in (ssl.SSL_ERROR_WANT_READ,
                                     ssl.SSL_ERROR_WANT_WRITE,
                                     ssl.SSL_ERROR_SYSCALL):
                    raise
                self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ)

            # See if there's any record level data back for us.
            if self._outgoing.pending:
                ssldata.append(self._outgoing.read())
            if offset == len(view) or self._need_ssldata:
                break
        return (ssldata, offset)


class _SSLProtocolTransport(transports._FlowControlMixin,
                            transports.Transport):

    _sendfile_compatible = constants._SendfileMode.FALLBACK

    def __init__(self, loop, ssl_protocol):
        self._loop = loop
        # SSLProtocol instance
        self._ssl_protocol = ssl_protocol
        self._closed = False

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._ssl_protocol._get_extra_info(name, default)

    def set_protocol(self, protocol):
        self._ssl_protocol._set_app_protocol(protocol)

    def get_protocol(self):
        return self._ssl_protocol._app_protocol

    def is_closing(self):
        return self._closed

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously.  No more data
        will be received.  After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) called
        with None as its argument.
        """
        self._closed = True
        self._ssl_protocol._start_shutdown()

    def __del__(self, _warn=warnings.warn):
        if not self._closed:
            _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
            self.close()

    def is_reading(self):
        tr = self._ssl_protocol._transport
        if tr is None:
            raise RuntimeError('SSL transport has not been initialized yet')
        return tr.is_reading()

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        self._ssl_protocol._transport.pause_reading()

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        self._ssl_protocol._transport.resume_reading()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods.  If specified,
        the low-water limit must be less than or equal to the
        high-water limit.  Neither value can be negative.

        The defaults are implementation-specific.  If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit.  Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty.  Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        self._ssl_protocol._transport.set_write_buffer_limits(high, low)

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        return self._ssl_protocol._transport.get_write_buffer_size()

    @property
    def _protocol_paused(self):
        # Required for sendfile fallback pause_writing/resume_writing logic
        return self._ssl_protocol._transport._protocol_paused

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(f"data: expecting a bytes-like instance, "
                            f"got {type(data).__name__}")
        if not data:
            return
        self._ssl_protocol._write_appdata(data)

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        return False

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        self._ssl_protocol._abort()
        self._closed = True


class SSLProtocol(protocols.Protocol):
    """SSL protocol.

    Implementation of SSL on top of a socket using incoming and outgoing
    buffers which are ssl.MemoryBIO objects.
    """

    def __init__(self, loop, app_protocol, sslcontext, waiter,
                 server_side=False, server_hostname=None,
                 call_connection_made=True,
                 ssl_handshake_timeout=None):
        if ssl is None:
            raise RuntimeError('stdlib ssl module not available')

        if ssl_handshake_timeout is None:
            ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT
        elif ssl_handshake_timeout <= 0:
            raise ValueError(
                f"ssl_handshake_timeout should be a positive number, "
                f"got {ssl_handshake_timeout}")

        if not sslcontext:
            sslcontext = _create_transport_context(
                server_side, server_hostname)

        self._server_side = server_side
        if server_hostname and not server_side:
            self._server_hostname = server_hostname
        else:
            self._server_hostname = None
        self._sslcontext = sslcontext
        # SSL-specific extra info. More info are set when the handshake
        # completes.
        self._extra = dict(sslcontext=sslcontext)

        # App data write buffering
        self._write_backlog = collections.deque()
        self._write_buffer_size = 0

        self._waiter = waiter
        self._loop = loop
        self._set_app_protocol(app_protocol)
        self._app_transport = _SSLProtocolTransport(self._loop, self)
        # _SSLPipe instance (None until the connection is made)
        self._sslpipe = None
        self._session_established = False
        self._in_handshake = False
        self._in_shutdown = False
        # transport, ex: SelectorSocketTransport
        self._transport = None
        self._call_connection_made = call_connection_made
        self._ssl_handshake_timeout = ssl_handshake_timeout

    def _set_app_protocol(self, app_protocol):
        self._app_protocol = app_protocol
        self._app_protocol_is_buffer = \
            isinstance(app_protocol, protocols.BufferedProtocol)

    def _wakeup_waiter(self, exc=None):
        if self._waiter is None:
            return
        if not self._waiter.cancelled():
            if exc is not None:
                self._waiter.set_exception(exc)
            else:
                self._waiter.set_result(None)
        self._waiter = None

    def connection_made(self, transport):
        """Called when the low-level connection is made.

        Start the SSL handshake.
        """
        self._transport = transport
        self._sslpipe = _SSLPipe(self._sslcontext,
                                 self._server_side,
                                 self._server_hostname)
        self._start_handshake()

    def connection_lost(self, exc):
        """Called when the low-level connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """
        if self._session_established:
            self._session_established = False
            self._loop.call_soon(self._app_protocol.connection_lost, exc)
        else:
            # Most likely an exception occurred while in SSL handshake.
            # Just mark the app transport as closed so that its __del__
            # doesn't complain.
            if self._app_transport is not None:
                self._app_transport._closed = True
        self._transport = None
        self._app_transport = None
        if getattr(self, '_handshake_timeout_handle', None):
            self._handshake_timeout_handle.cancel()
        self._wakeup_waiter(exc)
        self._app_protocol = None
        self._sslpipe = None

    def pause_writing(self):
        """Called when the low-level transport's buffer goes over
        the high-water mark.
        """
        self._app_protocol.pause_writing()

    def resume_writing(self):
        """Called when the low-level transport's buffer drains below
        the low-water mark.
        """
        self._app_protocol.resume_writing()

    def data_received(self, data):
        """Called when some SSL data is received.

        The argument is a bytes object.
        """
        if self._sslpipe is None:
            # transport closing, sslpipe is destroyed
            return

        try:
            ssldata, appdata = self._sslpipe.feed_ssldata(data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as e:
            self._fatal_error(e, 'SSL error in data received')
            return

        for chunk in ssldata:
            self._transport.write(chunk)

        for chunk in appdata:
            if chunk:
                try:
                    if self._app_protocol_is_buffer:
                        protocols._feed_data_to_buffered_proto(
                            self._app_protocol, chunk)
                    else:
                        self._app_protocol.data_received(chunk)
                except (SystemExit, KeyboardInterrupt):
                    raise
                except BaseException as ex:
                    self._fatal_error(
                        ex, 'application protocol failed to receive SSL data')
                    return
            else:
                self._start_shutdown()
                break

    def eof_received(self):
        """Called when the other end of the low-level stream
        is half-closed.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """
        try:
            if self._loop.get_debug():
                logger.debug("%r received EOF", self)

            self._wakeup_waiter(ConnectionResetError)

            if not self._in_handshake:
                keep_open = self._app_protocol.eof_received()
                if keep_open:
                    logger.warning('returning true from eof_received() '
                                   'has no effect when using ssl')
        finally:
            self._transport.close()

    def _get_extra_info(self, name, default=None):
        if name in self._extra:
            return self._extra[name]
        elif self._transport is not None:
            return self._transport.get_extra_info(name, default)
        else:
            return default

    def _start_shutdown(self):
        if self._in_shutdown:
            return
        if self._in_handshake:
            self._abort()
        else:
            self._in_shutdown = True
            self._write_appdata(b'')

    def _write_appdata(self, data):
        self._write_backlog.append((data, 0))
        self._write_buffer_size += len(data)
        self._process_write_backlog()

    def _start_handshake(self):
        if self._loop.get_debug():
            logger.debug("%r starts SSL handshake", self)
            self._handshake_start_time = self._loop.time()
        else:
            self._handshake_start_time = None
        self._in_handshake = True
        # (b'', 1) is a special value in _process_write_backlog() to do
        # the SSL handshake
        self._write_backlog.append((b'', 1))
        self._handshake_timeout_handle = \
            self._loop.call_later(self._ssl_handshake_timeout,
                                  self._check_handshake_timeout)
        self._process_write_backlog()

    def _check_handshake_timeout(self):
        if self._in_handshake is True:
            msg = (
                f"SSL handshake is taking longer than "
                f"{self._ssl_handshake_timeout} seconds: "
                f"aborting the connection"
            )
            self._fatal_error(ConnectionAbortedError(msg))

    def _on_handshake_complete(self, handshake_exc):
        self._in_handshake = False
        self._handshake_timeout_handle.cancel()

        sslobj = self._sslpipe.ssl_object
        try:
            if handshake_exc is not None:
                raise handshake_exc

            peercert = sslobj.getpeercert()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if isinstance(exc, ssl.CertificateError):
                msg = 'SSL handshake failed on verifying the certificate'
            else:
                msg = 'SSL handshake failed'
            self._fatal_error(exc, msg)
            return

        if self._loop.get_debug():
            dt = self._loop.time() - self._handshake_start_time
            logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)

        # Add extra info that becomes available after handshake.
        self._extra.update(peercert=peercert,
                           cipher=sslobj.cipher(),
                           compression=sslobj.compression(),
                           ssl_object=sslobj,
                           )
        if self._call_connection_made:
            self._app_protocol.connection_made(self._app_transport)
        self._wakeup_waiter()
        self._session_established = True
        # In case transport.write() was already called. Don't call
        # immediately _process_write_backlog(), but schedule it:
        # _on_handshake_complete() can be called indirectly from
        # _process_write_backlog(), and _process_write_backlog() is not
        # reentrant.
        self._loop.call_soon(self._process_write_backlog)

    def _process_write_backlog(self):
        # Try to make progress on the write backlog.
        if self._transport is None or self._sslpipe is None:
            return

        try:
            for i in range(len(self._write_backlog)):
                data, offset = self._write_backlog[0]
                if data:
                    ssldata, offset = self._sslpipe.feed_appdata(data, offset)
                elif offset:
                    ssldata = self._sslpipe.do_handshake(
                        self._on_handshake_complete)
                    offset = 1
                else:
                    ssldata = self._sslpipe.shutdown(self._finalize)
                    offset = 1

                for chunk in ssldata:
                    self._transport.write(chunk)

                if offset < len(data):
                    self._write_backlog[0] = (data, offset)
                    # A short write means that a write is blocked on a read
                    # We need to enable reading if it is paused!
                    assert self._sslpipe.need_ssldata
                    if self._transport._paused:
                        self._transport.resume_reading()
                    break

                # An entire chunk from the backlog was processed. We can
                # delete it and reduce the outstanding buffer size.
                del self._write_backlog[0]
                self._write_buffer_size -= len(data)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if self._in_handshake:
                # Exceptions will be re-raised in _on_handshake_complete.
                self._on_handshake_complete(exc)
            else:
                self._fatal_error(exc, 'Fatal error on SSL transport')

    def _fatal_error(self, exc, message='Fatal error on transport'):
        if isinstance(exc, OSError):
            if self._loop.get_debug():
                logger.debug("%r: %s", self, message, exc_info=True)
        else:
            self._loop.call_exception_handler({
                'message': message,
                'exception': exc,
                'transport': self._transport,
                'protocol': self,
            })
        if self._transport:
            self._transport._force_close(exc)

    def _finalize(self):
        self._sslpipe = None

        if self._transport is not None:
            self._transport.close()

    def _abort(self):
        try:
            if self._transport is not None:
                self._transport.abort()
        finally:
            self._finalize()
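A quick illustration of the _SSLPipe state machine above: before do_handshake() the pipe is in the UNWRAPPED state and both feed methods pass data through untransformed. _SSLPipe is private API, so this is only a sketch of the documented behavior, not supported usage:

import ssl
from asyncio.sslproto import _SSLPipe

ctx = ssl.create_default_context()
pipe = _SSLPipe(ctx, server_side=False, server_hostname='example.com')

# UNWRAPPED: record-level input comes back out as plaintext appdata...
ssldata, appdata = pipe.feed_ssldata(b'plain bytes')
assert (ssldata, appdata) == ([], [b'plain bytes'])

# ...and plaintext input comes back out as "record" data, fully consumed.
ssldata, offset = pipe.feed_appdata(b'plain bytes')
assert (ssldata, offset) == ([b'plain bytes'], 11)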
149
Tool/Python39/Lib/asyncio/staggered.py
Normal file
@@ -0,0 +1,149 @@
"""Support for running coroutines in parallel with staggered start times."""

__all__ = 'staggered_race',

import contextlib
import typing

from . import events
from . import exceptions as exceptions_mod
from . import locks
from . import tasks


async def staggered_race(
        coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]],
        delay: typing.Optional[float],
        *,
        loop: events.AbstractEventLoop = None,
) -> typing.Tuple[
    typing.Any,
    typing.Optional[int],
    typing.List[typing.Optional[Exception]]
]:
    """Run coroutines with staggered start times and take the first to finish.

    This method takes an iterable of coroutine functions. The first one is
    started immediately. From then on, whenever the immediately preceding one
    fails (raises an exception), or when *delay* seconds has passed, the next
    coroutine is started. This continues until one of the coroutines complete
    successfully, in which case all others are cancelled, or until all
    coroutines fail.

    The coroutines provided should be well-behaved in the following way:

    * They should only ``return`` if completed successfully.

    * They should always raise an exception if they did not complete
      successfully. In particular, if they handle cancellation, they should
      probably reraise, like this::

        try:
            # do work
        except asyncio.CancelledError:
            # undo partially completed work
            raise

    Args:
        coro_fns: an iterable of coroutine functions, i.e. callables that
            return a coroutine object when called. Use ``functools.partial`` or
            lambdas to pass arguments.

        delay: amount of time, in seconds, between starting coroutines. If
            ``None``, the coroutines will run sequentially.

        loop: the event loop to use.

    Returns:
        tuple *(winner_result, winner_index, exceptions)* where

        - *winner_result*: the result of the winning coroutine, or ``None``
          if no coroutines won.

        - *winner_index*: the index of the winning coroutine in
          ``coro_fns``, or ``None`` if no coroutines won. If the winning
          coroutine may return None on success, *winner_index* can be used
          to definitively determine whether any coroutine won.

        - *exceptions*: list of exceptions returned by the coroutines.
          ``len(exceptions)`` is equal to the number of coroutines actually
          started, and the order is the same as in ``coro_fns``. The winning
          coroutine's entry is ``None``.

    """
    # TODO: when we have aiter() and anext(), allow async iterables in coro_fns.
    loop = loop or events.get_running_loop()
    enum_coro_fns = enumerate(coro_fns)
    winner_result = None
    winner_index = None
    exceptions = []
    running_tasks = []

    async def run_one_coro(
            previous_failed: typing.Optional[locks.Event]) -> None:
        # Wait for the previous task to finish, or for delay seconds
        if previous_failed is not None:
            with contextlib.suppress(exceptions_mod.TimeoutError):
                # Use asyncio.wait_for() instead of asyncio.wait() here, so
                # that if we get cancelled at this point, Event.wait() is also
                # cancelled, otherwise there will be a "Task destroyed but it is
                # pending" later.
                await tasks.wait_for(previous_failed.wait(), delay)
        # Get the next coroutine to run
        try:
            this_index, coro_fn = next(enum_coro_fns)
        except StopIteration:
            return
        # Start task that will run the next coroutine
        this_failed = locks.Event()
        next_task = loop.create_task(run_one_coro(this_failed))
        running_tasks.append(next_task)
        assert len(running_tasks) == this_index + 2
        # Prepare place to put this coroutine's exceptions if not won
        exceptions.append(None)
        assert len(exceptions) == this_index + 1

        try:
            result = await coro_fn()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as e:
            exceptions[this_index] = e
            this_failed.set()  # Kickstart the next coroutine
        else:
            # Store winner's results
            nonlocal winner_index, winner_result
            assert winner_index is None
            winner_index = this_index
            winner_result = result
            # Cancel all other tasks. We take care to not cancel the current
            # task as well. If we do so, then since there is no `await` after
            # here and CancelledError are usually thrown at one, we will
            # encounter a curious corner case where the current task will end
            # up as done() == True, cancelled() == False, exception() ==
            # asyncio.CancelledError. This behavior is specified in
            # https://bugs.python.org/issue30048
            for i, t in enumerate(running_tasks):
                if i != this_index:
                    t.cancel()

    first_task = loop.create_task(run_one_coro(None))
    running_tasks.append(first_task)
    try:
        # Wait for a growing list of tasks to all finish: poor man's version of
        # curio's TaskGroup or trio's nursery
        done_count = 0
        while done_count != len(running_tasks):
            done, _ = await tasks.wait(running_tasks)
            done_count = len(done)
            # If run_one_coro raises an unhandled exception, it's probably a
            # programming error, and I want to see it.
            if __debug__:
                for d in done:
                    if d.done() and not d.cancelled() and d.exception():
                        raise d.exception()
        return winner_result, winner_index, exceptions
    finally:
        # Make sure no tasks are left running if we leave this function
        for t in running_tasks:
            t.cancel()
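A runnable sketch of staggered_race() as used for happy-eyeballs style connection attempts: the slow primary is started first, the fallback 0.1 s later, and the fallback wins (the attempt coroutine and timings are illustrative):

import asyncio
import functools
from asyncio.staggered import staggered_race

async def attempt(name, latency):
    await asyncio.sleep(latency)   # stand-in for a connect attempt
    return name

async def main():
    winner, index, excs = await staggered_race(
        [functools.partial(attempt, 'primary', 0.3),
         functools.partial(attempt, 'fallback', 0.05)],
        delay=0.1)
    print(winner, index)           # -> fallback wins at index 1

asyncio.run(main())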
741
Tool/Python39/Lib/asyncio/streams.py
Normal file
@@ -0,0 +1,741 @@
|
||||
__all__ = (
|
||||
'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
|
||||
'open_connection', 'start_server')
|
||||
|
||||
import socket
|
||||
import sys
|
||||
import warnings
|
||||
import weakref
|
||||
|
||||
if hasattr(socket, 'AF_UNIX'):
|
||||
__all__ += ('open_unix_connection', 'start_unix_server')
|
||||
|
||||
from . import coroutines
|
||||
from . import events
|
||||
from . import exceptions
|
||||
from . import format_helpers
|
||||
from . import protocols
|
||||
from .log import logger
|
||||
from .tasks import sleep
|
||||
|
||||
|
||||
_DEFAULT_LIMIT = 2 ** 16 # 64 KiB
|
||||
|
||||
|
||||
async def open_connection(host=None, port=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""A wrapper for create_connection() returning a (reader, writer) pair.
|
||||
|
||||
The reader returned is a StreamReader instance; the writer is a
|
||||
StreamWriter instance.
|
||||
|
||||
The arguments are all the usual arguments to create_connection()
|
||||
except protocol_factory; most common are positional host and port,
|
||||
with various optional keyword arguments following.
|
||||
|
||||
Additional optional keyword arguments are loop (to set the event loop
|
||||
instance to use) and limit (to set the buffer limit passed to the
|
||||
StreamReader).
|
||||
|
||||
(If you want to customize the StreamReader and/or
|
||||
StreamReaderProtocol classes, just copy the code -- there's
|
||||
really nothing special here except some convenience.)
|
||||
"""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
else:
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, loop=loop)
|
||||
transport, _ = await loop.create_connection(
|
||||
lambda: protocol, host, port, **kwds)
|
||||
writer = StreamWriter(transport, protocol, reader, loop)
|
||||
return reader, writer
|
||||
|
||||
|
||||
async def start_server(client_connected_cb, host=None, port=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""Start a socket server, call back for each client connected.
|
||||
|
||||
The first parameter, `client_connected_cb`, takes two parameters:
|
||||
client_reader, client_writer. client_reader is a StreamReader
|
||||
object, while client_writer is a StreamWriter object. This
|
||||
parameter can either be a plain callback function or a coroutine;
|
||||
if it is a coroutine, it will be automatically converted into a
|
||||
Task.
|
||||
|
||||
The rest of the arguments are all the usual arguments to
|
||||
loop.create_server() except protocol_factory; most common are
|
||||
positional host and port, with various optional keyword arguments
|
||||
following. The return value is the same as loop.create_server().
|
||||
|
||||
Additional optional keyword arguments are loop (to set the event loop
|
||||
instance to use) and limit (to set the buffer limit passed to the
|
||||
StreamReader).
|
||||
|
||||
The return value is the same as loop.create_server(), i.e. a
|
||||
Server object which can be used to stop the service.
|
||||
"""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
else:
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
def factory():
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, client_connected_cb,
|
||||
loop=loop)
|
||||
return protocol
|
||||
|
||||
return await loop.create_server(factory, host, port, **kwds)
|
||||
|
||||
|
||||
if hasattr(socket, 'AF_UNIX'):
|
||||
# UNIX Domain Sockets are supported on this platform
|
||||
|
||||
async def open_unix_connection(path=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
else:
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, loop=loop)
|
||||
transport, _ = await loop.create_unix_connection(
|
||||
lambda: protocol, path, **kwds)
|
||||
writer = StreamWriter(transport, protocol, reader, loop)
|
||||
return reader, writer
|
||||
|
||||
async def start_unix_server(client_connected_cb, path=None, *,
|
||||
loop=None, limit=_DEFAULT_LIMIT, **kwds):
|
||||
"""Similar to `start_server` but works with UNIX Domain Sockets."""
|
||||
if loop is None:
|
||||
loop = events.get_event_loop()
|
||||
else:
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
|
||||
def factory():
|
||||
reader = StreamReader(limit=limit, loop=loop)
|
||||
protocol = StreamReaderProtocol(reader, client_connected_cb,
|
||||
loop=loop)
|
||||
return protocol
|
||||
|
||||
return await loop.create_unix_server(factory, path, **kwds)
|
||||
|
||||
|
||||
class FlowControlMixin(protocols.Protocol):
|
||||
"""Reusable flow control logic for StreamWriter.drain().
|
||||
|
||||
This implements the protocol methods pause_writing(),
|
||||
resume_writing() and connection_lost(). If the subclass overrides
|
||||
these it must call the super methods.
|
||||
|
||||
StreamWriter.drain() must wait for _drain_helper() coroutine.
|
||||
"""
|
||||
|
||||
def __init__(self, loop=None):
|
||||
if loop is None:
|
||||
self._loop = events.get_event_loop()
|
||||
else:
|
||||
self._loop = loop
|
||||
self._paused = False
|
||||
self._drain_waiter = None
|
||||
self._connection_lost = False
|
||||
|
||||
def pause_writing(self):
|
||||
assert not self._paused
|
||||
self._paused = True
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r pauses writing", self)
|
||||
|
||||
def resume_writing(self):
|
||||
assert self._paused
|
||||
self._paused = False
|
||||
if self._loop.get_debug():
|
||||
logger.debug("%r resumes writing", self)
|
||||
|
||||
waiter = self._drain_waiter
|
||||
if waiter is not None:
|
||||
self._drain_waiter = None
|
||||
if not waiter.done():
|
||||
waiter.set_result(None)
|
||||
|
||||
def connection_lost(self, exc):
|
||||
self._connection_lost = True
|
||||
# Wake up the writer if currently paused.
|
||||
if not self._paused:
|
||||
return
|
||||
waiter = self._drain_waiter
|
||||
if waiter is None:
|
||||
return
|
||||
self._drain_waiter = None
|
||||
if waiter.done():
|
||||
return
|
||||
if exc is None:
|
||||
waiter.set_result(None)
|
||||
else:
|
||||
waiter.set_exception(exc)
|
||||
|
||||
async def _drain_helper(self):
|
||||
if self._connection_lost:
|
||||
raise ConnectionResetError('Connection lost')
|
||||
if not self._paused:
|
||||
return
|
||||
waiter = self._drain_waiter
|
||||
assert waiter is None or waiter.cancelled()
|
||||
waiter = self._loop.create_future()
|
||||
self._drain_waiter = waiter
|
||||
await waiter
|
||||
|
||||
def _get_close_waiter(self, stream):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.

    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)
    """

    _source_traceback = None

    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
        super().__init__(loop=loop)
        if stream_reader is not None:
            self._stream_reader_wr = weakref.ref(stream_reader)
            self._source_traceback = stream_reader._source_traceback
        else:
            self._stream_reader_wr = None
        if client_connected_cb is not None:
            # This is a stream created by the `create_server()` function.
            # Keep a strong reference to the reader until a connection
            # is established.
            self._strong_reader = stream_reader
        self._reject_connection = False
        self._stream_writer = None
        self._transport = None
        self._client_connected_cb = client_connected_cb
        self._over_ssl = False
        self._closed = self._loop.create_future()

    @property
    def _stream_reader(self):
        if self._stream_reader_wr is None:
            return None
        return self._stream_reader_wr()

    def connection_made(self, transport):
        if self._reject_connection:
            context = {
                'message': ('An open stream was garbage collected prior to '
                            'establishing network connection; '
                            'call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
            return
        self._transport = transport
        reader = self._stream_reader
        if reader is not None:
            reader.set_transport(transport)
        self._over_ssl = transport.get_extra_info('sslcontext') is not None
        if self._client_connected_cb is not None:
            self._stream_writer = StreamWriter(transport, self,
                                               reader,
                                               self._loop)
            res = self._client_connected_cb(reader,
                                            self._stream_writer)
            if coroutines.iscoroutine(res):
                self._loop.create_task(res)
            self._strong_reader = None

    def connection_lost(self, exc):
        reader = self._stream_reader
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if not self._closed.done():
            if exc is None:
                self._closed.set_result(None)
            else:
                self._closed.set_exception(exc)
        super().connection_lost(exc)
        self._stream_reader_wr = None
        self._stream_writer = None
        self._transport = None

    def data_received(self, data):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_data(data)

    def eof_received(self):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_eof()
        if self._over_ssl:
            # Prevent a warning in SSLProtocol.eof_received:
            # "returning true from eof_received()
            # has no effect when using ssl"
            return False
        return True

    def _get_close_waiter(self, stream):
        return self._closed

    def __del__(self):
        # Prevent reports about unhandled exceptions.
        # Better than self._closed._log_traceback = False hack
        closed = self._closed
        if closed.done() and not closed.cancelled():
            closed.exception()

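
# Illustrative sketch (hypothetical helper, not part of the module API):
# essentially what open_connection() does with the adapter class above --
# pair a StreamReader with a StreamReaderProtocol over a new transport.
async def _example_wire_stream(loop, host, port):
    reader = StreamReader(loop=loop)
    protocol = StreamReaderProtocol(reader, loop=loop)
    transport, _ = await loop.create_connection(lambda: protocol, host, port)
    writer = StreamWriter(transport, protocol, reader, loop)
    return reader, writer
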
class StreamWriter:
    """Wraps a Transport.

    This exposes write(), writelines(), [can_]write_eof(),
    get_extra_info() and close(). It adds drain() which returns an
    optional Future on which you can wait for flow control. It also
    adds a transport property which references the Transport
    directly.
    """

    def __init__(self, transport, protocol, reader, loop):
        self._transport = transport
        self._protocol = protocol
        # drain() expects that the reader has an exception() method
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop
        self._complete_fut = self._loop.create_future()
        self._complete_fut.set_result(None)

    def __repr__(self):
        info = [self.__class__.__name__, f'transport={self._transport!r}']
        if self._reader is not None:
            info.append(f'reader={self._reader!r}')
        return '<{}>'.format(' '.join(info))

    @property
    def transport(self):
        return self._transport

    def write(self, data):
        self._transport.write(data)

    def writelines(self, data):
        self._transport.writelines(data)

    def write_eof(self):
        return self._transport.write_eof()

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def close(self):
        return self._transport.close()

    def is_closing(self):
        return self._transport.is_closing()

    async def wait_closed(self):
        await self._protocol._get_close_waiter(self)

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    async def drain(self):
        """Flush the write buffer.

        The intended use is to write

          w.write(data)
          await w.drain()
        """
        if self._reader is not None:
            exc = self._reader.exception()
            if exc is not None:
                raise exc
        if self._transport.is_closing():
            # Wait for protocol.connection_lost() call
            # Raise connection closing error if any,
            # ConnectionResetError otherwise
            # Yield to the event loop so connection_lost() may be
            # called. Without this, _drain_helper() would return
            # immediately, and code that calls
            #     write(...); await drain()
            # in a loop would never call connection_lost(), so it
            # would not see an error when the socket is closed.
            await sleep(0)
        await self._protocol._drain_helper()

class StreamReader:

    _source_traceback = None

    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.

        if limit <= 0:
            raise ValueError('Limit cannot be <= 0')

        self._limit = limit
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()
        self._eof = False    # Whether we're done.
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None
        self._transport = None
        self._paused = False
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))

    def __repr__(self):
        info = ['StreamReader']
        if self._buffer:
            info.append(f'{len(self._buffer)} bytes')
        if self._eof:
            info.append('eof')
        if self._limit != _DEFAULT_LIMIT:
            info.append(f'limit={self._limit}')
        if self._waiter:
            info.append(f'waiter={self._waiter!r}')
        if self._exception:
            info.append(f'exception={self._exception!r}')
        if self._transport:
            info.append(f'transport={self._transport!r}')
        if self._paused:
            info.append('paused')
        return '<{}>'.format(' '.join(info))

    def exception(self):
        return self._exception

    def set_exception(self, exc):
        self._exception = exc

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)

    def _wakeup_waiter(self):
        """Wakeup read*() functions waiting for data or EOF."""
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(None)

    def set_transport(self, transport):
        assert self._transport is None, 'Transport already set'
        self._transport = transport

    def _maybe_resume_transport(self):
        if self._paused and len(self._buffer) <= self._limit:
            self._paused = False
            self._transport.resume_reading()

    def feed_eof(self):
        self._eof = True
        self._wakeup_waiter()

    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer

    def feed_data(self, data):
        assert not self._eof, 'feed_data after feed_eof'

        if not data:
            return

        self._buffer.extend(data)
        self._wakeup_waiter()

        if (self._transport is not None and
                not self._paused and
                len(self._buffer) > 2 * self._limit):
            try:
                self._transport.pause_reading()
            except NotImplementedError:
                # The transport can't be paused.
                # We'll just have to buffer all data.
                # Forget the transport so we don't keep trying.
                self._transport = None
            else:
                self._paused = True

    async def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called.

        If stream was paused, automatically resume it.
        """
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError(
                f'{func_name}() called while another coroutine is '
                f'already waiting for incoming data')

        assert not self._eof, '_wait_for_data after EOF'

        # Waiting for data while paused would deadlock, so prevent it.
        # This is essential for readexactly(n) for the case when n > self._limit.
        if self._paused:
            self._paused = False
            self._transport.resume_reading()

        self._waiter = self._loop.create_future()
        try:
            await self._waiter
        finally:
            self._waiter = None

    async def readline(self):
        """Read chunk of data from the stream until newline (b'\n') is found.

        On success, return the chunk that ends with newline. If only a partial
        line can be read due to EOF, return the incomplete line without the
        terminating newline. When EOF is reached while no bytes have been
        read, an empty bytes object is returned.

        If the limit is reached, ValueError will be raised. In that case, if
        a newline was found, the complete line including the newline will be
        removed from the internal buffer. Else, the internal buffer will be
        cleared. The limit is compared against the part of the line without
        the newline.

        If the stream was paused, this function will automatically resume it
        if needed.
        """
        sep = b'\n'
        seplen = len(sep)
        try:
            line = await self.readuntil(sep)
        except exceptions.IncompleteReadError as e:
            return e.partial
        except exceptions.LimitOverrunError as e:
            if self._buffer.startswith(sep, e.consumed):
                del self._buffer[:e.consumed + seplen]
            else:
                self._buffer.clear()
            self._maybe_resume_transport()
            raise ValueError(e.args[0])
        return line

    async def readuntil(self, separator=b'\n'):
        """Read data from the stream until ``separator`` is found.

        On success, the data and separator will be removed from the
        internal buffer (consumed). Returned data will include the
        separator at the end.

        The configured stream limit is used to check the result. The limit
        sets the maximal length of data that can be returned, not counting
        the separator.

        If an EOF occurs and the complete separator is still not found,
        an IncompleteReadError exception will be raised, and the internal
        buffer will be reset. The IncompleteReadError.partial attribute
        may contain a portion of the separator.

        If the data cannot be read because it is over the limit, a
        LimitOverrunError exception will be raised, and the data
        will be left in the internal buffer, so it can be read again.
        """
        seplen = len(separator)
        if seplen == 0:
            raise ValueError('Separator should be at least one-byte string')

        if self._exception is not None:
            raise self._exception

        # Consume whole buffer except last bytes, whose length is
        # one less than seplen. Let's check corner cases with
        # separator='SEPARATOR':
        # * we have received almost complete separator (without last
        #   byte). i.e buffer='some textSEPARATO'. In this case we
        #   can safely consume len(separator) - 1 bytes.
        # * last byte of buffer is first byte of separator, i.e.
        #   buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this would require
        #   analyzing the bytes of the buffer that match a partial
        #   separator. This is slow and/or would require an FSM. For
        #   this case our implementation is not optimal, since it
        #   requires rescanning data that is known not to belong to
        #   the separator. In the real world, the separator will not
        #   be long enough to notice performance problems. Even when
        #   reading MIME-encoded messages :)

        # `offset` is the number of bytes from the beginning of the buffer
        # where there is no occurrence of `separator`.
        offset = 0

        # Loop until we find `separator` in the buffer, exceed the buffer size,
        # or an EOF has happened.
        while True:
            buflen = len(self._buffer)

            # Check if we now have enough data in the buffer for `separator` to
            # fit.
            if buflen - offset >= seplen:
                isep = self._buffer.find(separator, offset)

                if isep != -1:
                    # `separator` is in the buffer. `isep` will be used later
                    # to retrieve the data.
                    break

                # see upper comment for explanation.
                offset = buflen + 1 - seplen
                if offset > self._limit:
                    raise exceptions.LimitOverrunError(
                        'Separator is not found, and chunk exceed the limit',
                        offset)

            # Complete message (with full separator) may be present in buffer
            # even when EOF flag is set. This may happen when the last chunk
            # adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
            if self._eof:
                chunk = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(chunk, None)

            # _wait_for_data() will resume reading if stream was paused.
            await self._wait_for_data('readuntil')

        if isep > self._limit:
            raise exceptions.LimitOverrunError(
                'Separator is found, but chunk is longer than limit', isep)

        chunk = self._buffer[:isep + seplen]
        del self._buffer[:isep + seplen]
        self._maybe_resume_transport()
        return bytes(chunk)

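
    # Illustrative sketch (hypothetical helper, not part of the module API):
    # reading one CRLF-delimited record with readuntil(), handling the two
    # error cases documented above; `reader` is a StreamReader.
    #
    #   async def _example_read_record(reader):
    #       try:
    #           return await reader.readuntil(b'\r\n')
    #       except exceptions.IncompleteReadError as e:
    #           return e.partial  # EOF arrived before the separator was seen
    #       except exceptions.LimitOverrunError:
    #           raise ValueError('record exceeds the configured stream limit')
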
    async def read(self, n=-1):
        """Read up to `n` bytes from the stream.

        If n is not provided, or set to -1, read until EOF and return all read
        bytes. If EOF was received and the internal buffer is empty, return
        an empty bytes object.

        If n is zero, return an empty bytes object immediately.

        If n is positive, this function tries to read `n` bytes, and may
        return fewer bytes than requested, but at least one byte. If EOF was
        received before any byte is read, this function returns an empty
        bytes object.

        The returned value is not limited by the limit configured at stream
        creation.

        If the stream was paused, this function will automatically resume it
        if needed.
        """

        if self._exception is not None:
            raise self._exception

        if n == 0:
            return b''

        if n < 0:
            # This used to just loop creating a new waiter hoping to
            # collect everything in self._buffer, but that would
            # deadlock if the subprocess sends more than self.limit
            # bytes. So just call self.read(self._limit) until EOF.
            blocks = []
            while True:
                block = await self.read(self._limit)
                if not block:
                    break
                blocks.append(block)
            return b''.join(blocks)

        if not self._buffer and not self._eof:
            await self._wait_for_data('read')

        # This will work right even if buffer is less than n bytes
        data = bytes(self._buffer[:n])
        del self._buffer[:n]

        self._maybe_resume_transport()
        return data

    async def readexactly(self, n):
        """Read exactly `n` bytes.

        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
        read. The IncompleteReadError.partial attribute of the exception will
        contain the partial read bytes.

        If n is zero, return an empty bytes object.

        The returned value is not limited by the limit configured at stream
        creation.

        If the stream was paused, this function will automatically resume it
        if needed.
        """
        if n < 0:
            raise ValueError('readexactly size can not be less than zero')

        if self._exception is not None:
            raise self._exception

        if n == 0:
            return b''

        while len(self._buffer) < n:
            if self._eof:
                incomplete = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(incomplete, n)

            await self._wait_for_data('readexactly')

        if len(self._buffer) == n:
            data = bytes(self._buffer)
            self._buffer.clear()
        else:
            data = bytes(self._buffer[:n])
            del self._buffer[:n]
        self._maybe_resume_transport()
        return data

    def __aiter__(self):
        return self

    async def __anext__(self):
        val = await self.readline()
        if val == b'':
            raise StopAsyncIteration
        return val
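
A minimal end-to-end sketch of the stream API defined above (illustrative only; it assumes a line-oriented server is already listening on the given port):

    import asyncio

    async def fetch_line(host, port):
        reader, writer = await asyncio.open_connection(host, port)
        writer.write(b'ping\n')
        await writer.drain()
        line = await reader.readline()   # b'' on EOF, per readline() above
        writer.close()
        await writer.wait_closed()
        return line

The `async for line in reader` form works as well, since StreamReader implements __aiter__/__anext__ in terms of readline().
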
241
Tool/Python39/Lib/asyncio/subprocess.py
Normal file
@@ -0,0 +1,241 @@
__all__ = 'create_subprocess_exec', 'create_subprocess_shell'

import subprocess
import warnings

from . import events
from . import protocols
from . import streams
from . import tasks
from .log import logger


PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL


class SubprocessStreamProtocol(streams.FlowControlMixin,
                               protocols.SubprocessProtocol):
    """Like StreamReaderProtocol, but for a subprocess."""

    def __init__(self, limit, loop):
        super().__init__(loop=loop)
        self._limit = limit
        self.stdin = self.stdout = self.stderr = None
        self._transport = None
        self._process_exited = False
        self._pipe_fds = []
        self._stdin_closed = self._loop.create_future()

    def __repr__(self):
        info = [self.__class__.__name__]
        if self.stdin is not None:
            info.append(f'stdin={self.stdin!r}')
        if self.stdout is not None:
            info.append(f'stdout={self.stdout!r}')
        if self.stderr is not None:
            info.append(f'stderr={self.stderr!r}')
        return '<{}>'.format(' '.join(info))

    def connection_made(self, transport):
        self._transport = transport

        stdout_transport = transport.get_pipe_transport(1)
        if stdout_transport is not None:
            self.stdout = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stdout.set_transport(stdout_transport)
            self._pipe_fds.append(1)

        stderr_transport = transport.get_pipe_transport(2)
        if stderr_transport is not None:
            self.stderr = streams.StreamReader(limit=self._limit,
                                               loop=self._loop)
            self.stderr.set_transport(stderr_transport)
            self._pipe_fds.append(2)

        stdin_transport = transport.get_pipe_transport(0)
        if stdin_transport is not None:
            self.stdin = streams.StreamWriter(stdin_transport,
                                              protocol=self,
                                              reader=None,
                                              loop=self._loop)

    def pipe_data_received(self, fd, data):
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            reader.feed_data(data)

    def pipe_connection_lost(self, fd, exc):
        if fd == 0:
            pipe = self.stdin
            if pipe is not None:
                pipe.close()
            self.connection_lost(exc)
            if exc is None:
                self._stdin_closed.set_result(None)
            else:
                self._stdin_closed.set_exception(exc)
            return
        if fd == 1:
            reader = self.stdout
        elif fd == 2:
            reader = self.stderr
        else:
            reader = None
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)

        if fd in self._pipe_fds:
            self._pipe_fds.remove(fd)
        self._maybe_close_transport()

    def process_exited(self):
        self._process_exited = True
        self._maybe_close_transport()

    def _maybe_close_transport(self):
        if len(self._pipe_fds) == 0 and self._process_exited:
            self._transport.close()
            self._transport = None

    def _get_close_waiter(self, stream):
        if stream is self.stdin:
            return self._stdin_closed

class Process:
    def __init__(self, transport, protocol, loop):
        self._transport = transport
        self._protocol = protocol
        self._loop = loop
        self.stdin = protocol.stdin
        self.stdout = protocol.stdout
        self.stderr = protocol.stderr
        self.pid = transport.get_pid()

    def __repr__(self):
        return f'<{self.__class__.__name__} {self.pid}>'

    @property
    def returncode(self):
        return self._transport.get_returncode()

    async def wait(self):
        """Wait until the process exits and return its return code."""
        return await self._transport._wait()

    def send_signal(self, signal):
        self._transport.send_signal(signal)

    def terminate(self):
        self._transport.terminate()

    def kill(self):
        self._transport.kill()

    async def _feed_stdin(self, input):
        debug = self._loop.get_debug()
        self.stdin.write(input)
        if debug:
            logger.debug(
                '%r communicate: feed stdin (%s bytes)', self, len(input))
        try:
            await self.stdin.drain()
        except (BrokenPipeError, ConnectionResetError) as exc:
            # communicate() ignores BrokenPipeError and ConnectionResetError
            if debug:
                logger.debug('%r communicate: stdin got %r', self, exc)

        if debug:
            logger.debug('%r communicate: close stdin', self)
        self.stdin.close()

    async def _noop(self):
        return None

    async def _read_stream(self, fd):
        transport = self._transport.get_pipe_transport(fd)
        if fd == 2:
            stream = self.stderr
        else:
            assert fd == 1
            stream = self.stdout
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: read %s', self, name)
        output = await stream.read()
        if self._loop.get_debug():
            name = 'stdout' if fd == 1 else 'stderr'
            logger.debug('%r communicate: close %s', self, name)
        transport.close()
        return output

    async def communicate(self, input=None):
        if input is not None:
            stdin = self._feed_stdin(input)
        else:
            stdin = self._noop()
        if self.stdout is not None:
            stdout = self._read_stream(1)
        else:
            stdout = self._noop()
        if self.stderr is not None:
            stderr = self._read_stream(2)
        else:
            stderr = self._noop()
        stdin, stdout, stderr = await tasks._gather(stdin, stdout, stderr,
                                                    loop=self._loop)
        await self.wait()
        return (stdout, stderr)


async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
                                  loop=None, limit=streams._DEFAULT_LIMIT,
                                  **kwds):
    if loop is None:
        loop = events.get_event_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8 "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2
                      )

    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = await loop.subprocess_shell(
        protocol_factory,
        cmd, stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)


async def create_subprocess_exec(program, *args, stdin=None, stdout=None,
                                 stderr=None, loop=None,
                                 limit=streams._DEFAULT_LIMIT, **kwds):
    if loop is None:
        loop = events.get_event_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8 "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning,
                      stacklevel=2
                      )
    protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
                                                        loop=loop)
    transport, protocol = await loop.subprocess_exec(
        protocol_factory,
        program, *args,
        stdin=stdin, stdout=stdout,
        stderr=stderr, **kwds)
    return Process(transport, protocol, loop)
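
A short sketch of the high-level subprocess API defined above (illustrative only; the command is an arbitrary example):

    import asyncio

    async def run_cmd():
        proc = await asyncio.create_subprocess_exec(
            'echo', 'hello', stdout=asyncio.subprocess.PIPE)
        stdout, _ = await proc.communicate()   # drains pipes, then wait()s
        return proc.returncode, stdout
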
988
Tool/Python39/Lib/asyncio/tasks.py
Normal file
@@ -0,0 +1,988 @@
"""Support for tasks, coroutines and the scheduler."""

__all__ = (
    'Task', 'create_task',
    'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
    'wait', 'wait_for', 'as_completed', 'sleep',
    'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe',
    'current_task', 'all_tasks',
    '_register_task', '_unregister_task', '_enter_task', '_leave_task',
)

import concurrent.futures
import contextvars
import functools
import inspect
import itertools
import types
import warnings
import weakref

from . import base_tasks
from . import coroutines
from . import events
from . import exceptions
from . import futures
from .coroutines import _is_coroutine

# Helper to generate new task names
# This uses itertools.count() instead of a "+= 1" operation because the latter
# is not thread safe. See bpo-11866 for a longer explanation.
_task_name_counter = itertools.count(1).__next__


def current_task(loop=None):
    """Return the currently executed task."""
    if loop is None:
        loop = events.get_running_loop()
    return _current_tasks.get(loop)


def all_tasks(loop=None):
    """Return a set of all tasks for the loop."""
    if loop is None:
        loop = events.get_running_loop()
    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
    # thread while we do so. Therefore we cast it to list prior to filtering. The list
    # cast itself requires iteration, so we repeat it several times ignoring
    # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
    # details.
    i = 0
    while True:
        try:
            tasks = list(_all_tasks)
        except RuntimeError:
            i += 1
            if i >= 1000:
                raise
        else:
            break
    return {t for t in tasks
            if futures._get_loop(t) is loop and not t.done()}


def _all_tasks_compat(loop=None):
    # Differs from "all_tasks()" by returning *all* Tasks, including
    # the completed ones. Used to implement the deprecated "Task.all_tasks()"
    # method.
    if loop is None:
        loop = events.get_event_loop()
    # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another
    # thread while we do so. Therefore we cast it to list prior to filtering. The list
    # cast itself requires iteration, so we repeat it several times ignoring
    # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for
    # details.
    i = 0
    while True:
        try:
            tasks = list(_all_tasks)
        except RuntimeError:
            i += 1
            if i >= 1000:
                raise
        else:
            break
    return {t for t in tasks if futures._get_loop(t) is loop}


def _set_task_name(task, name):
    if name is not None:
        try:
            set_name = task.set_name
        except AttributeError:
            pass
        else:
            set_name(name)

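
# Illustrative sketch (hypothetical helper, not part of the module API):
# both introspection helpers above require a running event loop.
async def _example_introspect():
    me = current_task()    # the Task wrapping this very coroutine
    pending = all_tasks()  # every not-yet-done task of the running loop
    return me in pending   # True: a running task is never done
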
class Task(futures._PyFuture):  # Inherit Python Task implementation
                                # from a Python Future implementation.

    """A coroutine wrapped in a Future."""

    # An important invariant maintained while a Task is not done:
    #
    # - Either _fut_waiter is None, and _step() is scheduled;
    # - or _fut_waiter is some Future, and _step() is *not* scheduled.
    #
    # The only transition from the latter to the former is through
    # _wakeup(). When _fut_waiter is not None, one of its callbacks
    # must be _wakeup().

    # If False, don't log a message if the task is destroyed whereas its
    # status is still pending
    _log_destroy_pending = True

    def __init__(self, coro, *, loop=None, name=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        if not coroutines.iscoroutine(coro):
            # raise after Future.__init__(), attrs are required for __del__
            # prevent logging for pending task in __del__
            self._log_destroy_pending = False
            raise TypeError(f"a coroutine was expected, got {coro!r}")

        if name is None:
            self._name = f'Task-{_task_name_counter()}'
        else:
            self._name = str(name)

        self._must_cancel = False
        self._fut_waiter = None
        self._coro = coro
        self._context = contextvars.copy_context()

        self._loop.call_soon(self.__step, context=self._context)
        _register_task(self)

    def __del__(self):
        if self._state == futures._PENDING and self._log_destroy_pending:
            context = {
                'task': self,
                'message': 'Task was destroyed but it is pending!',
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        super().__del__()

    def __class_getitem__(cls, type):
        return cls

    def _repr_info(self):
        return base_tasks._task_repr_info(self)

    def get_coro(self):
        return self._coro

    def get_name(self):
        return self._name

    def set_name(self, value):
        self._name = str(value)

    def set_result(self, result):
        raise RuntimeError('Task does not support set_result operation')

    def set_exception(self, exception):
        raise RuntimeError('Task does not support set_exception operation')

    def get_stack(self, *, limit=None):
        """Return the list of stack frames for this task's coroutine.

        If the coroutine is not done, this returns the stack where it is
        suspended. If the coroutine has completed successfully or was
        cancelled, this returns an empty list. If the coroutine was
        terminated by an exception, this returns the list of traceback
        frames.

        The frames are always ordered from oldest to newest.

        The optional limit gives the maximum number of frames to
        return; by default all available frames are returned. Its
        meaning differs depending on whether a stack or a traceback is
        returned: the newest frames of a stack are returned, but the
        oldest frames of a traceback are returned. (This matches the
        behavior of the traceback module.)

        For reasons beyond our control, only one stack frame is
        returned for a suspended coroutine.
        """
        return base_tasks._task_get_stack(self, limit)

    def print_stack(self, *, limit=None, file=None):
        """Print the stack or traceback for this task's coroutine.

        This produces output similar to that of the traceback module,
        for the frames retrieved by get_stack(). The limit argument
        is passed to get_stack(). The file argument is an I/O stream
        to which the output is written; by default output is written
        to sys.stderr.
        """
        return base_tasks._task_print_stack(self, limit, file)

    def cancel(self, msg=None):
        """Request that this task cancel itself.

        This arranges for a CancelledError to be thrown into the
        wrapped coroutine on the next cycle through the event loop.
        The coroutine then has a chance to clean up or even deny
        the request using try/except/finally.

        Unlike Future.cancel, this does not guarantee that the
        task will be cancelled: the exception might be caught and
        acted upon, delaying cancellation of the task or preventing
        cancellation completely. The task may also return a value or
        raise a different exception.

        Immediately after this method is called, Task.cancelled() will
        not return True (unless the task was already cancelled). A
        task will be marked as cancelled when the wrapped coroutine
        terminates with a CancelledError exception (even if cancel()
        was not called).
        """
        self._log_traceback = False
        if self.done():
            return False
        if self._fut_waiter is not None:
            if self._fut_waiter.cancel(msg=msg):
                # Leave self._fut_waiter; it may be a Task that
                # catches and ignores the cancellation so we may have
                # to cancel it again later.
                return True
        # It must be the case that self.__step is already scheduled.
        self._must_cancel = True
        self._cancel_message = msg
        return True

    def __step(self, exc=None):
        if self.done():
            raise exceptions.InvalidStateError(
                f'_step(): already done: {self!r}, {exc!r}')
        if self._must_cancel:
            if not isinstance(exc, exceptions.CancelledError):
                exc = self._make_cancelled_error()
            self._must_cancel = False
        coro = self._coro
        self._fut_waiter = None

        _enter_task(self._loop, self)
        # Call either coro.throw(exc) or coro.send(None).
        try:
            if exc is None:
                # We use the `send` method directly, because coroutines
                # don't have `__iter__` and `__next__` methods.
                result = coro.send(None)
            else:
                result = coro.throw(exc)
        except StopIteration as exc:
            if self._must_cancel:
                # Task is cancelled right before coro stops.
                self._must_cancel = False
                super().cancel(msg=self._cancel_message)
            else:
                super().set_result(exc.value)
        except exceptions.CancelledError as exc:
            # Save the original exception so we can chain it later.
            self._cancelled_exc = exc
            super().cancel()  # I.e., Future.cancel(self).
        except (KeyboardInterrupt, SystemExit) as exc:
            super().set_exception(exc)
            raise
        except BaseException as exc:
            super().set_exception(exc)
        else:
            blocking = getattr(result, '_asyncio_future_blocking', None)
            if blocking is not None:
                # Yielded Future must come from Future.__iter__().
                if futures._get_loop(result) is not self._loop:
                    new_exc = RuntimeError(
                        f'Task {self!r} got Future '
                        f'{result!r} attached to a different loop')
                    self._loop.call_soon(
                        self.__step, new_exc, context=self._context)
                elif blocking:
                    if result is self:
                        new_exc = RuntimeError(
                            f'Task cannot await on itself: {self!r}')
                        self._loop.call_soon(
                            self.__step, new_exc, context=self._context)
                    else:
                        result._asyncio_future_blocking = False
                        result.add_done_callback(
                            self.__wakeup, context=self._context)
                        self._fut_waiter = result
                        if self._must_cancel:
                            if self._fut_waiter.cancel(
                                    msg=self._cancel_message):
                                self._must_cancel = False
                else:
                    new_exc = RuntimeError(
                        f'yield was used instead of yield from '
                        f'in task {self!r} with {result!r}')
                    self._loop.call_soon(
                        self.__step, new_exc, context=self._context)

            elif result is None:
                # Bare yield relinquishes control for one event loop iteration.
                self._loop.call_soon(self.__step, context=self._context)
            elif inspect.isgenerator(result):
                # Yielding a generator is just wrong.
                new_exc = RuntimeError(
                    f'yield was used instead of yield from for '
                    f'generator in task {self!r} with {result!r}')
                self._loop.call_soon(
                    self.__step, new_exc, context=self._context)
            else:
                # Yielding something else is an error.
                new_exc = RuntimeError(f'Task got bad yield: {result!r}')
                self._loop.call_soon(
                    self.__step, new_exc, context=self._context)
        finally:
            _leave_task(self._loop, self)
            self = None  # Needed to break cycles when an exception occurs.

    def __wakeup(self, future):
        try:
            future.result()
        except BaseException as exc:
            # This may also be a cancellation.
            self.__step(exc)
        else:
            # Don't pass the value of `future.result()` explicitly,
            # as `Future.__iter__` and `Future.__await__` don't need it.
            # If we call `_step(value, None)` instead of `_step()`,
            # Python eval loop would use `.send(value)` method call,
            # instead of `__next__()`, which is slower for futures
            # that return non-generator iterators from their `__iter__`.
            self.__step()
        self = None  # Needed to break cycles when an exception occurs.


_PyTask = Task


try:
    import _asyncio
except ImportError:
    pass
else:
    # _CTask is needed for tests.
    Task = _CTask = _asyncio.Task

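
# Illustrative sketch (hypothetical helper, not part of the module API):
# the cooperative cancellation pattern described in Task.cancel() above.
async def _example_cancellable():
    try:
        await sleep(3600)
    except exceptions.CancelledError:
        # clean up here, then re-raise so the task is marked cancelled
        raise
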
def create_task(coro, *, name=None):
    """Schedule the execution of a coroutine object in a spawned task.

    Return a Task object.
    """
    loop = events.get_running_loop()
    task = loop.create_task(coro)
    _set_task_name(task, name)
    return task


# wait() and as_completed() similar to those in PEP 3148.

FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
ALL_COMPLETED = concurrent.futures.ALL_COMPLETED


async def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the Futures and coroutines given by fs to complete.

    The fs iterable must not be empty.

    Coroutines will be wrapped in Tasks.

    Returns two sets of Future: (done, pending).

    Usage:

        done, pending = await asyncio.wait(fs)

    Note: This does not raise TimeoutError! Futures that aren't done
    when the timeout occurs are returned in the second set.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect a list of futures, not {type(fs).__name__}")
    if not fs:
        raise ValueError('Set of coroutines/Futures is empty.')
    if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
        raise ValueError(f'Invalid return_when value: {return_when}')

    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)

    fs = set(fs)

    if any(coroutines.iscoroutine(f) for f in fs):
        warnings.warn("The explicit passing of coroutine objects to "
                      "asyncio.wait() is deprecated since Python 3.8, and "
                      "scheduled for removal in Python 3.11.",
                      DeprecationWarning, stacklevel=2)

    fs = {ensure_future(f, loop=loop) for f in fs}

    return await _wait(fs, timeout, return_when, loop)

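
# Illustrative sketch (hypothetical helper, not part of the module API):
# wait() partitions instead of raising TimeoutError, so the caller
# decides what to do with the stragglers.
async def _example_first_completed(tasks_):
    done, pending = await wait(tasks_, return_when=FIRST_COMPLETED)
    for t in pending:
        t.cancel()
    return [t.result() for t in done]
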
def _release_waiter(waiter, *args):
    if not waiter.done():
        waiter.set_result(None)


async def wait_for(fut, timeout, *, loop=None):
    """Wait for the single Future or coroutine to complete, with timeout.

    Coroutine will be wrapped in Task.

    Returns result of the Future or coroutine. When a timeout occurs,
    it cancels the task and raises TimeoutError. To avoid the task
    cancellation, wrap it in shield().

    If the wait is cancelled, the task is also cancelled.

    This function is a coroutine.
    """
    if loop is None:
        loop = events.get_running_loop()
    else:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)

    if timeout is None:
        return await fut

    if timeout <= 0:
        fut = ensure_future(fut, loop=loop)

        if fut.done():
            return fut.result()

        await _cancel_and_wait(fut, loop=loop)
        try:
            return fut.result()
        except exceptions.CancelledError as exc:
            raise exceptions.TimeoutError() from exc

    waiter = loop.create_future()
    timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    cb = functools.partial(_release_waiter, waiter)

    fut = ensure_future(fut, loop=loop)
    fut.add_done_callback(cb)

    try:
        # wait until the future completes or the timeout
        try:
            await waiter
        except exceptions.CancelledError:
            if fut.done():
                return fut.result()
            else:
                fut.remove_done_callback(cb)
                # We must ensure that the task is not running
                # after wait_for() returns.
                # See https://bugs.python.org/issue32751
                await _cancel_and_wait(fut, loop=loop)
                raise

        if fut.done():
            return fut.result()
        else:
            fut.remove_done_callback(cb)
            # We must ensure that the task is not running
            # after wait_for() returns.
            # See https://bugs.python.org/issue32751
            await _cancel_and_wait(fut, loop=loop)
            # In case task cancellation failed with some
            # exception, we should re-raise it
            # See https://bugs.python.org/issue40607
            try:
                return fut.result()
            except exceptions.CancelledError as exc:
                raise exceptions.TimeoutError() from exc
    finally:
        timeout_handle.cancel()

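
# Illustrative sketch (hypothetical helper, not part of the module API):
# combining wait_for() with shield() (defined below) applies the timeout
# without cancelling the underlying future, as the docstring suggests.
async def _example_timeout(fut):
    try:
        return await wait_for(shield(fut), timeout=1.0)
    except exceptions.TimeoutError:
        return None  # fut itself keeps running
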
async def _wait(fs, timeout, return_when, loop):
    """Internal helper for wait().

    The fs argument must be a collection of Futures.
    """
    assert fs, 'Set of Futures is empty.'
    waiter = loop.create_future()
    timeout_handle = None
    if timeout is not None:
        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    counter = len(fs)

    def _on_completion(f):
        nonlocal counter
        counter -= 1
        if (counter <= 0 or
                return_when == FIRST_COMPLETED or
                return_when == FIRST_EXCEPTION and (not f.cancelled() and
                                                    f.exception() is not None)):
            if timeout_handle is not None:
                timeout_handle.cancel()
            if not waiter.done():
                waiter.set_result(None)

    for f in fs:
        f.add_done_callback(_on_completion)

    try:
        await waiter
    finally:
        if timeout_handle is not None:
            timeout_handle.cancel()
        for f in fs:
            f.remove_done_callback(_on_completion)

    done, pending = set(), set()
    for f in fs:
        if f.done():
            done.add(f)
        else:
            pending.add(f)
    return done, pending


async def _cancel_and_wait(fut, loop):
    """Cancel the *fut* future or task and wait until it completes."""

    waiter = loop.create_future()
    cb = functools.partial(_release_waiter, waiter)
    fut.add_done_callback(cb)

    try:
        fut.cancel()
        # We cannot wait on *fut* directly to make
        # sure _cancel_and_wait itself is reliably cancellable.
        await waiter
    finally:
        fut.remove_done_callback(cb)


# This is *not* a @coroutine!  It is just an iterator (yielding Futures).
def as_completed(fs, *, loop=None, timeout=None):
    """Return an iterator whose values are coroutines.

    When waiting for the yielded coroutines you'll get the results (or
    exceptions!) of the original Futures (or coroutines), in the order
    in which and as soon as they complete.

    This differs from PEP 3148; the proper way to use this is:

        for f in as_completed(fs):
            result = await f  # The 'await' may raise.
            # Use result.

    If a timeout is specified, the 'await' will raise
    TimeoutError when the timeout occurs before all Futures are done.

    Note: The futures 'f' are not necessarily members of fs.
    """
    if futures.isfuture(fs) or coroutines.iscoroutine(fs):
        raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}")

    if loop is not None:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)

    from .queues import Queue  # Import here to avoid circular import problem.
    done = Queue(loop=loop)

    if loop is None:
        loop = events.get_event_loop()
    todo = {ensure_future(f, loop=loop) for f in set(fs)}
    timeout_handle = None

    def _on_timeout():
        for f in todo:
            f.remove_done_callback(_on_completion)
            done.put_nowait(None)  # Queue a dummy value for _wait_for_one().
        todo.clear()  # Can't do todo.remove(f) in the loop.

    def _on_completion(f):
        if not todo:
            return  # _on_timeout() was here first.
        todo.remove(f)
        done.put_nowait(f)
        if not todo and timeout_handle is not None:
            timeout_handle.cancel()

    async def _wait_for_one():
        f = await done.get()
        if f is None:
            # Dummy value from _on_timeout().
            raise exceptions.TimeoutError
        return f.result()  # May raise f.exception().

    for f in todo:
        f.add_done_callback(_on_completion)
    if todo and timeout is not None:
        timeout_handle = loop.call_later(timeout, _on_timeout)
    for _ in range(len(todo)):
        yield _wait_for_one()

@types.coroutine
def __sleep0():
    """Skip one event loop run cycle.

    This is a private helper for 'asyncio.sleep()', used
    when the 'delay' is set to 0. It uses a bare 'yield'
    expression (which Task.__step knows how to handle)
    instead of creating a Future object.
    """
    yield


async def sleep(delay, result=None, *, loop=None):
    """Coroutine that completes after a given time (in seconds)."""
    if loop is not None:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)

    if delay <= 0:
        await __sleep0()
        return result

    if loop is None:
        loop = events.get_running_loop()

    future = loop.create_future()
    h = loop.call_later(delay,
                        futures._set_result_unless_cancelled,
                        future, result)
    try:
        return await future
    finally:
        h.cancel()


def ensure_future(coro_or_future, *, loop=None):
    """Wrap a coroutine or an awaitable in a future.

    If the argument is a Future, it is returned directly.
    """
    if coroutines.iscoroutine(coro_or_future):
        if loop is None:
            loop = events.get_event_loop()
        task = loop.create_task(coro_or_future)
        if task._source_traceback:
            del task._source_traceback[-1]
        return task
    elif futures.isfuture(coro_or_future):
        if loop is not None and loop is not futures._get_loop(coro_or_future):
            raise ValueError('The future belongs to a different loop than '
                             'the one specified as the loop argument')
        return coro_or_future
    elif inspect.isawaitable(coro_or_future):
        return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
    else:
        raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
                        'required')


@types.coroutine
def _wrap_awaitable(awaitable):
    """Helper for asyncio.ensure_future().

    Wraps awaitable (an object with __await__) into a coroutine
    that will later be wrapped in a Task by ensure_future().
    """
    return (yield from awaitable.__await__())

_wrap_awaitable._is_coroutine = _is_coroutine

class _GatheringFuture(futures.Future):
    """Helper for gather().

    This overrides cancel() to cancel all the children and act more
    like Task.cancel(), which doesn't immediately mark itself as
    cancelled.
    """

    def __init__(self, children, *, loop=None):
        super().__init__(loop=loop)
        self._children = children
        self._cancel_requested = False

    def cancel(self, msg=None):
        if self.done():
            return False
        ret = False
        for child in self._children:
            if child.cancel(msg=msg):
                ret = True
        if ret:
            # If any child tasks were actually cancelled, we should
            # propagate the cancellation request regardless of
            # *return_exceptions* argument. See issue 32684.
            self._cancel_requested = True
        return ret


def gather(*coros_or_futures, loop=None, return_exceptions=False):
    """Return a future aggregating results from the given coroutines/futures.

    Coroutines will be wrapped in a future and scheduled in the event
    loop. They will not necessarily be scheduled in the same order as
    passed in.

    All futures must share the same event loop. If all the tasks are
    done successfully, the returned future's result is the list of
    results (in the order of the original sequence, not necessarily
    the order of results arrival). If *return_exceptions* is True,
    exceptions in the tasks are treated the same as successful
    results, and gathered in the result list; otherwise, the first
    raised exception will be immediately propagated to the returned
    future.

    Cancellation: if the outer Future is cancelled, all children (that
    have not completed yet) are also cancelled. If any child is
    cancelled, this is treated as if it raised CancelledError --
    the outer Future is *not* cancelled in this case. (This is to
    prevent the cancellation of one child from causing other children
    to be cancelled.)

    If *return_exceptions* is False, cancelling gather() after it
    has been marked done won't cancel any submitted awaitables.
    For instance, gather can be marked done after propagating an
    exception to the caller, therefore, calling ``gather.cancel()``
    after catching an exception (raised by one of the awaitables) from
    gather won't cancel any other awaitables.
    """
    if loop is not None:
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)

    return _gather(*coros_or_futures, loop=loop, return_exceptions=return_exceptions)


def _gather(*coros_or_futures, loop=None, return_exceptions=False):
    if not coros_or_futures:
        if loop is None:
            loop = events.get_event_loop()
        outer = loop.create_future()
        outer.set_result([])
        return outer

    def _done_callback(fut):
        nonlocal nfinished
        nfinished += 1

        if outer.done():
            if not fut.cancelled():
                # Mark exception retrieved.
                fut.exception()
            return

        if not return_exceptions:
            if fut.cancelled():
                # Check if 'fut' is cancelled first, as
                # 'fut.exception()' will *raise* a CancelledError
                # instead of returning it.
                exc = fut._make_cancelled_error()
                outer.set_exception(exc)
                return
            else:
                exc = fut.exception()
                if exc is not None:
                    outer.set_exception(exc)
                    return

        if nfinished == nfuts:
            # All futures are done; create a list of results
            # and set it to the 'outer' future.
            results = []

            for fut in children:
                if fut.cancelled():
                    # Check if 'fut' is cancelled first, as 'fut.exception()'
                    # will *raise* a CancelledError instead of returning it.
                    # Also, since we're adding the exception return value
                    # to 'results' instead of raising it, don't bother
                    # setting __context__. This also lets us preserve
                    # calling '_make_cancelled_error()' at most once.
                    res = exceptions.CancelledError(
                        '' if fut._cancel_message is None else
                        fut._cancel_message)
                else:
                    res = fut.exception()
                    if res is None:
                        res = fut.result()
                results.append(res)

            if outer._cancel_requested:
                # If gather is being cancelled we must propagate the
                # cancellation regardless of *return_exceptions* argument.
                # See issue 32684.
                exc = fut._make_cancelled_error()
                outer.set_exception(exc)
            else:
                outer.set_result(results)

    arg_to_fut = {}
    children = []
    nfuts = 0
    nfinished = 0
    for arg in coros_or_futures:
        if arg not in arg_to_fut:
            fut = ensure_future(arg, loop=loop)
            if loop is None:
                loop = futures._get_loop(fut)
            if fut is not arg:
                # 'arg' was not a Future, therefore, 'fut' is a new
                # Future created specifically for 'arg'. Since the caller
                # can't control it, disable the "destroy pending task"
                # warning.
                fut._log_destroy_pending = False

            nfuts += 1
            arg_to_fut[arg] = fut
            fut.add_done_callback(_done_callback)

        else:
            # There's a duplicate Future object in coros_or_futures.
            fut = arg_to_fut[arg]

        children.append(fut)

    outer = _GatheringFuture(children, loop=loop)
    return outer

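
# Illustrative sketch (hypothetical helper, not part of the module API):
# with return_exceptions=True, gather() reports failures as values
# instead of propagating the first exception.
async def _example_gather(coros):
    results = await gather(*coros, return_exceptions=True)
    return [r for r in results if not isinstance(r, BaseException)]
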
def shield(arg, *, loop=None):
|
||||
"""Wait for a future, shielding it from cancellation.
|
||||
|
||||
The statement
|
||||
|
||||
res = await shield(something())
|
||||
|
||||
is exactly equivalent to the statement
|
||||
|
||||
res = await something()
|
||||
|
||||
*except* that if the coroutine containing it is cancelled, the
|
||||
task running in something() is not cancelled. From the POV of
|
||||
something(), the cancellation did not happen. But its caller is
|
||||
still cancelled, so the yield-from expression still raises
|
||||
CancelledError. Note: If something() is cancelled by other means
|
||||
this will still cancel shield().
|
||||
|
||||
If you want to completely ignore cancellation (not recommended)
|
||||
you can combine shield() with a try/except clause, as follows:
|
||||
|
||||
try:
|
||||
res = await shield(something())
|
||||
except CancelledError:
|
||||
res = None
|
||||
"""
|
||||
if loop is not None:
|
||||
warnings.warn("The loop argument is deprecated since Python 3.8, "
|
||||
"and scheduled for removal in Python 3.10.",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
inner = ensure_future(arg, loop=loop)
|
||||
if inner.done():
|
||||
# Shortcut.
|
||||
return inner
|
||||
loop = futures._get_loop(inner)
|
||||
outer = loop.create_future()
|
||||
|
||||
def _inner_done_callback(inner):
|
||||
if outer.cancelled():
|
||||
if not inner.cancelled():
|
||||
# Mark inner's result as retrieved.
|
||||
inner.exception()
|
||||
return
|
||||
|
||||
if inner.cancelled():
|
||||
outer.cancel()
|
||||
else:
|
||||
exc = inner.exception()
|
||||
if exc is not None:
|
||||
outer.set_exception(exc)
|
||||
else:
|
||||
outer.set_result(inner.result())
|
||||
|
||||
|
||||
def _outer_done_callback(outer):
|
||||
if not inner.done():
|
||||
inner.remove_done_callback(_inner_done_callback)
|
||||
|
||||
inner.add_done_callback(_inner_done_callback)
|
||||
outer.add_done_callback(_outer_done_callback)
|
||||
return outer
|
||||
|
||||
|
||||
def run_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    Return a concurrent.futures.Future to access the result.
    """
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')
    future = concurrent.futures.Future()

    def callback():
        try:
            futures._chain_future(ensure_future(coro, loop=loop), future)
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(callback)
    return future

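run_coroutine_threadsafe() is the bridge for code running outside the loop's thread; a minimal sketch (assuming the loop runs in a dedicated thread):

import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def work(x):
    await asyncio.sleep(0.1)
    return x * 2

# Returns a concurrent.futures.Future; .result() blocks only this thread.
fut = asyncio.run_coroutine_threadsafe(work(21), loop)
print(fut.result(timeout=5))   # 42
loop.call_soon_threadsafe(loop.stop)
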
# WeakSet containing all alive tasks.
_all_tasks = weakref.WeakSet()

# Dictionary containing tasks that are currently active in
# all running event loops.  {EventLoop: Task}
_current_tasks = {}


def _register_task(task):
    """Register a new task in asyncio as executed by loop."""
    _all_tasks.add(task)


def _enter_task(loop, task):
    current_task = _current_tasks.get(loop)
    if current_task is not None:
        raise RuntimeError(f"Cannot enter into task {task!r} while another "
                           f"task {current_task!r} is being executed.")
    _current_tasks[loop] = task


def _leave_task(loop, task):
    current_task = _current_tasks.get(loop)
    if current_task is not task:
        raise RuntimeError(f"Leaving task {task!r} does not match "
                           f"the current task {current_task!r}.")
    del _current_tasks[loop]


def _unregister_task(task):
    """Unregister a task."""
    _all_tasks.discard(task)


_py_register_task = _register_task
_py_unregister_task = _unregister_task
_py_enter_task = _enter_task
_py_leave_task = _leave_task


try:
    from _asyncio import (_register_task, _unregister_task,
                          _enter_task, _leave_task,
                          _all_tasks, _current_tasks)
except ImportError:
    pass
else:
    _c_register_task = _register_task
    _c_unregister_task = _unregister_task
    _c_enter_task = _enter_task
    _c_leave_task = _leave_task

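The public helpers asyncio.all_tasks() and asyncio.current_task() are thin views over these registries (the C-accelerated versions are swapped in above when _asyncio is available):

import asyncio

async def main():
    me = asyncio.current_task()          # reads _current_tasks[loop]
    print(me.get_name(), len(asyncio.all_tasks()))

asyncio.run(main())
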
25  Tool/Python39/Lib/asyncio/threads.py  Normal file
@@ -0,0 +1,25 @@
"""High-level support for working with threads in asyncio"""

import functools
import contextvars

from . import events


__all__ = "to_thread",


async def to_thread(func, /, *args, **kwargs):
    """Asynchronously run function *func* in a separate thread.

    Any *args and **kwargs supplied for this function are directly passed
    to *func*. Also, the current :class:`contextvars.Context` is propagated,
    allowing context variables from the main thread to be accessed in the
    separate thread.

    Return a coroutine that can be awaited to get the eventual result of *func*.
    """
    loop = events.get_running_loop()
    ctx = contextvars.copy_context()
    func_call = functools.partial(ctx.run, func, *args, **kwargs)
    return await loop.run_in_executor(None, func_call)

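to_thread() usage, with the blocking call standing in for real I/O (illustrative names):

import asyncio
import time

def blocking_io(path):
    time.sleep(0.2)                      # placeholder for a blocking call
    return f"read {path}"

async def main():
    # Runs in the default ThreadPoolExecutor; the loop stays responsive,
    # and contextvars set here are visible inside blocking_io.
    print(await asyncio.to_thread(blocking_io, "data.bin"))

asyncio.run(main())
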
329  Tool/Python39/Lib/asyncio/transports.py  Normal file
@@ -0,0 +1,329 @@
"""Abstract Transport class."""

__all__ = (
    'BaseTransport', 'ReadTransport', 'WriteTransport',
    'Transport', 'DatagramTransport', 'SubprocessTransport',
)


class BaseTransport:
    """Base class for transports."""

    __slots__ = ('_extra',)

    def __init__(self, extra=None):
        if extra is None:
            extra = {}
        self._extra = extra

    def get_extra_info(self, name, default=None):
        """Get optional transport information."""
        return self._extra.get(name, default)

    def is_closing(self):
        """Return True if the transport is closing or closed."""
        raise NotImplementedError

    def close(self):
        """Close the transport.

        Buffered data will be flushed asynchronously.  No more data
        will be received.  After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError

    def set_protocol(self, protocol):
        """Set a new protocol."""
        raise NotImplementedError

    def get_protocol(self):
        """Return the current protocol."""
        raise NotImplementedError

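get_extra_info() is how protocols inspect the underlying channel; for TCP, 'peername' and 'socket' are typical keys (sketch, not part of this file):

import asyncio

class Echo(asyncio.Protocol):
    def connection_made(self, transport):
        # Extras are transport-specific and may be absent: a default applies.
        print("peer:", transport.get_extra_info('peername'))
        self.transport = transport

    def data_received(self, data):
        self.transport.write(data)
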
class ReadTransport(BaseTransport):
    """Interface for read-only transports."""

    __slots__ = ()

    def is_reading(self):
        """Return True if the transport is receiving."""
        raise NotImplementedError

    def pause_reading(self):
        """Pause the receiving end.

        No data will be passed to the protocol's data_received()
        method until resume_reading() is called.
        """
        raise NotImplementedError

    def resume_reading(self):
        """Resume the receiving end.

        Data received will once again be passed to the protocol's
        data_received() method.
        """
        raise NotImplementedError


class WriteTransport(BaseTransport):
    """Interface for write-only transports."""

    __slots__ = ()

    def set_write_buffer_limits(self, high=None, low=None):
        """Set the high- and low-water limits for write flow control.

        These two values control when to call the protocol's
        pause_writing() and resume_writing() methods.  If specified,
        the low-water limit must be less than or equal to the
        high-water limit.  Neither value can be negative.

        The defaults are implementation-specific.  If only the
        high-water limit is given, the low-water limit defaults to an
        implementation-specific value less than or equal to the
        high-water limit.  Setting high to zero forces low to zero as
        well, and causes pause_writing() to be called whenever the
        buffer becomes non-empty.  Setting low to zero causes
        resume_writing() to be called only once the buffer is empty.
        Use of zero for either limit is generally sub-optimal as it
        reduces opportunities for doing I/O and computation
        concurrently.
        """
        raise NotImplementedError

    def get_write_buffer_size(self):
        """Return the current size of the write buffer."""
        raise NotImplementedError

    def write(self, data):
        """Write some data bytes to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        """
        raise NotImplementedError

    def writelines(self, list_of_data):
        """Write a list (or any iterable) of data bytes to the transport.

        The default implementation concatenates the arguments and
        calls write() on the result.
        """
        data = b''.join(list_of_data)
        self.write(data)

    def write_eof(self):
        """Close the write end after flushing buffered data.

        (This is like typing ^D into a UNIX program reading from stdin.)

        Data may still be received.
        """
        raise NotImplementedError

    def can_write_eof(self):
        """Return True if this transport supports write_eof(), False if not."""
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError

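A protocol cooperates with these limits by implementing pause_writing()/resume_writing(); a minimal sketch with made-up watermarks:

import asyncio

class Producer(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.paused = False
        # Hypothetical limits: pause above 64 KiB buffered, resume at 16 KiB.
        transport.set_write_buffer_limits(high=64 * 1024, low=16 * 1024)

    def pause_writing(self):
        self.paused = True      # stop calling transport.write() for now

    def resume_writing(self):
        self.paused = False     # safe to write again
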
class Transport(ReadTransport, WriteTransport):
    """Interface representing a bidirectional transport.

    There may be several implementations, but typically, the user does
    not implement new transports; rather, the platform provides some
    useful transports that are implemented using the platform's best
    practices.

    The user never instantiates a transport directly; they call a
    utility function, passing it a protocol factory and other
    information necessary to create the transport and protocol.  (E.g.
    EventLoop.create_connection() or EventLoop.create_server().)

    The utility function will asynchronously create a transport and a
    protocol and hook them up by calling the protocol's
    connection_made() method, passing it the transport.

    The implementation here raises NotImplementedError for every method
    except writelines(), which concatenates the arguments and calls
    write() on the result.
    """

    __slots__ = ()


class DatagramTransport(BaseTransport):
    """Interface for datagram (UDP) transports."""

    __slots__ = ()

    def sendto(self, data, addr=None):
        """Send data to the transport.

        This does not block; it buffers the data and arranges for it
        to be sent out asynchronously.
        addr is the target socket address.
        If addr is None, use the target address given on transport creation.
        """
        raise NotImplementedError

    def abort(self):
        """Close the transport immediately.

        Buffered data will be lost.  No more data will be received.
        The protocol's connection_lost() method will (eventually) be
        called with None as its argument.
        """
        raise NotImplementedError


class SubprocessTransport(BaseTransport):

    __slots__ = ()

    def get_pid(self):
        """Get subprocess id."""
        raise NotImplementedError

    def get_returncode(self):
        """Get subprocess returncode.

        See also
        http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode
        """
        raise NotImplementedError

    def get_pipe_transport(self, fd):
        """Get transport for pipe with number fd."""
        raise NotImplementedError

    def send_signal(self, signal):
        """Send signal to subprocess.

        See also:
        docs.python.org/3/library/subprocess#subprocess.Popen.send_signal
        """
        raise NotImplementedError

    def terminate(self):
        """Stop the subprocess.

        Alias for close() method.

        On Posix OSs the method sends SIGTERM to the subprocess.
        On Windows the Win32 API function TerminateProcess()
        is called to stop the subprocess.

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate
        """
        raise NotImplementedError

    def kill(self):
        """Kill the subprocess.

        On Posix OSs the function sends SIGKILL to the subprocess.
        On Windows kill() is an alias for terminate().

        See also:
        http://docs.python.org/3/library/subprocess#subprocess.Popen.kill
        """
        raise NotImplementedError


class _FlowControlMixin(Transport):
    """All the logic for (write) flow control in a mix-in base class.

    The subclass must implement get_write_buffer_size().  It must call
    _maybe_pause_protocol() whenever the write buffer size increases,
    and _maybe_resume_protocol() whenever it decreases.  It may also
    override set_write_buffer_limits() (e.g. to specify different
    defaults).

    The subclass constructor must call super().__init__(extra).  This
    will call set_write_buffer_limits().

    The user may call set_write_buffer_limits() and
    get_write_buffer_size(), and their protocol's pause_writing() and
    resume_writing() may be called.
    """

    __slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water')

    def __init__(self, extra=None, loop=None):
        super().__init__(extra)
        assert loop is not None
        self._loop = loop
        self._protocol_paused = False
        self._set_write_buffer_limits()

    def _maybe_pause_protocol(self):
        size = self.get_write_buffer_size()
        if size <= self._high_water:
            return
        if not self._protocol_paused:
            self._protocol_paused = True
            try:
                self._protocol.pause_writing()
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.pause_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def _maybe_resume_protocol(self):
        if (self._protocol_paused and
                self.get_write_buffer_size() <= self._low_water):
            self._protocol_paused = False
            try:
                self._protocol.resume_writing()
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._loop.call_exception_handler({
                    'message': 'protocol.resume_writing() failed',
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })

    def get_write_buffer_limits(self):
        return (self._low_water, self._high_water)

    def _set_write_buffer_limits(self, high=None, low=None):
        if high is None:
            if low is None:
                high = 64 * 1024
            else:
                high = 4 * low
        if low is None:
            low = high // 4

        if not high >= low >= 0:
            raise ValueError(
                f'high ({high!r}) must be >= low ({low!r}) must be >= 0')

        self._high_water = high
        self._low_water = low

    def set_write_buffer_limits(self, high=None, low=None):
        self._set_write_buffer_limits(high=high, low=low)
        self._maybe_pause_protocol()

    def get_write_buffer_size(self):
        raise NotImplementedError

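The watermark defaults above work out as follows (a standalone rework of _set_write_buffer_limits() for illustration):

def resolve_limits(high=None, low=None):
    if high is None:
        high = 64 * 1024 if low is None else 4 * low
    if low is None:
        low = high // 4
    if not high >= low >= 0:
        raise ValueError(f'high ({high!r}) must be >= low ({low!r}) must be >= 0')
    return high, low

print(resolve_limits())            # (65536, 16384): the defaults
print(resolve_limits(low=1024))    # (4096, 1024):   high = 4 * low
print(resolve_limits(high=8192))   # (8192, 2048):   low = high // 4
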
206  Tool/Python39/Lib/asyncio/trsock.py  Normal file
@@ -0,0 +1,206 @@
import socket
import warnings


class TransportSocket:

    """A socket-like wrapper for exposing real transport sockets.

    These objects can be safely returned by APIs like
    `transport.get_extra_info('socket')`.  All potentially disruptive
    operations (like "socket.close()") are banned.
    """

    __slots__ = ('_sock',)

    def __init__(self, sock: socket.socket):
        self._sock = sock

    def _na(self, what):
        warnings.warn(
            f"Using {what} on sockets returned from get_extra_info('socket') "
            f"will be prohibited in asyncio 3.9. Please report your use case "
            f"to bugs.python.org.",
            DeprecationWarning, source=self)

    @property
    def family(self):
        return self._sock.family

    @property
    def type(self):
        return self._sock.type

    @property
    def proto(self):
        return self._sock.proto

    def __repr__(self):
        s = (
            f"<asyncio.TransportSocket fd={self.fileno()}, "
            f"family={self.family!s}, type={self.type!s}, "
            f"proto={self.proto}"
        )

        if self.fileno() != -1:
            try:
                laddr = self.getsockname()
                if laddr:
                    s = f"{s}, laddr={laddr}"
            except socket.error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s = f"{s}, raddr={raddr}"
            except socket.error:
                pass

        return f"{s}>"

    def __getstate__(self):
        raise TypeError("Cannot serialize asyncio.TransportSocket object")

    def fileno(self):
        return self._sock.fileno()

    def dup(self):
        return self._sock.dup()

    def get_inheritable(self):
        return self._sock.get_inheritable()

    def shutdown(self, how):
        # asyncio doesn't currently provide a high-level transport API
        # to shutdown the connection.
        self._sock.shutdown(how)

    def getsockopt(self, *args, **kwargs):
        return self._sock.getsockopt(*args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        self._sock.setsockopt(*args, **kwargs)

    def getpeername(self):
        return self._sock.getpeername()

    def getsockname(self):
        return self._sock.getsockname()

    def getsockbyname(self):
        # Note: plain socket objects define no getsockbyname(); this
        # delegation (kept as in upstream CPython 3.9) would raise
        # AttributeError if ever called.
        return self._sock.getsockbyname()

    def accept(self):
        self._na('accept() method')
        return self._sock.accept()

    def connect(self, *args, **kwargs):
        self._na('connect() method')
        return self._sock.connect(*args, **kwargs)

    def connect_ex(self, *args, **kwargs):
        self._na('connect_ex() method')
        return self._sock.connect_ex(*args, **kwargs)

    def bind(self, *args, **kwargs):
        self._na('bind() method')
        return self._sock.bind(*args, **kwargs)

    def ioctl(self, *args, **kwargs):
        self._na('ioctl() method')
        return self._sock.ioctl(*args, **kwargs)

    def listen(self, *args, **kwargs):
        self._na('listen() method')
        return self._sock.listen(*args, **kwargs)

    def makefile(self):
        self._na('makefile() method')
        return self._sock.makefile()

    def sendfile(self, *args, **kwargs):
        self._na('sendfile() method')
        return self._sock.sendfile(*args, **kwargs)

    def close(self):
        self._na('close() method')
        return self._sock.close()

    def detach(self):
        self._na('detach() method')
        return self._sock.detach()

    def sendmsg_afalg(self, *args, **kwargs):
        self._na('sendmsg_afalg() method')
        return self._sock.sendmsg_afalg(*args, **kwargs)

    def sendmsg(self, *args, **kwargs):
        self._na('sendmsg() method')
        return self._sock.sendmsg(*args, **kwargs)

    def sendto(self, *args, **kwargs):
        self._na('sendto() method')
        return self._sock.sendto(*args, **kwargs)

    def send(self, *args, **kwargs):
        self._na('send() method')
        return self._sock.send(*args, **kwargs)

    def sendall(self, *args, **kwargs):
        self._na('sendall() method')
        return self._sock.sendall(*args, **kwargs)

    def set_inheritable(self, *args, **kwargs):
        self._na('set_inheritable() method')
        return self._sock.set_inheritable(*args, **kwargs)

    def share(self, process_id):
        self._na('share() method')
        return self._sock.share(process_id)

    def recv_into(self, *args, **kwargs):
        self._na('recv_into() method')
        return self._sock.recv_into(*args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        self._na('recvfrom_into() method')
        return self._sock.recvfrom_into(*args, **kwargs)

    def recvmsg_into(self, *args, **kwargs):
        self._na('recvmsg_into() method')
        return self._sock.recvmsg_into(*args, **kwargs)

    def recvmsg(self, *args, **kwargs):
        self._na('recvmsg() method')
        return self._sock.recvmsg(*args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        self._na('recvfrom() method')
        return self._sock.recvfrom(*args, **kwargs)

    def recv(self, *args, **kwargs):
        self._na('recv() method')
        return self._sock.recv(*args, **kwargs)

    def settimeout(self, value):
        if value == 0:
            return
        raise ValueError(
            'settimeout(): only 0 timeout is allowed on transport sockets')

    def gettimeout(self):
        return 0

    def setblocking(self, flag):
        if not flag:
            return
        raise ValueError(
            'setblocking(): transport sockets cannot be blocking')

    def __enter__(self):
        self._na('context manager protocol')
        return self._sock.__enter__()

    def __exit__(self, *err):
        self._na('context manager protocol')
        return self._sock.__exit__(*err)

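In practice the wrapper is reached through get_extra_info('socket'); benign calls such as setsockopt() pass straight through, while the banned ones warn (sketch, not part of this file):

import asyncio
import socket

class Tuned(asyncio.Protocol):
    def connection_made(self, transport):
        sock = transport.get_extra_info('socket')   # a TransportSocket
        if sock is not None:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
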
1470  Tool/Python39/Lib/asyncio/unix_events.py  Normal file
(File diff suppressed because it is too large.)
913  Tool/Python39/Lib/asyncio/windows_events.py  Normal file
@@ -0,0 +1,913 @@
"""Selector and proactor event loops for Windows."""

import sys

if sys.platform != 'win32':  # pragma: no cover
    raise ImportError('win32 only')

import _overlapped
import _winapi
import errno
import math
import msvcrt
import socket
import struct
import time
import weakref

from . import events
from . import base_subprocess
from . import futures
from . import exceptions
from . import proactor_events
from . import selector_events
from . import tasks
from . import windows_utils
from .log import logger


__all__ = (
    'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
    'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy',
    'WindowsProactorEventLoopPolicy',
)


NULL = 0
INFINITE = 0xffffffff
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236

# Initial delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_INIT_DELAY = 0.001

# Maximum delay in seconds for connect_pipe() before retrying to connect
CONNECT_PIPE_MAX_DELAY = 0.100


class _OverlappedFuture(futures.Future):
    """Subclass of Future which represents an overlapped operation.

    Cancelling it will immediately cancel the overlapped operation.
    """

    def __init__(self, ov, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        self._ov = ov

    def _repr_info(self):
        info = super()._repr_info()
        if self._ov is not None:
            state = 'pending' if self._ov.pending else 'completed'
            info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>')
        return info

    def _cancel_overlapped(self):
        if self._ov is None:
            return
        try:
            self._ov.cancel()
        except OSError as exc:
            context = {
                'message': 'Cancelling an overlapped future failed',
                'exception': exc,
                'future': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self._ov = None

    def cancel(self, msg=None):
        self._cancel_overlapped()
        return super().cancel(msg=msg)

    def set_exception(self, exception):
        super().set_exception(exception)
        self._cancel_overlapped()

    def set_result(self, result):
        super().set_result(result)
        self._ov = None


class _BaseWaitHandleFuture(futures.Future):
    """Subclass of Future which represents a wait handle."""

    def __init__(self, ov, handle, wait_handle, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            del self._source_traceback[-1]
        # Keep a reference to the Overlapped object to keep it alive until the
        # wait is unregistered
        self._ov = ov
        self._handle = handle
        self._wait_handle = wait_handle

        # Should we call UnregisterWaitEx() if the wait completes
        # or is cancelled?
        self._registered = True

    def _poll(self):
        # non-blocking wait: use a timeout of 0 millisecond
        return (_winapi.WaitForSingleObject(self._handle, 0) ==
                _winapi.WAIT_OBJECT_0)

    def _repr_info(self):
        info = super()._repr_info()
        info.append(f'handle={self._handle:#x}')
        if self._handle is not None:
            state = 'signaled' if self._poll() else 'waiting'
            info.append(state)
        if self._wait_handle is not None:
            info.append(f'wait_handle={self._wait_handle:#x}')
        return info

    def _unregister_wait_cb(self, fut):
        # The wait was unregistered: it's not safe to destroy the Overlapped
        # object
        self._ov = None

    def _unregister_wait(self):
        if not self._registered:
            return
        self._registered = False

        wait_handle = self._wait_handle
        self._wait_handle = None
        try:
            _overlapped.UnregisterWait(wait_handle)
        except OSError as exc:
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
                return
            # ERROR_IO_PENDING means that the unregister is pending

        self._unregister_wait_cb(None)

    def cancel(self, msg=None):
        self._unregister_wait()
        return super().cancel(msg=msg)

    def set_exception(self, exception):
        self._unregister_wait()
        super().set_exception(exception)

    def set_result(self, result):
        self._unregister_wait()
        super().set_result(result)


class _WaitCancelFuture(_BaseWaitHandleFuture):
    """Subclass of Future which represents a wait for the cancellation of a
    _WaitHandleFuture using an event.
    """

    def __init__(self, ov, event, wait_handle, *, loop=None):
        super().__init__(ov, event, wait_handle, loop=loop)

        self._done_callback = None

    def cancel(self):
        raise RuntimeError("_WaitCancelFuture must not be cancelled")

    def set_result(self, result):
        super().set_result(result)
        if self._done_callback is not None:
            self._done_callback(self)

    def set_exception(self, exception):
        super().set_exception(exception)
        if self._done_callback is not None:
            self._done_callback(self)


class _WaitHandleFuture(_BaseWaitHandleFuture):
    def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
        super().__init__(ov, handle, wait_handle, loop=loop)
        self._proactor = proactor
        self._unregister_proactor = True
        self._event = _overlapped.CreateEvent(None, True, False, None)
        self._event_fut = None

    def _unregister_wait_cb(self, fut):
        if self._event is not None:
            _winapi.CloseHandle(self._event)
            self._event = None
            self._event_fut = None

        # If the wait was cancelled, the wait may never be signalled, so
        # it's required to unregister it. Otherwise, IocpProactor.close() will
        # wait forever for an event which will never come.
        #
        # If the IocpProactor already received the event, it's safe to call
        # _unregister() because we kept a reference to the Overlapped object
        # which is used as a unique key.
        self._proactor._unregister(self._ov)
        self._proactor = None

        super()._unregister_wait_cb(fut)

    def _unregister_wait(self):
        if not self._registered:
            return
        self._registered = False

        wait_handle = self._wait_handle
        self._wait_handle = None
        try:
            _overlapped.UnregisterWaitEx(wait_handle, self._event)
        except OSError as exc:
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
                return
            # ERROR_IO_PENDING is not an error, the wait was unregistered

        self._event_fut = self._proactor._wait_cancel(self._event,
                                                      self._unregister_wait_cb)


class PipeServer(object):
    """Class representing a pipe server.

    This is much like a bound, listening socket.
    """
    def __init__(self, address):
        self._address = address
        self._free_instances = weakref.WeakSet()
        # initialize the pipe attribute before calling _server_pipe_handle()
        # because this function can raise an exception and the destructor calls
        # the close() method
        self._pipe = None
        self._accept_pipe_future = None
        self._pipe = self._server_pipe_handle(True)

    def _get_unconnected_pipe(self):
        # Create new instance and return previous one.  This ensures
        # that (until the server is closed) there is always at least
        # one pipe handle for address.  Therefore if a client attempts
        # to connect it will not fail with FileNotFoundError.
        tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
        return tmp

    def _server_pipe_handle(self, first):
        # Return a wrapper for a new pipe handle.
        if self.closed():
            return None
        flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
        if first:
            flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
        h = _winapi.CreateNamedPipe(
            self._address, flags,
            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
            _winapi.PIPE_WAIT,
            _winapi.PIPE_UNLIMITED_INSTANCES,
            windows_utils.BUFSIZE, windows_utils.BUFSIZE,
            _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
        pipe = windows_utils.PipeHandle(h)
        self._free_instances.add(pipe)
        return pipe

    def closed(self):
        return (self._address is None)

    def close(self):
        if self._accept_pipe_future is not None:
            self._accept_pipe_future.cancel()
            self._accept_pipe_future = None
        # Close all instances which have not been connected to by a client.
        if self._address is not None:
            for pipe in self._free_instances:
                pipe.close()
            self._pipe = None
            self._address = None
            self._free_instances.clear()

    __del__ = close


class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
    """Windows version of selector event loop."""


class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
    """Windows version of proactor event loop using IOCP."""

    def __init__(self, proactor=None):
        if proactor is None:
            proactor = IocpProactor()
        super().__init__(proactor)

    def run_forever(self):
        try:
            assert self._self_reading_future is None
            self.call_soon(self._loop_self_reading)
            super().run_forever()
        finally:
            if self._self_reading_future is not None:
                ov = self._self_reading_future._ov
                self._self_reading_future.cancel()
                # self_reading_future was just cancelled so if it hasn't been
                # finished yet, it never will be (it's possible that it has
                # already finished and its callback is waiting in the queue,
                # where it could still happen if the event loop is restarted).
                # Unregister it otherwise IocpProactor.close will wait for it
                # forever
                if ov is not None:
                    self._proactor._unregister(ov)
                self._self_reading_future = None

    async def create_pipe_connection(self, protocol_factory, address):
        f = self._proactor.connect_pipe(address)
        pipe = await f
        protocol = protocol_factory()
        trans = self._make_duplex_pipe_transport(pipe, protocol,
                                                 extra={'addr': address})
        return trans, protocol

    async def start_serving_pipe(self, protocol_factory, address):
        server = PipeServer(address)

        def loop_accept_pipe(f=None):
            pipe = None
            try:
                if f:
                    pipe = f.result()
                    server._free_instances.discard(pipe)

                    if server.closed():
                        # A client connected before the server was closed:
                        # drop the client (close the pipe) and exit
                        pipe.close()
                        return

                    protocol = protocol_factory()
                    self._make_duplex_pipe_transport(
                        pipe, protocol, extra={'addr': address})

                pipe = server._get_unconnected_pipe()
                if pipe is None:
                    return

                f = self._proactor.accept_pipe(pipe)
            except OSError as exc:
                if pipe and pipe.fileno() != -1:
                    self.call_exception_handler({
                        'message': 'Pipe accept failed',
                        'exception': exc,
                        'pipe': pipe,
                    })
                    pipe.close()
                elif self._debug:
                    logger.warning("Accept pipe failed on pipe %r",
                                   pipe, exc_info=True)
            except exceptions.CancelledError:
                if pipe:
                    pipe.close()
            else:
                server._accept_pipe_future = f
                f.add_done_callback(loop_accept_pipe)

        self.call_soon(loop_accept_pipe)
        return [server]

    async def _make_subprocess_transport(self, protocol, args, shell,
                                         stdin, stdout, stderr, bufsize,
                                         extra=None, **kwargs):
        waiter = self.create_future()
        transp = _WindowsSubprocessTransport(self, protocol, args, shell,
                                             stdin, stdout, stderr, bufsize,
                                             waiter=waiter, extra=extra,
                                             **kwargs)
        try:
            await waiter
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException:
            transp.close()
            await transp._wait()
            raise

        return transp

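The two pipe helpers above pair up as server and client; a rough sketch under a running ProactorEventLoop (the pipe name is invented for the example):

import asyncio

async def demo():
    loop = asyncio.get_running_loop()
    address = r'\\.\pipe\demo-pipe'
    [server] = await loop.start_serving_pipe(asyncio.Protocol, address)
    transport, protocol = await loop.create_pipe_connection(
        asyncio.Protocol, address)
    transport.close()
    server.close()
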
class IocpProactor:
    """Proactor implementation using IOCP."""

    def __init__(self, concurrency=0xffffffff):
        self._loop = None
        self._results = []
        self._iocp = _overlapped.CreateIoCompletionPort(
            _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
        self._cache = {}
        self._registered = weakref.WeakSet()
        self._unregistered = []
        self._stopped_serving = weakref.WeakSet()

    def _check_closed(self):
        if self._iocp is None:
            raise RuntimeError('IocpProactor is closed')

    def __repr__(self):
        info = ['overlapped#=%s' % len(self._cache),
                'result#=%s' % len(self._results)]
        if self._iocp is None:
            info.append('closed')
        return '<%s %s>' % (self.__class__.__name__, " ".join(info))

    def set_loop(self, loop):
        self._loop = loop

    def select(self, timeout=None):
        if not self._results:
            self._poll(timeout)
        tmp = self._results
        self._results = []
        return tmp

    def _result(self, value):
        fut = self._loop.create_future()
        fut.set_result(value)
        return fut

    def recv(self, conn, nbytes, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        try:
            if isinstance(conn, socket.socket):
                ov.WSARecv(conn.fileno(), nbytes, flags)
            else:
                ov.ReadFile(conn.fileno(), nbytes)
        except BrokenPipeError:
            return self._result(b'')

        def finish_recv(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
                                    _overlapped.ERROR_OPERATION_ABORTED):
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_recv)

    def recv_into(self, conn, buf, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        try:
            if isinstance(conn, socket.socket):
                ov.WSARecvInto(conn.fileno(), buf, flags)
            else:
                ov.ReadFileInto(conn.fileno(), buf)
        except BrokenPipeError:
            return self._result(0)

        def finish_recv(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
                                    _overlapped.ERROR_OPERATION_ABORTED):
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_recv)

    def recvfrom(self, conn, nbytes, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        try:
            ov.WSARecvFrom(conn.fileno(), nbytes, flags)
        except BrokenPipeError:
            return self._result((b'', None))

        def finish_recv(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
                                    _overlapped.ERROR_OPERATION_ABORTED):
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_recv)

    def sendto(self, conn, buf, flags=0, addr=None):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)

        ov.WSASendTo(conn.fileno(), buf, flags, addr)

        def finish_send(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
                                    _overlapped.ERROR_OPERATION_ABORTED):
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_send)

    def send(self, conn, buf, flags=0):
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        if isinstance(conn, socket.socket):
            ov.WSASend(conn.fileno(), buf, flags)
        else:
            ov.WriteFile(conn.fileno(), buf)

        def finish_send(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
                                    _overlapped.ERROR_OPERATION_ABORTED):
                    raise ConnectionResetError(*exc.args)
                else:
                    raise

        return self._register(ov, conn, finish_send)

    def accept(self, listener):
        self._register_with_iocp(listener)
        conn = self._get_accept_socket(listener.family)
        ov = _overlapped.Overlapped(NULL)
        ov.AcceptEx(listener.fileno(), conn.fileno())

        def finish_accept(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
            buf = struct.pack('@P', listener.fileno())
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
            conn.settimeout(listener.gettimeout())
            return conn, conn.getpeername()

        async def accept_coro(future, conn):
            # Coroutine closing the accept socket if the future is cancelled
            try:
                await future
            except exceptions.CancelledError:
                conn.close()
                raise

        future = self._register(ov, listener, finish_accept)
        coro = accept_coro(future, conn)
        tasks.ensure_future(coro, loop=self._loop)
        return future

    def connect(self, conn, address):
        if conn.type == socket.SOCK_DGRAM:
            # WSAConnect will complete immediately for UDP sockets so we don't
            # need to register any IOCP operation
            _overlapped.WSAConnect(conn.fileno(), address)
            fut = self._loop.create_future()
            fut.set_result(None)
            return fut

        self._register_with_iocp(conn)
        # The socket needs to be locally bound before we call ConnectEx().
        try:
            _overlapped.BindLocal(conn.fileno(), conn.family)
        except OSError as e:
            if e.winerror != errno.WSAEINVAL:
                raise
            # Probably already locally bound; check using getsockname().
            if conn.getsockname()[1] == 0:
                raise
        ov = _overlapped.Overlapped(NULL)
        ov.ConnectEx(conn.fileno(), address)

        def finish_connect(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
            return conn

        return self._register(ov, conn, finish_connect)

    def sendfile(self, sock, file, offset, count):
        self._register_with_iocp(sock)
        ov = _overlapped.Overlapped(NULL)
        offset_low = offset & 0xffff_ffff
        offset_high = (offset >> 32) & 0xffff_ffff
        ov.TransmitFile(sock.fileno(),
                        msvcrt.get_osfhandle(file.fileno()),
                        offset_low, offset_high,
                        count, 0, 0)

        def finish_sendfile(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED,
                                    _overlapped.ERROR_OPERATION_ABORTED):
                    raise ConnectionResetError(*exc.args)
                else:
                    raise
        return self._register(ov, sock, finish_sendfile)

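TransmitFile() takes the 64-bit file offset as two 32-bit halves, which is exactly what the masking above computes:

offset = 0x1_2345_6789
offset_low = offset & 0xffff_ffff            # 0x2345_6789
offset_high = (offset >> 32) & 0xffff_ffff   # 0x1
assert (offset_high << 32) | offset_low == offset
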
    def accept_pipe(self, pipe):
        self._register_with_iocp(pipe)
        ov = _overlapped.Overlapped(NULL)
        connected = ov.ConnectNamedPipe(pipe.fileno())

        if connected:
            # ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED which means
            # that the pipe is connected. There is no need to wait for the
            # completion of the connection.
            return self._result(pipe)

        def finish_accept_pipe(trans, key, ov):
            ov.getresult()
            return pipe

        return self._register(ov, pipe, finish_accept_pipe)

    async def connect_pipe(self, address):
        delay = CONNECT_PIPE_INIT_DELAY
        while True:
            # Unfortunately there is no way to do an overlapped connect to
            # a pipe.  Call CreateFile() in a loop until it doesn't fail with
            # ERROR_PIPE_BUSY.
            try:
                handle = _overlapped.ConnectPipe(address)
                break
            except OSError as exc:
                if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
                    raise

            # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
            delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
            await tasks.sleep(delay)

        return windows_utils.PipeHandle(handle)

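The retry loop doubles the delay and caps it at CONNECT_PIPE_MAX_DELAY; the resulting schedule looks like this:

delay, schedule = 0.001, []
for _ in range(9):
    delay = min(delay * 2, 0.100)
    schedule.append(delay)
print(schedule)   # [0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.1, 0.1, 0.1]
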
    def wait_for_handle(self, handle, timeout=None):
        """Wait for a handle.

        Return a Future object. The result of the future is True if the wait
        completed, or False if the wait did not complete (on timeout).
        """
        return self._wait_for_handle(handle, timeout, False)

    def _wait_cancel(self, event, done_callback):
        fut = self._wait_for_handle(event, None, True)
        # add_done_callback() cannot be used because the wait may only complete
        # in IocpProactor.close(), while the event loop is not running.
        fut._done_callback = done_callback
        return fut

    def _wait_for_handle(self, handle, timeout, _is_cancel):
        self._check_closed()

        if timeout is None:
            ms = _winapi.INFINITE
        else:
            # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)

        # We only create ov so we can use ov.address as a key for the cache.
        ov = _overlapped.Overlapped(NULL)
        wait_handle = _overlapped.RegisterWaitWithQueue(
            handle, self._iocp, ov.address, ms)
        if _is_cancel:
            f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop)
        else:
            f = _WaitHandleFuture(ov, handle, wait_handle, self,
                                  loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]

        def finish_wait_for_handle(trans, key, ov):
            # Note that this second wait means that we should only use
            # this with handle types where a successful wait has no
            # effect.  So events or processes are all right, but locks
            # or semaphores are not.  Also note if the handle is
            # signalled and then quickly reset, then we may return
            # False even though we have not timed out.
            return f._poll()

        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
        return f

    def _register_with_iocp(self, obj):
        # To get notifications of finished ops on this object sent to the
        # completion port, we must register the handle.
        if obj not in self._registered:
            self._registered.add(obj)
            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
            # XXX We could also use SetFileCompletionNotificationModes()
            # to avoid sending notifications to completion port of ops
            # that succeed immediately.

    def _register(self, ov, obj, callback):
        self._check_closed()

        # Return a future which will be set with the result of the
        # operation when it completes.  The future's value is actually
        # the value returned by callback().
        f = _OverlappedFuture(ov, loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]
        if not ov.pending:
            # The operation has completed, so no need to postpone the
            # work.  We cannot take this short cut if we need the
            # NumberOfBytes, CompletionKey values returned by
            # PostQueuedCompletionStatus().
            try:
                value = callback(None, None, ov)
            except OSError as e:
                f.set_exception(e)
            else:
                f.set_result(value)
            # Even if GetOverlappedResult() was called, we have to wait for the
            # notification of the completion in GetQueuedCompletionStatus().
            # Register the overlapped operation to keep a reference to the
            # OVERLAPPED object, otherwise the memory is freed and Windows may
            # read uninitialized memory.

        # Register the overlapped operation for later.  Note that
        # we only store obj to prevent it from being garbage
        # collected too early.
        self._cache[ov.address] = (f, ov, obj, callback)
        return f

    def _unregister(self, ov):
        """Unregister an overlapped object.

        Call this method when its future has been cancelled. The event can
        already be signalled (pending in the proactor event queue). It is also
        safe if the event is never signalled (because it was cancelled).
        """
        self._check_closed()
        self._unregistered.append(ov)

    def _get_accept_socket(self, family):
        s = socket.socket(family)
        s.settimeout(0)
        return s

    def _poll(self, timeout=None):
        if timeout is None:
            ms = INFINITE
        elif timeout < 0:
            raise ValueError("negative timeout")
        else:
            # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)
            if ms >= INFINITE:
                raise ValueError("timeout too big")

        while True:
            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
            if status is None:
                break
            ms = 0

            err, transferred, key, address = status
            try:
                f, ov, obj, callback = self._cache.pop(address)
            except KeyError:
                if self._loop.get_debug():
                    self._loop.call_exception_handler({
                        'message': ('GetQueuedCompletionStatus() returned an '
                                    'unexpected event'),
                        'status': ('err=%s transferred=%s key=%#x address=%#x'
                                   % (err, transferred, key, address)),
                    })

                # key is either zero, or it is used to return a pipe
                # handle which should be closed to avoid a leak.
                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
                    _winapi.CloseHandle(key)
                continue

            if obj in self._stopped_serving:
                f.cancel()
            # Don't call the callback if _register() already read the result or
            # if the overlapped has been cancelled
            elif not f.done():
                try:
                    value = callback(transferred, key, ov)
                except OSError as e:
                    f.set_exception(e)
                    self._results.append(f)
                else:
                    f.set_result(value)
                    self._results.append(f)

        # Remove unregistered futures
        for ov in self._unregistered:
            self._cache.pop(ov.address, None)
        self._unregistered.clear()

    def _stop_serving(self, obj):
        # obj is a socket or pipe handle.  It will be closed in
        # BaseProactorEventLoop._stop_serving() which will make any
        # pending operations fail quickly.
        self._stopped_serving.add(obj)

    def close(self):
        if self._iocp is None:
            # already closed
            return

        # Cancel remaining registered operations.
        for address, (fut, ov, obj, callback) in list(self._cache.items()):
            if fut.cancelled():
                # Nothing to do with cancelled futures
                pass
            elif isinstance(fut, _WaitCancelFuture):
                # _WaitCancelFuture must not be cancelled
                pass
            else:
                try:
                    fut.cancel()
                except OSError as exc:
                    if self._loop is not None:
                        context = {
                            'message': 'Cancelling a future failed',
                            'exception': exc,
                            'future': fut,
                        }
                        if fut._source_traceback:
                            context['source_traceback'] = fut._source_traceback
                        self._loop.call_exception_handler(context)

        # Wait until all cancelled overlapped complete: don't exit with running
        # overlapped to prevent a crash. Display progress every second if the
        # loop is still running.
        msg_update = 1.0
        start_time = time.monotonic()
        next_msg = start_time + msg_update
        while self._cache:
            if next_msg <= time.monotonic():
                logger.debug('%r is running after closing for %.1f seconds',
                             self, time.monotonic() - start_time)
                next_msg = time.monotonic() + msg_update

            # handle a few events, or timeout
            self._poll(msg_update)

        self._results = []

        _winapi.CloseHandle(self._iocp)
        self._iocp = None

    def __del__(self):
        self.close()


class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        self._proc = windows_utils.Popen(
            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
            bufsize=bufsize, **kwargs)

        def callback(f):
            returncode = self._proc.poll()
            self._process_exited(returncode)

        f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
        f.add_done_callback(callback)


SelectorEventLoop = _WindowsSelectorEventLoop


class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    _loop_factory = SelectorEventLoop


class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    _loop_factory = ProactorEventLoop


DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy

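Since the proactor policy is the default here, code that needs add_reader()/add_writer() on sockets has to opt back into the selector loop explicitly (standard asyncio API, shown for context):

import asyncio
import sys

if sys.platform == 'win32':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
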