venv added, updated
76
myenv/lib/python3.12/site-packages/anyio/__init__.py
Normal file
@@ -0,0 +1,76 @@
from __future__ import annotations

from typing import Any

from ._core._eventloop import current_time as current_time
from ._core._eventloop import get_all_backends as get_all_backends
from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
from ._core._eventloop import run as run
from ._core._eventloop import sleep as sleep
from ._core._eventloop import sleep_forever as sleep_forever
from ._core._eventloop import sleep_until as sleep_until
from ._core._exceptions import BrokenResourceError as BrokenResourceError
from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
from ._core._exceptions import BusyResourceError as BusyResourceError
from ._core._exceptions import ClosedResourceError as ClosedResourceError
from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
from ._core._exceptions import EndOfStream as EndOfStream
from ._core._exceptions import IncompleteRead as IncompleteRead
from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
from ._core._exceptions import WouldBlock as WouldBlock
from ._core._fileio import AsyncFile as AsyncFile
from ._core._fileio import Path as Path
from ._core._fileio import open_file as open_file
from ._core._fileio import wrap_file as wrap_file
from ._core._resources import aclose_forcefully as aclose_forcefully
from ._core._signals import open_signal_receiver as open_signal_receiver
from ._core._sockets import connect_tcp as connect_tcp
from ._core._sockets import connect_unix as connect_unix
from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
from ._core._sockets import (
    create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
)
from ._core._sockets import create_tcp_listener as create_tcp_listener
from ._core._sockets import create_udp_socket as create_udp_socket
from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
from ._core._sockets import create_unix_listener as create_unix_listener
from ._core._sockets import getaddrinfo as getaddrinfo
from ._core._sockets import getnameinfo as getnameinfo
from ._core._sockets import wait_socket_readable as wait_socket_readable
from ._core._sockets import wait_socket_writable as wait_socket_writable
from ._core._streams import create_memory_object_stream as create_memory_object_stream
from ._core._subprocesses import open_process as open_process
from ._core._subprocesses import run_process as run_process
from ._core._synchronization import CapacityLimiter as CapacityLimiter
from ._core._synchronization import (
    CapacityLimiterStatistics as CapacityLimiterStatistics,
)
from ._core._synchronization import Condition as Condition
from ._core._synchronization import ConditionStatistics as ConditionStatistics
from ._core._synchronization import Event as Event
from ._core._synchronization import EventStatistics as EventStatistics
from ._core._synchronization import Lock as Lock
from ._core._synchronization import LockStatistics as LockStatistics
from ._core._synchronization import ResourceGuard as ResourceGuard
from ._core._synchronization import Semaphore as Semaphore
from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
from ._core._tasks import CancelScope as CancelScope
from ._core._tasks import create_task_group as create_task_group
from ._core._tasks import current_effective_deadline as current_effective_deadline
from ._core._tasks import fail_after as fail_after
from ._core._tasks import move_on_after as move_on_after
from ._core._testing import TaskInfo as TaskInfo
from ._core._testing import get_current_task as get_current_task
from ._core._testing import get_running_tasks as get_running_tasks
from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
from ._core._typedattr import typed_attribute as typed_attribute

# Re-export imports so they look like they live directly in this package
key: str
value: Any
for key, value in list(locals().items()):
    if getattr(value, "__module__", "").startswith("anyio."):
        value.__module__ = __name__
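Editor's note: a minimal usage sketch (not part of the commit) of what the re-export loop above buys. Every name is addressable from the flat ``anyio`` namespace and reports ``anyio`` as its module; the ``main`` coroutine is hypothetical:

import anyio

async def main() -> None:
    await anyio.sleep(0.1)               # re-exported from _core._eventloop
    print(anyio.get_all_backends())      # ("asyncio", "trio")
    print(anyio.CancelScope.__module__)  # "anyio", thanks to the loop above

anyio.run(main)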
Binary file not shown.
2511
myenv/lib/python3.12/site-packages/anyio/_backends/_asyncio.py
Normal file
File diff suppressed because it is too large
1177
myenv/lib/python3.12/site-packages/anyio/_backends/_trio.py
Normal file
File diff suppressed because it is too large
Binary file not shown.
166
myenv/lib/python3.12/site-packages/anyio/_core/_eventloop.py
Normal file
@@ -0,0 +1,166 @@
from __future__ import annotations

import math
import sys
import threading
from collections.abc import Awaitable, Callable, Generator
from contextlib import contextmanager
from importlib import import_module
from typing import TYPE_CHECKING, Any, TypeVar

import sniffio

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

if TYPE_CHECKING:
    from ..abc import AsyncBackend

# This must be updated when new backends are introduced
BACKENDS = "asyncio", "trio"

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

threadlocals = threading.local()
loaded_backends: dict[str, type[AsyncBackend]] = {}


def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
    *args: Unpack[PosArgsT],
    backend: str = "asyncio",
    backend_options: dict[str, Any] | None = None,
) -> T_Retval:
    """
    Run the given coroutine function in an asynchronous event loop.

    The current thread must not be already running an event loop.

    :param func: a coroutine function
    :param args: positional arguments to ``func``
    :param backend: name of the asynchronous event loop implementation – currently
        either ``asyncio`` or ``trio``
    :param backend_options: keyword arguments to call the backend ``run()``
        implementation with (documented :ref:`here <backend options>`)
    :return: the return value of the coroutine function
    :raises RuntimeError: if an asynchronous event loop is already running in this
        thread
    :raises LookupError: if the named backend is not found

    """
    try:
        asynclib_name = sniffio.current_async_library()
    except sniffio.AsyncLibraryNotFoundError:
        pass
    else:
        raise RuntimeError(f"Already running {asynclib_name} in this thread")

    try:
        async_backend = get_async_backend(backend)
    except ImportError as exc:
        raise LookupError(f"No such backend: {backend}") from exc

    token = None
    if sniffio.current_async_library_cvar.get(None) is None:
        # Since we're in control of the event loop, we can cache the name of the async
        # library
        token = sniffio.current_async_library_cvar.set(backend)

    try:
        backend_options = backend_options or {}
        return async_backend.run(func, args, {}, backend_options)
    finally:
        if token:
            sniffio.current_async_library_cvar.reset(token)


async def sleep(delay: float) -> None:
    """
    Pause the current task for the specified duration.

    :param delay: the duration, in seconds

    """
    return await get_async_backend().sleep(delay)


async def sleep_forever() -> None:
    """
    Pause the current task until it's cancelled.

    This is a shortcut for ``sleep(math.inf)``.

    .. versionadded:: 3.1

    """
    await sleep(math.inf)


async def sleep_until(deadline: float) -> None:
    """
    Pause the current task until the given time.

    :param deadline: the absolute time to wake up at (according to the internal
        monotonic clock of the event loop)

    .. versionadded:: 3.1

    """
    now = current_time()
    await sleep(max(deadline - now, 0))


def current_time() -> float:
    """
    Return the current value of the event loop's internal clock.

    :return: the clock value (seconds)

    """
    return get_async_backend().current_time()


def get_all_backends() -> tuple[str, ...]:
    """Return a tuple of the names of all built-in backends."""
    return BACKENDS


def get_cancelled_exc_class() -> type[BaseException]:
    """Return the current async library's cancellation exception class."""
    return get_async_backend().cancelled_exception_class()


#
# Private API
#


@contextmanager
def claim_worker_thread(
    backend_class: type[AsyncBackend], token: object
) -> Generator[Any, None, None]:
    threadlocals.current_async_backend = backend_class
    threadlocals.current_token = token
    try:
        yield
    finally:
        del threadlocals.current_async_backend
        del threadlocals.current_token


def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:
    if asynclib_name is None:
        asynclib_name = sniffio.current_async_library()

    # We use our own dict instead of sys.modules to get the already imported back-end
    # class because the appropriate modules in sys.modules could potentially be only
    # partially initialized
    try:
        return loaded_backends[asynclib_name]
    except KeyError:
        module = import_module(f"anyio._backends._{asynclib_name}")
        loaded_backends[asynclib_name] = module.backend_class
        return module.backend_class
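Editor's note: a minimal sketch (not part of the commit) of the entry point above. ``run()`` is the single blocking call; everything else in this module must be awaited inside it. Passing ``backend="trio"`` assumes trio is installed, otherwise ``run()`` raises LookupError via ``get_async_backend()``:

import anyio

async def tick() -> None:
    start = anyio.current_time()          # the loop's monotonic clock
    await anyio.sleep_until(start + 0.5)  # same effect as sleep(0.5) here

anyio.run(tick)                  # defaults to the asyncio backend
anyio.run(tick, backend="trio")  # assumes trio is installed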
73
myenv/lib/python3.12/site-packages/anyio/_core/_exceptions.py
Normal file
@@ -0,0 +1,73 @@
from __future__ import annotations


class BrokenResourceError(Exception):
    """
    Raised when trying to use a resource that has been rendered unusable due to external
    causes (e.g. a send stream whose peer has disconnected).
    """


class BrokenWorkerProcess(Exception):
    """
    Raised by :func:`run_sync_in_process` if the worker process terminates abruptly or
    otherwise misbehaves.
    """


class BusyResourceError(Exception):
    """
    Raised when two tasks are trying to read from or write to the same resource
    concurrently.
    """

    def __init__(self, action: str):
        super().__init__(f"Another task is already {action} this resource")


class ClosedResourceError(Exception):
    """Raised when trying to use a resource that has been closed."""


class DelimiterNotFound(Exception):
    """
    Raised during
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
    maximum number of bytes has been read without the delimiter being found.
    """

    def __init__(self, max_bytes: int) -> None:
        super().__init__(
            f"The delimiter was not found among the first {max_bytes} bytes"
        )


class EndOfStream(Exception):
    """
    Raised when trying to read from a stream that has been closed from the other end.
    """


class IncompleteRead(Exception):
    """
    Raised during
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
    connection is closed before the requested amount of bytes has been read.
    """

    def __init__(self) -> None:
        super().__init__(
            "The stream was closed before the read operation could be completed"
        )


class TypedAttributeLookupError(LookupError):
    """
    Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute
    is not found and no default value has been given.
    """


class WouldBlock(Exception):
    """Raised by ``X_nowait`` functions if ``X()`` would block."""
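Editor's note: a minimal sketch (not part of the commit) of the exception contract above: ``*_nowait`` methods raise WouldBlock instead of suspending the task. This uses the memory object stream re-exported from the top-level package:

import anyio

async def drain() -> None:
    send, receive = anyio.create_memory_object_stream(max_buffer_size=1)
    send.send_nowait("hello")
    while True:
        try:
            print(receive.receive_nowait())
        except anyio.WouldBlock:  # buffer empty; receive() would have to block
            break

anyio.run(drain)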
637
myenv/lib/python3.12/site-packages/anyio/_core/_fileio.py
Normal file
@@ -0,0 +1,637 @@
from __future__ import annotations

import os
import pathlib
import sys
from collections.abc import Callable, Iterable, Iterator, Sequence
from dataclasses import dataclass
from functools import partial
from os import PathLike
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    AnyStr,
    AsyncIterator,
    Final,
    Generic,
    overload,
)

from .. import to_thread
from ..abc import AsyncResource

if TYPE_CHECKING:
    from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
else:
    ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object


class AsyncFile(AsyncResource, Generic[AnyStr]):
    """
    An asynchronous file object.

    This class wraps a standard file object and provides async friendly versions of the
    following blocking methods (where available on the original file object):

    * read
    * read1
    * readline
    * readlines
    * readinto
    * readinto1
    * write
    * writelines
    * truncate
    * seek
    * tell
    * flush

    All other methods are directly passed through.

    This class supports the asynchronous context manager protocol which closes the
    underlying file at the end of the context block.

    This class also supports asynchronous iteration::

        async with await open_file(...) as f:
            async for line in f:
                print(line)
    """

    def __init__(self, fp: IO[AnyStr]) -> None:
        self._fp: Any = fp

    def __getattr__(self, name: str) -> object:
        return getattr(self._fp, name)

    @property
    def wrapped(self) -> IO[AnyStr]:
        """The wrapped file object."""
        return self._fp

    async def __aiter__(self) -> AsyncIterator[AnyStr]:
        while True:
            line = await self.readline()
            if line:
                yield line
            else:
                break

    async def aclose(self) -> None:
        return await to_thread.run_sync(self._fp.close)

    async def read(self, size: int = -1) -> AnyStr:
        return await to_thread.run_sync(self._fp.read, size)

    async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
        return await to_thread.run_sync(self._fp.read1, size)

    async def readline(self) -> AnyStr:
        return await to_thread.run_sync(self._fp.readline)

    async def readlines(self) -> list[AnyStr]:
        return await to_thread.run_sync(self._fp.readlines)

    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes:
        return await to_thread.run_sync(self._fp.readinto, b)

    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> bytes:
        return await to_thread.run_sync(self._fp.readinto1, b)

    @overload
    async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ...

    @overload
    async def write(self: AsyncFile[str], b: str) -> int: ...

    async def write(self, b: ReadableBuffer | str) -> int:
        return await to_thread.run_sync(self._fp.write, b)

    @overload
    async def writelines(
        self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
    ) -> None: ...

    @overload
    async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ...

    async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
        return await to_thread.run_sync(self._fp.writelines, lines)

    async def truncate(self, size: int | None = None) -> int:
        return await to_thread.run_sync(self._fp.truncate, size)

    async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
        return await to_thread.run_sync(self._fp.seek, offset, whence)

    async def tell(self) -> int:
        return await to_thread.run_sync(self._fp.tell)

    async def flush(self) -> None:
        return await to_thread.run_sync(self._fp.flush)


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenBinaryMode,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]: ...


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenTextMode = ...,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]: ...


async def open_file(
    file: str | PathLike[str] | int,
    mode: str = "r",
    buffering: int = -1,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
    closefd: bool = True,
    opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
    """
    Open a file asynchronously.

    The arguments are exactly the same as for the builtin :func:`open`.

    :return: an asynchronous file object

    """
    fp = await to_thread.run_sync(
        open, file, mode, buffering, encoding, errors, newline, closefd, opener
    )
    return AsyncFile(fp)


def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
    """
    Wrap an existing file as an asynchronous file.

    :param file: an existing file-like object
    :return: an asynchronous file object

    """
    return AsyncFile(file)


@dataclass(eq=False)
class _PathIterator(AsyncIterator["Path"]):
    iterator: Iterator[PathLike[str]]

    async def __anext__(self) -> Path:
        nextval = await to_thread.run_sync(
            next, self.iterator, None, abandon_on_cancel=True
        )
        if nextval is None:
            raise StopAsyncIteration from None

        return Path(nextval)


class Path:
    """
    An asynchronous version of :class:`pathlib.Path`.

    This class cannot be substituted for :class:`pathlib.Path` or
    :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
    interface.

    It implements the Python 3.10 version of the :class:`pathlib.Path` interface,
    except for the deprecated :meth:`~pathlib.Path.link_to` method.

    Any methods that do disk I/O need to be awaited on. These methods are:

    * :meth:`~pathlib.Path.absolute`
    * :meth:`~pathlib.Path.chmod`
    * :meth:`~pathlib.Path.cwd`
    * :meth:`~pathlib.Path.exists`
    * :meth:`~pathlib.Path.expanduser`
    * :meth:`~pathlib.Path.group`
    * :meth:`~pathlib.Path.hardlink_to`
    * :meth:`~pathlib.Path.home`
    * :meth:`~pathlib.Path.is_block_device`
    * :meth:`~pathlib.Path.is_char_device`
    * :meth:`~pathlib.Path.is_dir`
    * :meth:`~pathlib.Path.is_fifo`
    * :meth:`~pathlib.Path.is_file`
    * :meth:`~pathlib.Path.is_mount`
    * :meth:`~pathlib.Path.lchmod`
    * :meth:`~pathlib.Path.lstat`
    * :meth:`~pathlib.Path.mkdir`
    * :meth:`~pathlib.Path.open`
    * :meth:`~pathlib.Path.owner`
    * :meth:`~pathlib.Path.read_bytes`
    * :meth:`~pathlib.Path.read_text`
    * :meth:`~pathlib.Path.readlink`
    * :meth:`~pathlib.Path.rename`
    * :meth:`~pathlib.Path.replace`
    * :meth:`~pathlib.Path.rmdir`
    * :meth:`~pathlib.Path.samefile`
    * :meth:`~pathlib.Path.stat`
    * :meth:`~pathlib.Path.touch`
    * :meth:`~pathlib.Path.unlink`
    * :meth:`~pathlib.Path.write_bytes`
    * :meth:`~pathlib.Path.write_text`

    Additionally, the following methods return an async iterator yielding
    :class:`~.Path` objects:

    * :meth:`~pathlib.Path.glob`
    * :meth:`~pathlib.Path.iterdir`
    * :meth:`~pathlib.Path.rglob`
    """

    __slots__ = "_path", "__weakref__"

    __weakref__: Any

    def __init__(self, *args: str | PathLike[str]) -> None:
        self._path: Final[pathlib.Path] = pathlib.Path(*args)

    def __fspath__(self) -> str:
        return self._path.__fspath__()

    def __str__(self) -> str:
        return self._path.__str__()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.as_posix()!r})"

    def __bytes__(self) -> bytes:
        return self._path.__bytes__()

    def __hash__(self) -> int:
        return self._path.__hash__()

    def __eq__(self, other: object) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__eq__(target)

    def __lt__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__lt__(target)

    def __le__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__le__(target)

    def __gt__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__gt__(target)

    def __ge__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__ge__(target)

    def __truediv__(self, other: str | PathLike[str]) -> Path:
        return Path(self._path / other)

    def __rtruediv__(self, other: str | PathLike[str]) -> Path:
        return Path(other) / self

    @property
    def parts(self) -> tuple[str, ...]:
        return self._path.parts

    @property
    def drive(self) -> str:
        return self._path.drive

    @property
    def root(self) -> str:
        return self._path.root

    @property
    def anchor(self) -> str:
        return self._path.anchor

    @property
    def parents(self) -> Sequence[Path]:
        return tuple(Path(p) for p in self._path.parents)

    @property
    def parent(self) -> Path:
        return Path(self._path.parent)

    @property
    def name(self) -> str:
        return self._path.name

    @property
    def suffix(self) -> str:
        return self._path.suffix

    @property
    def suffixes(self) -> list[str]:
        return self._path.suffixes

    @property
    def stem(self) -> str:
        return self._path.stem

    async def absolute(self) -> Path:
        path = await to_thread.run_sync(self._path.absolute)
        return Path(path)

    def as_posix(self) -> str:
        return self._path.as_posix()

    def as_uri(self) -> str:
        return self._path.as_uri()

    def match(self, path_pattern: str) -> bool:
        return self._path.match(path_pattern)

    def is_relative_to(self, other: str | PathLike[str]) -> bool:
        try:
            self.relative_to(other)
            return True
        except ValueError:
            return False

    async def is_junction(self) -> bool:
        return await to_thread.run_sync(self._path.is_junction)

    async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
        func = partial(os.chmod, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, mode)

    @classmethod
    async def cwd(cls) -> Path:
        path = await to_thread.run_sync(pathlib.Path.cwd)
        return cls(path)

    async def exists(self) -> bool:
        return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)

    async def expanduser(self) -> Path:
        return Path(
            await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
        )

    def glob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.glob(pattern)
        return _PathIterator(gen)

    async def group(self) -> str:
        return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)

    async def hardlink_to(
        self, target: str | bytes | PathLike[str] | PathLike[bytes]
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(os.link, target, self)

    @classmethod
    async def home(cls) -> Path:
        home_path = await to_thread.run_sync(pathlib.Path.home)
        return cls(home_path)

    def is_absolute(self) -> bool:
        return self._path.is_absolute()

    async def is_block_device(self) -> bool:
        return await to_thread.run_sync(
            self._path.is_block_device, abandon_on_cancel=True
        )

    async def is_char_device(self) -> bool:
        return await to_thread.run_sync(
            self._path.is_char_device, abandon_on_cancel=True
        )

    async def is_dir(self) -> bool:
        return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)

    async def is_fifo(self) -> bool:
        return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)

    async def is_file(self) -> bool:
        return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)

    async def is_mount(self) -> bool:
        return await to_thread.run_sync(
            os.path.ismount, self._path, abandon_on_cancel=True
        )

    def is_reserved(self) -> bool:
        return self._path.is_reserved()

    async def is_socket(self) -> bool:
        return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)

    async def is_symlink(self) -> bool:
        return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)

    def iterdir(self) -> AsyncIterator[Path]:
        gen = self._path.iterdir()
        return _PathIterator(gen)

    def joinpath(self, *args: str | PathLike[str]) -> Path:
        return Path(self._path.joinpath(*args))

    async def lchmod(self, mode: int) -> None:
        await to_thread.run_sync(self._path.lchmod, mode)

    async def lstat(self) -> os.stat_result:
        return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)

    async def mkdir(
        self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
    ) -> None:
        await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)

    @overload
    async def open(
        self,
        mode: OpenBinaryMode,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[bytes]: ...

    @overload
    async def open(
        self,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[str]: ...

    async def open(
        self,
        mode: str = "r",
        buffering: int = -1,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> AsyncFile[Any]:
        fp = await to_thread.run_sync(
            self._path.open, mode, buffering, encoding, errors, newline
        )
        return AsyncFile(fp)

    async def owner(self) -> str:
        return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)

    async def read_bytes(self) -> bytes:
        return await to_thread.run_sync(self._path.read_bytes)

    async def read_text(
        self, encoding: str | None = None, errors: str | None = None
    ) -> str:
        return await to_thread.run_sync(self._path.read_text, encoding, errors)

    if sys.version_info >= (3, 12):

        def relative_to(
            self, *other: str | PathLike[str], walk_up: bool = False
        ) -> Path:
            return Path(self._path.relative_to(*other, walk_up=walk_up))

    else:

        def relative_to(self, *other: str | PathLike[str]) -> Path:
            return Path(self._path.relative_to(*other))

    async def readlink(self) -> Path:
        target = await to_thread.run_sync(os.readlink, self._path)
        return Path(target)

    async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.rename, target)
        return Path(target)

    async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.replace, target)
        return Path(target)

    async def resolve(self, strict: bool = False) -> Path:
        func = partial(self._path.resolve, strict=strict)
        return Path(await to_thread.run_sync(func, abandon_on_cancel=True))

    def rglob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.rglob(pattern)
        return _PathIterator(gen)

    async def rmdir(self) -> None:
        await to_thread.run_sync(self._path.rmdir)

    async def samefile(self, other_path: str | PathLike[str]) -> bool:
        if isinstance(other_path, Path):
            other_path = other_path._path

        return await to_thread.run_sync(
            self._path.samefile, other_path, abandon_on_cancel=True
        )

    async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
        func = partial(os.stat, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)

    async def symlink_to(
        self,
        target: str | bytes | PathLike[str] | PathLike[bytes],
        target_is_directory: bool = False,
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)

    async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
        await to_thread.run_sync(self._path.touch, mode, exist_ok)

    async def unlink(self, missing_ok: bool = False) -> None:
        try:
            await to_thread.run_sync(self._path.unlink)
        except FileNotFoundError:
            if not missing_ok:
                raise

    if sys.version_info >= (3, 12):

        async def walk(
            self,
            top_down: bool = True,
            on_error: Callable[[OSError], object] | None = None,
            follow_symlinks: bool = False,
        ) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
            def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
                try:
                    return next(gen)
                except StopIteration:
                    return None

            gen = self._path.walk(top_down, on_error, follow_symlinks)
            while True:
                value = await to_thread.run_sync(get_next_value)
                if value is None:
                    return

                root, dirs, paths = value
                yield Path(root), dirs, paths

    def with_name(self, name: str) -> Path:
        return Path(self._path.with_name(name))

    def with_stem(self, stem: str) -> Path:
        return Path(self._path.with_name(stem + self._path.suffix))

    def with_suffix(self, suffix: str) -> Path:
        return Path(self._path.with_suffix(suffix))

    def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
        return Path(*pathsegments)

    async def write_bytes(self, data: bytes) -> int:
        return await to_thread.run_sync(self._path.write_bytes, data)

    async def write_text(
        self,
        data: str,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> int:
        # Path.write_text() does not support the "newline" parameter before Python 3.10
        def sync_write_text() -> int:
            with self._path.open(
                "w", encoding=encoding, errors=errors, newline=newline
            ) as fp:
                return fp.write(data)

        return await to_thread.run_sync(sync_write_text)


PathLike.register(Path)
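Editor's note: a minimal sketch (not part of the commit) tying the two halves of this file together. Both anyio.Path and AsyncFile push the actual blocking I/O into a worker thread via to_thread.run_sync(); the file name here is hypothetical:

import anyio

async def demo() -> None:
    path = anyio.Path("example.txt")  # hypothetical file
    await path.write_text("hello\n")
    async with await anyio.open_file(path) as f:  # AsyncFile context manager
        async for line in f:                      # async iteration, per the docstring
            print(line, end="")
    await path.unlink()

anyio.run(demo)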
18
myenv/lib/python3.12/site-packages/anyio/_core/_resources.py
Normal file
@@ -0,0 +1,18 @@
from __future__ import annotations

from ..abc import AsyncResource
from ._tasks import CancelScope


async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource in a cancelled scope.

    Doing this closes the resource without waiting on anything.

    :param resource: the resource to close

    """
    with CancelScope() as scope:
        scope.cancel()
        await resource.aclose()
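Editor's note: a minimal sketch (not part of the commit). Because the scope is cancelled before ``aclose()`` is awaited, any graceful-shutdown handshake inside ``aclose()`` is skipped; the peer address is hypothetical:

import anyio

async def abort() -> None:
    stream = await anyio.connect_tcp("example.com", 80)  # hypothetical peer
    await anyio.aclose_forcefully(stream)  # close immediately, skip goodbyes

anyio.run(abort)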
25
myenv/lib/python3.12/site-packages/anyio/_core/_signals.py
Normal file
@@ -0,0 +1,25 @@
from __future__ import annotations

from collections.abc import AsyncIterator
from signal import Signals
from typing import ContextManager

from ._eventloop import get_async_backend


def open_signal_receiver(*signals: Signals) -> ContextManager[AsyncIterator[Signals]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields
        signal numbers

    .. warning:: Windows does not support signals natively so it is best to avoid
        relying on this in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for
        the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    return get_async_backend().open_signal_receiver(*signals)
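Editor's note: a minimal sketch (not part of the commit) of the receiver in use. Note the plain (non-async) ``with``, since only the iterator it yields is asynchronous; avoid on Windows per the warning above:

import signal
import anyio

async def wait_for_shutdown() -> None:
    with anyio.open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signals:
        async for signum in signals:
            print(f"received signal {signum}, shutting down")
            break

anyio.run(wait_for_shutdown)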
711
myenv/lib/python3.12/site-packages/anyio/_core/_sockets.py
Normal file
@@ -0,0 +1,711 @@
from __future__ import annotations

import errno
import os
import socket
import ssl
import stat
import sys
from collections.abc import Awaitable
from ipaddress import IPv6Address, ip_address
from os import PathLike, chmod
from socket import AddressFamily, SocketKind
from typing import Any, Literal, cast, overload

from .. import to_thread
from ..abc import (
    ConnectedUDPSocket,
    ConnectedUNIXDatagramSocket,
    IPAddressType,
    IPSockAddrType,
    SocketListener,
    SocketStream,
    UDPSocket,
    UNIXDatagramSocket,
    UNIXSocketStream,
)
from ..streams.stapled import MultiListener
from ..streams.tls import TLSStream
from ._eventloop import get_async_backend
from ._resources import aclose_forcefully
from ._synchronization import Event
from ._tasks import create_task_group, move_on_after

if sys.version_info < (3, 11):
    from exceptiongroup import ExceptionGroup

IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41)  # https://bugs.python.org/issue29515

AnyIPAddressFamily = Literal[
    AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]


# tls_hostname given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# ssl_context given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=True
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[True],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=False
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[False],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...


# No TLS arguments
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...


async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = None,
    tls: bool = False,
    ssl_context: ssl.SSLContext | None = None,
    tls_standard_compatible: bool = True,
    tls_hostname: str | None = None,
    happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC
    6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
    each one is tried until one connection attempt succeeds. If the first attempt does
    not connect within 250 milliseconds, a second attempt is started using the next
    address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
    available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before
        connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is
        created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
        before closing the stream and requires that the server does this as well.
        Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to
        the value of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection
        attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: SocketStream | None = None

    async def try_connect(remote_host: str, event: Event) -> None:
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
        except OSError as exc:
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                connected_stream = stream
                tg.cancel_scope.cancel()
            else:
                await stream.aclose()
        finally:
            event.set()

    asynclib = get_async_backend()
    local_address: IPSockAddrType | None = None
    family = socket.AF_UNSPEC
    if local_host:
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(
            target_host, remote_port, family=family, type=socket.SOCK_STREAM
        )

        # Organize the list so that the first address is an IPv6 address (if available)
        # and the second one is an IPv4 address. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs: list[tuple[socket.AddressFamily, str]] = []
        for af, *rest, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))
    else:
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]

    oserrors: list[OSError] = []
    async with create_task_group() as tg:
        for i, (af, addr) in enumerate(target_addrs):
            event = Event()
            tg.start_soon(try_connect, addr, event)
            with move_on_after(happy_eyeballs_delay):
                await event.wait()

    if connected_stream is None:
        cause = (
            oserrors[0]
            if len(oserrors) == 1
            else ExceptionGroup("multiple connection attempts failed", oserrors)
        )
        raise OSError("All connection attempts failed") from cause

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(
                connected_stream,
                server_side=False,
                hostname=tls_hostname or str(remote_host),
                ssl_context=ssl_context,
                standard_compatible=tls_standard_compatible,
            )
        except BaseException:
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream
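Editor's note: a minimal sketch (not part of the commit) of ``connect_tcp`` taking the TLS path described above; the host is hypothetical and the handshake happens inside ``TLSStream.wrap``:

import anyio

async def fetch_head() -> None:
    async with await anyio.connect_tcp("example.com", 443, tls=True) as stream:
        await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        print(await stream.receive())

anyio.run(fetch_head)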

async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream:
    """
    Connect to the given UNIX socket.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object

    """
    path = os.fspath(path)
    return await get_async_backend().connect_unix(path)


async def create_tcp_listener(
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
    backlog: int = 65536,
    reuse_port: bool = False,
) -> MultiListener[SocketStream]:
    """
    Create a TCP socket listener.

    :param local_port: port number to listen on
    :param local_host: IP address of the interface to listen on. If omitted, listen on
        all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
        family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
    :param family: address family (used if ``local_host`` was omitted)
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a list of listener objects

    """
    asynclib = get_async_backend()
    backlog = min(backlog, 65536)
    local_host = str(local_host) if local_host is not None else None
    gai_res = await getaddrinfo(
        local_host,
        local_port,
        family=family,
        type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
        flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
    )
    listeners: list[SocketListener] = []
    try:
        # The set() is here to work around a glibc bug:
        # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
        sockaddr: tuple[str, int] | tuple[str, int, int, int]
        for fam, kind, *_, sockaddr in sorted(set(gai_res)):
            # Workaround for an uvloop bug where we don't get the correct scope ID for
            # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
            # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
            if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
                continue

            raw_socket = socket.socket(fam)
            raw_socket.setblocking(False)

            # For Windows, enable exclusive address use. For others, enable address
            # reuse.
            if sys.platform == "win32":
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            if reuse_port:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

            # If only IPv6 was requested, disable dual stack operation
            if fam == socket.AF_INET6:
                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

            # Workaround for #554
            if "%" in sockaddr[0]:
                addr, scope_id = sockaddr[0].split("%", 1)
                sockaddr = (addr, sockaddr[1], 0, int(scope_id))

            raw_socket.bind(sockaddr)
            raw_socket.listen(backlog)
            listener = asynclib.create_tcp_listener(raw_socket)
            listeners.append(listener)
    except BaseException:
        for listener in listeners:
            await listener.aclose()

        raise

    return MultiListener(listeners)
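Editor's note: a minimal sketch (not part of the commit) of serving with the MultiListener returned above; ``serve()`` runs one handler task per accepted connection. The port is hypothetical:

import anyio
from anyio.abc import SocketStream

async def echo(stream: SocketStream) -> None:
    async with stream:
        async for chunk in stream:  # iterate until the peer closes
            await stream.send(chunk)

async def main() -> None:
    listener = await anyio.create_tcp_listener(local_port=9999)  # hypothetical port
    await listener.serve(echo)

anyio.run(main)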

async def create_unix_listener(
    path: str | bytes | PathLike[Any],
    *,
    mode: int | None = None,
    backlog: int = 65536,
) -> SocketListener:
    """
    Create a UNIX socket listener.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :return: a listener object

    .. versionchanged:: 3.0
        If a socket already exists on the file system in the given path, it will be
        removed first.

    """
    backlog = min(backlog, 65536)
    raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
    try:
        raw_socket.listen(backlog)
        return get_async_backend().create_unix_listener(raw_socket)
    except BaseException:
        raw_socket.close()
        raise


async def create_udp_socket(
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> UDPSocket:
    """
    Create a UDP socket.

    If ``port`` has been given, the socket will be bound to this port on the local
    machine, making this socket suitable for providing UDP based services.

    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a UDP socket

    """
    if family is AddressFamily.AF_UNSPEC and not local_host:
        raise ValueError('Either "family" or "local_host" must be given')

    if local_host:
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]
    elif family is AddressFamily.AF_INET6:
        local_address = ("::", 0)
    else:
        local_address = ("0.0.0.0", 0)

    sock = await get_async_backend().create_udp_socket(
        family, local_address, None, reuse_port
    )
    return cast(UDPSocket, sock)


async def create_connected_udp_socket(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> ConnectedUDPSocket:
    """
    Create a connected UDP socket.

    Connected UDP sockets can only communicate with the specified remote host/port, and
    any packets sent from other sources are dropped.

    :param remote_host: remote host to set as the default target
    :param remote_port: port on the remote host to set as the default target
    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` or ``remote_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a connected UDP socket

    """
    local_address = None
    if local_host:
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]

    gai_res = await getaddrinfo(
        str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
    )
    family = cast(AnyIPAddressFamily, gai_res[0][0])
    remote_address = gai_res[0][-1]

    sock = await get_async_backend().create_udp_socket(
        family, local_address, remote_address, reuse_port
    )
    return cast(ConnectedUDPSocket, sock)


async def create_unix_datagram_socket(
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> UNIXDatagramSocket:
    """
    Create a UNIX datagram socket.

    Not available on Windows.

    If ``local_path`` has been given, the socket will be bound to this path, making this
    socket suitable for receiving datagrams from other processes. Other processes can
    send datagrams to this socket only if ``local_path`` is set.

    If a socket already exists on the file system in the ``local_path``, it will be
    removed first.

    :param local_path: the path on which to bind to
    :param local_mode: permissions to set on the local socket
    :return: a UNIX datagram socket

    """
    raw_socket = await setup_unix_local_socket(
        local_path, local_mode, socket.SOCK_DGRAM
    )
    return await get_async_backend().create_unix_datagram_socket(raw_socket, None)


async def create_connected_unix_datagram_socket(
    remote_path: str | bytes | PathLike[Any],
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> ConnectedUNIXDatagramSocket:
    """
    Create a connected UNIX datagram socket.

    Connected datagram sockets can only communicate with the specified remote path.

    If ``local_path`` has been given, the socket will be bound to this path, making
    this socket suitable for receiving datagrams from other processes. Other processes
    can send datagrams to this socket only if ``local_path`` is set.

    If a socket already exists on the file system in the ``local_path``, it will be
    removed first.

    :param remote_path: the path to set as the default target
    :param local_path: the path on which to bind to
    :param local_mode: permissions to set on the local socket
    :return: a connected UNIX datagram socket

    """
    remote_path = os.fspath(remote_path)
    raw_socket = await setup_unix_local_socket(
        local_path, local_mode, socket.SOCK_DGRAM
    )
    return await get_async_backend().create_unix_datagram_socket(
        raw_socket, remote_path
    )


async def getaddrinfo(
    host: bytes | str | None,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
    """
    Look up a numeric IP address given a host name.

    Internationalized domain names are translated according to the (non-transitional)
    IDNA 2008 standard.

    .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
        (host, port), unlike what :func:`socket.getaddrinfo` does.

    :param host: host name
    :param port: port number
    :param family: socket family (``AF_INET``, ...)
    :param type: socket type (``SOCK_STREAM``, ...)
    :param proto: protocol number
    :param flags: flags to pass to upstream ``getaddrinfo()``
    :return: list of tuples containing (family, type, proto, canonname, sockaddr)

    .. seealso:: :func:`socket.getaddrinfo`

    """
    # Handle unicode hostnames
    if isinstance(host, str):
        try:
            encoded_host: bytes | None = host.encode("ascii")
        except UnicodeEncodeError:
            import idna

            encoded_host = idna.encode(host, uts46=True)
    else:
        encoded_host = host

    gai_res = await get_async_backend().getaddrinfo(
        encoded_host, port, family=family, type=type, proto=proto, flags=flags
    )
    return [
        (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
        for family, type, proto, canonname, sockaddr in gai_res
    ]


def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
    """
    Look up the host name of an IP address.

    :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
    :param flags: flags to pass to upstream ``getnameinfo()``
    :return: a tuple of (host name, service name)

    .. seealso:: :func:`socket.getnameinfo`

    """
    return get_async_backend().getnameinfo(sockaddr, flags)


def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
    """
    Wait until the given socket has data to be read.

    This does **NOT** work on Windows when using the asyncio backend with a proactor
    event loop (default on py3.8+).

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become readable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become readable

    """
    return get_async_backend().wait_socket_readable(sock)


def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
    """
    Wait until the given socket can be written to.

    This does **NOT** work on Windows when using the asyncio backend with a proactor
    event loop (default on py3.8+).

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become writable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become writable

    """
    return get_async_backend().wait_socket_writable(sock)


#
# Private API
#


def convert_ipv6_sockaddr(
    sockaddr: tuple[str, int, int, int] | tuple[str, int],
) -> tuple[str, int]:
    """
    Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.

    If the scope ID is nonzero, it is added to the address, separated with ``%``.
    Otherwise the flow id and scope id are simply cut off from the tuple.
    Any other kinds of socket addresses are returned as-is.

    :param sockaddr: the result of :meth:`~socket.socket.getsockname`
    :return: the converted socket address
|
||||
|
||||
"""
|
||||
# This is more complicated than it should be because of MyPy
|
||||
if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
|
||||
host, port, flowinfo, scope_id = sockaddr
|
||||
if scope_id:
|
||||
# PyPy (as of v7.3.11) leaves the interface name in the result, so
|
||||
# we discard it and only get the scope ID from the end
|
||||
# (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
|
||||
host = host.split("%")[0]
|
||||
|
||||
# Add scope_id to the address
|
||||
return f"{host}%{scope_id}", port
|
||||
else:
|
||||
return host, port
|
||||
else:
|
||||
return sockaddr
|
||||
|
||||
|
||||
async def setup_unix_local_socket(
|
||||
path: None | str | bytes | PathLike[Any],
|
||||
mode: int | None,
|
||||
socktype: int,
|
||||
) -> socket.socket:
|
||||
"""
|
||||
Create a UNIX local socket object, deleting the socket at the given path if it
|
||||
exists.
|
||||
|
||||
Not available on Windows.
|
||||
|
||||
:param path: path of the socket
|
||||
:param mode: permissions to set on the socket
|
||||
:param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM
|
||||
|
||||
"""
|
||||
path_str: str | bytes | None
|
||||
if path is not None:
|
||||
path_str = os.fspath(path)
|
||||
|
||||
# Copied from pathlib...
|
||||
try:
|
||||
stat_result = os.stat(path)
|
||||
except OSError as e:
|
||||
if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EBADF, errno.ELOOP):
|
||||
raise
|
||||
else:
|
||||
if stat.S_ISSOCK(stat_result.st_mode):
|
||||
os.unlink(path)
|
||||
else:
|
||||
path_str = None
|
||||
|
||||
raw_socket = socket.socket(socket.AF_UNIX, socktype)
|
||||
raw_socket.setblocking(False)
|
||||
|
||||
if path_str is not None:
|
||||
try:
|
||||
await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True)
|
||||
if mode is not None:
|
||||
await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True)
|
||||
except BaseException:
|
||||
raw_socket.close()
|
||||
raise
|
||||
|
||||
return raw_socket
|
||||
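A minimal usage sketch for the resolver helper above (not part of the commit; the
host name and port are illustrative):

import anyio

async def main() -> None:
    # getaddrinfo() yields (family, type, proto, canonname, sockaddr) tuples,
    # with 4-tuple IPv6 addresses already collapsed to (host, port) 2-tuples
    for family, type_, proto, canonname, sockaddr in await anyio.getaddrinfo(
        "example.org", 443
    ):
        print(family, sockaddr)

anyio.run(main)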
52
myenv/lib/python3.12/site-packages/anyio/_core/_streams.py
Normal file
@@ -0,0 +1,52 @@
from __future__ import annotations

import math
from typing import Tuple, TypeVar
from warnings import warn

from ..streams.memory import (
    MemoryObjectReceiveStream,
    MemoryObjectSendStream,
    MemoryObjectStreamState,
)

T_Item = TypeVar("T_Item")


class create_memory_object_stream(
    Tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
):
    """
    Create a memory object stream.

    The stream's item type can be annotated like
    :func:`create_memory_object_stream[T_Item]`.

    :param max_buffer_size: number of items held in the buffer until ``send()`` starts
        blocking
    :param item_type: old way of marking the streams with the right generic type for
        static typing (does nothing on AnyIO 4)

        .. deprecated:: 4.0
           Use ``create_memory_object_stream[YourItemType](...)`` instead.
    :return: a tuple of (send stream, receive stream)

    """

    def __new__(  # type: ignore[misc]
        cls, max_buffer_size: float = 0, item_type: object = None
    ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
        if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
            raise ValueError("max_buffer_size must be either an integer or math.inf")
        if max_buffer_size < 0:
            raise ValueError("max_buffer_size cannot be negative")
        if item_type is not None:
            warn(
                "The item_type argument has been deprecated in AnyIO 4.0. "
                "Use create_memory_object_stream[YourItemType](...) instead.",
                DeprecationWarning,
                stacklevel=2,
            )

        state = MemoryObjectStreamState[T_Item](max_buffer_size)
        return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state))
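A short sketch of the subscription-based typing described in the docstring above
(not part of the commit):

import anyio
from anyio import create_memory_object_stream

async def main() -> None:
    # on AnyIO 4 the item type is given via subscription, not item_type=
    send, receive = create_memory_object_stream[str](max_buffer_size=1)
    async with send, receive:
        await send.send("hello")
        print(await receive.receive())

anyio.run(main)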
140
myenv/lib/python3.12/site-packages/anyio/_core/_subprocesses.py
Normal file
@@ -0,0 +1,140 @@
from __future__ import annotations

from collections.abc import AsyncIterable, Mapping, Sequence
from io import BytesIO
from os import PathLike
from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
from typing import IO, Any, cast

from ..abc import Process
from ._eventloop import get_async_backend
from ._tasks import create_task_group


async def run_process(
    command: str | bytes | Sequence[str | bytes],
    *,
    input: bytes | None = None,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    check: bool = True,
    cwd: str | bytes | PathLike[str] | None = None,
    env: Mapping[str, str] | None = None,
    start_new_session: bool = False,
) -> CompletedProcess[bytes]:
    """
    Run an external command in a subprocess and wait until it completes.

    .. seealso:: :func:`subprocess.run`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param input: bytes passed to the standard input of the subprocess
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
        process terminates with a return code other than 0
    :param cwd: if not ``None``, change the working directory to this before running
        the command
    :param env: if not ``None``, this mapping replaces the inherited environment
        variables from the parent process
    :param start_new_session: if ``True`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess (POSIX only)
    :return: an object representing the completed process
    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
        exits with a nonzero return code

    """

    async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
        buffer = BytesIO()
        async for chunk in stream:
            buffer.write(chunk)

        stream_contents[index] = buffer.getvalue()

    async with await open_process(
        command,
        stdin=PIPE if input else DEVNULL,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        start_new_session=start_new_session,
    ) as process:
        stream_contents: list[bytes | None] = [None, None]
        async with create_task_group() as tg:
            if process.stdout:
                tg.start_soon(drain_stream, process.stdout, 0)

            if process.stderr:
                tg.start_soon(drain_stream, process.stderr, 1)

            if process.stdin and input:
                await process.stdin.send(input)
                await process.stdin.aclose()

            await process.wait()

    output, errors = stream_contents
    if check and process.returncode != 0:
        raise CalledProcessError(cast(int, process.returncode), command, output, errors)

    return CompletedProcess(command, cast(int, process.returncode), output, errors)


async def open_process(
    command: str | bytes | Sequence[str | bytes],
    *,
    stdin: int | IO[Any] | None = PIPE,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    cwd: str | bytes | PathLike[str] | None = None,
    env: Mapping[str, str] | None = None,
    start_new_session: bool = False,
) -> Process:
    """
    Start an external command in a subprocess.

    .. seealso:: :class:`subprocess.Popen`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
        file-like object, or ``None``
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
    :param cwd: if not ``None``, the working directory is changed before executing
    :param env: if not ``None``, it must be a mapping that defines the environment
        variables for the new process
    :param start_new_session: if ``True`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess (POSIX only)
    :return: an asynchronous process object

    """
    if isinstance(command, (str, bytes)):
        return await get_async_backend().open_process(
            command,
            shell=True,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env,
            start_new_session=start_new_session,
        )
    else:
        return await get_async_backend().open_process(
            command,
            shell=False,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            cwd=cwd,
            env=env,
            start_new_session=start_new_session,
        )
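A minimal sketch of run_process() above (not part of the commit): passing a list
runs the command without a shell, while a string would be handed to the shell.

import anyio
from anyio import run_process

async def main() -> None:
    # check=True (the default) raises CalledProcessError on nonzero exit
    result = await run_process(["echo", "hi"])
    print(result.returncode, result.stdout)

anyio.run(main)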
649
myenv/lib/python3.12/site-packages/anyio/_core/_synchronization.py
Normal file
@@ -0,0 +1,649 @@
from __future__ import annotations

import math
from collections import deque
from dataclasses import dataclass
from types import TracebackType

from sniffio import AsyncLibraryNotFoundError

from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
from ._eventloop import get_async_backend
from ._exceptions import BusyResourceError, WouldBlock
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task


@dataclass(frozen=True)
class EventStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
    """

    tasks_waiting: int


@dataclass(frozen=True)
class CapacityLimiterStatistics:
    """
    :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
    :ivar float total_tokens: total number of available tokens
    :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
        this limiter
    :ivar int tasks_waiting: number of tasks waiting on
        :meth:`~.CapacityLimiter.acquire` or
        :meth:`~.CapacityLimiter.acquire_on_behalf_of`
    """

    borrowed_tokens: int
    total_tokens: float
    borrowers: tuple[object, ...]
    tasks_waiting: int


@dataclass(frozen=True)
class LockStatistics:
    """
    :ivar bool locked: flag indicating if this lock is locked or not
    :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the
        lock is not held by any task)
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
    """

    locked: bool
    owner: TaskInfo | None
    tasks_waiting: int


@dataclass(frozen=True)
class ConditionStatistics:
    """
    :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
    :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying
        :class:`~.Lock`
    """

    tasks_waiting: int
    lock_statistics: LockStatistics


@dataclass(frozen=True)
class SemaphoreStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`

    """

    tasks_waiting: int


class Event:
    def __new__(cls) -> Event:
        try:
            return get_async_backend().create_event()
        except AsyncLibraryNotFoundError:
            return EventAdapter()

    def set(self) -> None:
        """Set the flag, notifying all listeners."""
        raise NotImplementedError

    def is_set(self) -> bool:
        """Return ``True`` if the flag is set, ``False`` if not."""
        raise NotImplementedError

    async def wait(self) -> None:
        """
        Wait until the flag has been set.

        If the flag has already been set when this method is called, it returns
        immediately.

        """
        raise NotImplementedError

    def statistics(self) -> EventStatistics:
        """Return statistics about the current state of this event."""
        raise NotImplementedError


class EventAdapter(Event):
    _internal_event: Event | None = None

    def __new__(cls) -> EventAdapter:
        return object.__new__(cls)

    @property
    def _event(self) -> Event:
        if self._internal_event is None:
            self._internal_event = get_async_backend().create_event()

        return self._internal_event

    def set(self) -> None:
        self._event.set()

    def is_set(self) -> bool:
        return self._internal_event is not None and self._internal_event.is_set()

    async def wait(self) -> None:
        await self._event.wait()

    def statistics(self) -> EventStatistics:
        if self._internal_event is None:
            return EventStatistics(tasks_waiting=0)

        return self._internal_event.statistics()


class Lock:
    _owner_task: TaskInfo | None = None

    def __init__(self) -> None:
        self._waiters: deque[tuple[TaskInfo, Event]] = deque()

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        await checkpoint_if_cancelled()
        try:
            self.acquire_nowait()
        except WouldBlock:
            task = get_current_task()
            event = Event()
            token = task, event
            self._waiters.append(token)
            try:
                await event.wait()
            except BaseException:
                if not event.is_set():
                    self._waiters.remove(token)
                elif self._owner_task == task:
                    self.release()

                raise

            assert self._owner_task == task
        else:
            try:
                await cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        task = get_current_task()
        if self._owner_task == task:
            raise RuntimeError("Attempted to acquire an already held Lock")

        if self._owner_task is not None:
            raise WouldBlock

        self._owner_task = task

    def release(self) -> None:
        """Release the lock."""
        if self._owner_task != get_current_task():
            raise RuntimeError("The current task is not holding this lock")

        if self._waiters:
            self._owner_task, event = self._waiters.popleft()
            event.set()
        else:
            del self._owner_task

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        return self._owner_task is not None

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0
        """
        return LockStatistics(self.locked(), self._owner_task, len(self._waiters))


class Condition:
    _owner_task: TaskInfo | None = None

    def __init__(self, lock: Lock | None = None):
        self._lock = lock or Lock()
        self._waiters: deque[Event] = deque()

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    def _check_acquired(self) -> None:
        if self._owner_task != get_current_task():
            raise RuntimeError("The current task is not holding the underlying lock")

    async def acquire(self) -> None:
        """Acquire the underlying lock."""
        await self._lock.acquire()
        self._owner_task = get_current_task()

    def acquire_nowait(self) -> None:
        """
        Acquire the underlying lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._lock.acquire_nowait()
        self._owner_task = get_current_task()

    def release(self) -> None:
        """Release the underlying lock."""
        self._lock.release()

    def locked(self) -> bool:
        """Return True if the lock is set."""
        return self._lock.locked()

    def notify(self, n: int = 1) -> None:
        """Notify exactly n listeners."""
        self._check_acquired()
        for _ in range(n):
            try:
                event = self._waiters.popleft()
            except IndexError:
                break

            event.set()

    def notify_all(self) -> None:
        """Notify all the listeners."""
        self._check_acquired()
        for event in self._waiters:
            event.set()

        self._waiters.clear()

    async def wait(self) -> None:
        """Wait for a notification."""
        await checkpoint()
        event = Event()
        self._waiters.append(event)
        self.release()
        try:
            await event.wait()
        except BaseException:
            if not event.is_set():
                self._waiters.remove(event)

            raise
        finally:
            with CancelScope(shield=True):
                await self.acquire()

    def statistics(self) -> ConditionStatistics:
        """
        Return statistics about the current state of this condition.

        .. versionadded:: 3.0
        """
        return ConditionStatistics(len(self._waiters), self._lock.statistics())


class Semaphore:
    def __init__(self, initial_value: int, *, max_value: int | None = None):
        if not isinstance(initial_value, int):
            raise TypeError("initial_value must be an integer")
        if initial_value < 0:
            raise ValueError("initial_value must be >= 0")
        if max_value is not None:
            if not isinstance(max_value, int):
                raise TypeError("max_value must be an integer or None")
            if max_value < initial_value:
                raise ValueError(
                    "max_value must be equal to or higher than initial_value"
                )

        self._value = initial_value
        self._max_value = max_value
        self._waiters: deque[Event] = deque()

    async def __aenter__(self) -> Semaphore:
        await self.acquire()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Decrement the semaphore value, blocking if necessary."""
        await checkpoint_if_cancelled()
        try:
            self.acquire_nowait()
        except WouldBlock:
            event = Event()
            self._waiters.append(event)
            try:
                await event.wait()
            except BaseException:
                if not event.is_set():
                    self._waiters.remove(event)
                else:
                    self.release()

                raise
        else:
            try:
                await cancel_shielded_checkpoint()
            except BaseException:
                self.release()
                raise

    def acquire_nowait(self) -> None:
        """
        Acquire the semaphore, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        if self._value == 0:
            raise WouldBlock

        self._value -= 1

    def release(self) -> None:
        """Increment the semaphore value."""
        if self._max_value is not None and self._value == self._max_value:
            raise ValueError("semaphore released too many times")

        if self._waiters:
            self._waiters.popleft().set()
        else:
            self._value += 1

    @property
    def value(self) -> int:
        """The current value of the semaphore."""
        return self._value

    @property
    def max_value(self) -> int | None:
        """The maximum value of the semaphore."""
        return self._max_value

    def statistics(self) -> SemaphoreStatistics:
        """
        Return statistics about the current state of this semaphore.

        .. versionadded:: 3.0
        """
        return SemaphoreStatistics(len(self._waiters))


class CapacityLimiter:
    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        try:
            return get_async_backend().create_capacity_limiter(total_tokens)
        except AsyncLibraryNotFoundError:
            return CapacityLimiterAdapter(total_tokens)

    async def __aenter__(self) -> None:
        raise NotImplementedError

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError

    @property
    def total_tokens(self) -> float:
        """
        The total number of tokens available for borrowing.

        This is a read-write property. If the total number of tokens is increased, the
        proportionate number of tasks waiting on this limiter will be granted their
        tokens.

        .. versionchanged:: 3.0
            The property is now writable.

        """
        raise NotImplementedError

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        raise NotImplementedError

    @property
    def borrowed_tokens(self) -> int:
        """The number of tokens that have currently been borrowed."""
        raise NotImplementedError

    @property
    def available_tokens(self) -> float:
        """The number of tokens currently available to be borrowed."""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire a token for the current task without waiting for one to become
        available.

        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        """
        Acquire a token without waiting for one to become available.

        :param borrower: the entity borrowing a token
        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    async def acquire(self) -> None:
        """
        Acquire a token for the current task, waiting if necessary for one to become
        available.

        """
        raise NotImplementedError

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """
        Acquire a token, waiting if necessary for one to become available.

        :param borrower: the entity borrowing a token

        """
        raise NotImplementedError

    def release(self) -> None:
        """
        Release the token held by the current task.

        :raises RuntimeError: if the current task has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def release_on_behalf_of(self, borrower: object) -> None:
        """
        Release the token held by the given borrower.

        :raises RuntimeError: if the borrower has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def statistics(self) -> CapacityLimiterStatistics:
        """
        Return statistics about the current state of this limiter.

        .. versionadded:: 3.0

        """
        raise NotImplementedError


class CapacityLimiterAdapter(CapacityLimiter):
    _internal_limiter: CapacityLimiter | None = None

    def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:
        return object.__new__(cls)

    def __init__(self, total_tokens: float) -> None:
        self.total_tokens = total_tokens

    @property
    def _limiter(self) -> CapacityLimiter:
        if self._internal_limiter is None:
            self._internal_limiter = get_async_backend().create_capacity_limiter(
                self._total_tokens
            )

        return self._internal_limiter

    async def __aenter__(self) -> None:
        await self._limiter.__aenter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)

    @property
    def total_tokens(self) -> float:
        if self._internal_limiter is None:
            return self._total_tokens

        return self._internal_limiter.total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        if not isinstance(value, int) and value is not math.inf:
            raise TypeError("total_tokens must be an int or math.inf")
        elif value < 1:
            raise ValueError("total_tokens must be >= 1")

        if self._internal_limiter is None:
            self._total_tokens = value
            return

        self._limiter.total_tokens = value

    @property
    def borrowed_tokens(self) -> int:
        if self._internal_limiter is None:
            return 0

        return self._internal_limiter.borrowed_tokens

    @property
    def available_tokens(self) -> float:
        if self._internal_limiter is None:
            return self._total_tokens

        return self._internal_limiter.available_tokens

    def acquire_nowait(self) -> None:
        self._limiter.acquire_nowait()

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        self._limiter.acquire_on_behalf_of_nowait(borrower)

    async def acquire(self) -> None:
        await self._limiter.acquire()

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        await self._limiter.acquire_on_behalf_of(borrower)

    def release(self) -> None:
        self._limiter.release()

    def release_on_behalf_of(self, borrower: object) -> None:
        self._limiter.release_on_behalf_of(borrower)

    def statistics(self) -> CapacityLimiterStatistics:
        if self._internal_limiter is None:
            return CapacityLimiterStatistics(
                borrowed_tokens=0,
                total_tokens=self.total_tokens,
                borrowers=(),
                tasks_waiting=0,
            )

        return self._internal_limiter.statistics()


class ResourceGuard:
    """
    A context manager for ensuring that a resource is only used by a single task at a
    time.

    Entering this context manager while the previous has not exited it yet will trigger
    :exc:`BusyResourceError`.

    :param action: the action to guard against (visible in the :exc:`BusyResourceError`
        when triggered, e.g. "Another task is already {action} this resource")

    .. versionadded:: 4.1
    """

    __slots__ = "action", "_guarded"

    def __init__(self, action: str = "using"):
        self.action: str = action
        self._guarded = False

    def __enter__(self) -> None:
        if self._guarded:
            raise BusyResourceError(self.action)

        self._guarded = True

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        self._guarded = False
        return None
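A short sketch combining two of the primitives above (not part of the commit): the
limiter bounds concurrency while the lock serializes the critical section.

import anyio

async def worker(num: int, lock: anyio.Lock, limiter: anyio.CapacityLimiter) -> None:
    async with limiter:      # at most total_tokens workers run concurrently
        async with lock:     # only one worker is in this block at a time
            print("worker", num)

async def main() -> None:
    lock = anyio.Lock()
    limiter = anyio.CapacityLimiter(2)
    async with anyio.create_task_group() as tg:
        for num in range(5):
            tg.start_soon(worker, num, lock, limiter)

anyio.run(main)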
158
myenv/lib/python3.12/site-packages/anyio/_core/_tasks.py
Normal file
@@ -0,0 +1,158 @@
from __future__ import annotations

import math
from collections.abc import Generator
from contextlib import contextmanager
from types import TracebackType

from ..abc._tasks import TaskGroup, TaskStatus
from ._eventloop import get_async_backend


class _IgnoredTaskStatus(TaskStatus[object]):
    def started(self, value: object = None) -> None:
        pass


TASK_STATUS_IGNORED = _IgnoredTaskStatus()


class CancelScope:
    """
    Wraps a unit of work that can be made separately cancellable.

    :param deadline: The time (clock value) when this scope is cancelled automatically
    :param shield: ``True`` to shield the cancel scope from external cancellation
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)

    def cancel(self) -> None:
        """Cancel this scope immediately."""
        raise NotImplementedError

    @property
    def deadline(self) -> float:
        """
        The time (clock value) when this scope is cancelled automatically.

        Will be ``float('inf')`` if no timeout has been set.

        """
        raise NotImplementedError

    @deadline.setter
    def deadline(self, value: float) -> None:
        raise NotImplementedError

    @property
    def cancel_called(self) -> bool:
        """``True`` if :meth:`cancel` has been called."""
        raise NotImplementedError

    @property
    def cancelled_caught(self) -> bool:
        """
        ``True`` if this scope suppressed a cancellation exception it itself raised.

        This is typically used to check if any work was interrupted, or to see if the
        scope was cancelled due to its deadline being reached. The value will, however,
        only be ``True`` if the cancellation was triggered by the scope itself (and not
        an outer scope).

        """
        raise NotImplementedError

    @property
    def shield(self) -> bool:
        """
        ``True`` if this scope is shielded from external cancellation.

        While a scope is shielded, it will not receive cancellations from outside.

        """
        raise NotImplementedError

    @shield.setter
    def shield(self, value: bool) -> None:
        raise NotImplementedError

    def __enter__(self) -> CancelScope:
        raise NotImplementedError

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError


@contextmanager
def fail_after(
    delay: float | None, shield: bool = False
) -> Generator[CancelScope, None, None]:
    """
    Create a context manager which raises a :class:`TimeoutError` if it does not
    finish in time.

    :param delay: maximum allowed time (in seconds) before raising the exception, or
        ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a context manager that yields a cancel scope
    :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]

    """
    current_time = get_async_backend().current_time
    deadline = (current_time() + delay) if delay is not None else math.inf
    with get_async_backend().create_cancel_scope(
        deadline=deadline, shield=shield
    ) as cancel_scope:
        yield cancel_scope

    if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:
        raise TimeoutError


def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
    """
    Create a cancel scope with a deadline that expires after the given delay.

    :param delay: maximum allowed time (in seconds) before exiting the context block,
        or ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    """
    deadline = (
        (get_async_backend().current_time() + delay) if delay is not None else math.inf
    )
    return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)


def current_effective_deadline() -> float:
    """
    Return the nearest deadline among all the cancel scopes effective for the current
    task.

    :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
        there is no deadline in effect, or ``float('-inf')`` if the current scope has
        been cancelled)
    :rtype: float

    """
    return get_async_backend().current_effective_deadline()


def create_task_group() -> TaskGroup:
    """
    Create a task group.

    :return: a task group

    """
    return get_async_backend().create_task_group()
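A minimal sketch of the two timeout helpers above (not part of the commit):

import anyio

async def main() -> None:
    # move_on_after() exits the block silently when the deadline passes
    with anyio.move_on_after(0.1) as scope:
        await anyio.sleep(1)
    print("timed out:", scope.cancelled_caught)

    # fail_after() raises TimeoutError instead
    try:
        with anyio.fail_after(0.1):
            await anyio.sleep(1)
    except TimeoutError:
        print("gave up")

anyio.run(main)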
78
myenv/lib/python3.12/site-packages/anyio/_core/_testing.py
Normal file
@@ -0,0 +1,78 @@
from __future__ import annotations

from collections.abc import Awaitable, Generator
from typing import Any, cast

from ._eventloop import get_async_backend


class TaskInfo:
    """
    Represents an asynchronous task.

    :ivar int id: the unique identifier of the task
    :ivar parent_id: the identifier of the parent task, if any
    :vartype parent_id: Optional[int]
    :ivar str name: the description of the task (if any)
    :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
    """

    __slots__ = "_name", "id", "parent_id", "name", "coro"

    def __init__(
        self,
        id: int,
        parent_id: int | None,
        name: str | None,
        coro: Generator[Any, Any, Any] | Awaitable[Any],
    ):
        func = get_current_task
        self._name = f"{func.__module__}.{func.__qualname__}"
        self.id: int = id
        self.parent_id: int | None = parent_id
        self.name: str | None = name
        self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro

    def __eq__(self, other: object) -> bool:
        if isinstance(other, TaskInfo):
            return self.id == other.id

        return NotImplemented

    def __hash__(self) -> int:
        return hash(self.id)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"

    def has_pending_cancellation(self) -> bool:
        """
        Return ``True`` if the task has a cancellation pending, ``False`` otherwise.

        """
        return False


def get_current_task() -> TaskInfo:
    """
    Return the current task.

    :return: a representation of the current task

    """
    return get_async_backend().get_current_task()


def get_running_tasks() -> list[TaskInfo]:
    """
    Return a list of running tasks in the current event loop.

    :return: a list of task info objects

    """
    return cast("list[TaskInfo]", get_async_backend().get_running_tasks())


async def wait_all_tasks_blocked() -> None:
    """Wait until all other tasks are waiting for something."""
    await get_async_backend().wait_all_tasks_blocked()
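A brief sketch of the introspection helpers above (not part of the commit):

import anyio
from anyio import get_current_task, get_running_tasks

async def main() -> None:
    print(get_current_task())         # TaskInfo for this task
    print(len(get_running_tasks()))   # all tasks in the current event loop

anyio.run(main)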
81
myenv/lib/python3.12/site-packages/anyio/_core/_typedattr.py
Normal file
@@ -0,0 +1,81 @@
from __future__ import annotations

from collections.abc import Callable, Mapping
from typing import Any, TypeVar, final, overload

from ._exceptions import TypedAttributeLookupError

T_Attr = TypeVar("T_Attr")
T_Default = TypeVar("T_Default")
undefined = object()


def typed_attribute() -> Any:
    """Return a unique object, used to mark typed attributes."""
    return object()


class TypedAttributeSet:
    """
    Superclass for typed attribute collections.

    Checks that every public attribute of every subclass has a type annotation.
    """

    def __init_subclass__(cls) -> None:
        annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
        for attrname in dir(cls):
            if not attrname.startswith("_") and attrname not in annotations:
                raise TypeError(
                    f"Attribute {attrname!r} is missing its type annotation"
                )

        super().__init_subclass__()


class TypedAttributeProvider:
    """Base class for classes that wish to provide typed extra attributes."""

    @property
    def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
        """
        A mapping of the extra attributes to callables that return the corresponding
        values.

        If the provider wraps another provider, the attributes from that wrapper should
        also be included in the returned mapping (but the wrapper may override the
        callables from the wrapped instance).

        """
        return {}

    @overload
    def extra(self, attribute: T_Attr) -> T_Attr: ...

    @overload
    def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...

    @final
    def extra(self, attribute: Any, default: object = undefined) -> object:
        """
        extra(attribute, default=undefined)

        Return the value of the given typed extra attribute.

        :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
            look for
        :param default: the value that should be returned if no value is found for the
            attribute
        :raises ~anyio.TypedAttributeLookupError: if the search failed and no default
            value was given

        """
        try:
            getter = self.extra_attributes[attribute]
        except KeyError:
            if default is undefined:
                raise TypedAttributeLookupError("Attribute not found") from None
            else:
                return default

        return getter()
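A small sketch of the typed-attribute machinery above (not part of the commit;
ColorAttribute and Widget are hypothetical names for illustration):

from anyio import TypedAttributeProvider, TypedAttributeSet, typed_attribute

class ColorAttribute(TypedAttributeSet):
    #: hypothetical attribute; the annotation is required by __init_subclass__
    color: str = typed_attribute()

class Widget(TypedAttributeProvider):
    @property
    def extra_attributes(self):
        # map each attribute marker to a callable producing its current value
        return {ColorAttribute.color: lambda: "red"}

widget = Widget()
print(widget.extra(ColorAttribute.color))             # "red"
print(widget.extra(ColorAttribute.color, "fallback"))  # default avoids the lookup error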
57
myenv/lib/python3.12/site-packages/anyio/abc/__init__.py
Normal file
@@ -0,0 +1,57 @@
from __future__ import annotations

from typing import Any

from ._eventloop import AsyncBackend as AsyncBackend
from ._resources import AsyncResource as AsyncResource
from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket
from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket
from ._sockets import IPAddressType as IPAddressType
from ._sockets import IPSockAddrType as IPSockAddrType
from ._sockets import SocketAttribute as SocketAttribute
from ._sockets import SocketListener as SocketListener
from ._sockets import SocketStream as SocketStream
from ._sockets import UDPPacketType as UDPPacketType
from ._sockets import UDPSocket as UDPSocket
from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType
from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket
from ._sockets import UNIXSocketStream as UNIXSocketStream
from ._streams import AnyByteReceiveStream as AnyByteReceiveStream
from ._streams import AnyByteSendStream as AnyByteSendStream
from ._streams import AnyByteStream as AnyByteStream
from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream
from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream
from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream
from ._streams import ByteReceiveStream as ByteReceiveStream
from ._streams import ByteSendStream as ByteSendStream
from ._streams import ByteStream as ByteStream
from ._streams import Listener as Listener
from ._streams import ObjectReceiveStream as ObjectReceiveStream
from ._streams import ObjectSendStream as ObjectSendStream
from ._streams import ObjectStream as ObjectStream
from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream
from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream
from ._streams import UnreliableObjectStream as UnreliableObjectStream
from ._subprocesses import Process as Process
from ._tasks import TaskGroup as TaskGroup
from ._tasks import TaskStatus as TaskStatus
from ._testing import TestRunner as TestRunner

# Re-exported here, for backwards compatibility
# isort: off
from .._core._synchronization import (
    CapacityLimiter as CapacityLimiter,
    Condition as Condition,
    Event as Event,
    Lock as Lock,
    Semaphore as Semaphore,
)
from .._core._tasks import CancelScope as CancelScope
from ..from_thread import BlockingPortal as BlockingPortal

# Re-export imports so they look like they live directly in this package
key: str
value: Any
for key, value in list(locals().items()):
    if getattr(value, "__module__", "").startswith("anyio.abc."):
        value.__module__ = __name__
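A quick interpreter-session illustration of the re-export loop above (not part of
the commit): classes defined in the private submodules report ``anyio.abc`` as
their module, which keeps reprs and documentation references clean.

>>> import anyio.abc
>>> anyio.abc.TaskGroup.__module__
'anyio.abc'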
390
myenv/lib/python3.12/site-packages/anyio/abc/_eventloop.py
Normal file
@@ -0,0 +1,390 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import sys
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from collections.abc import AsyncIterator, Awaitable, Mapping
|
||||
from os import PathLike
|
||||
from signal import Signals
|
||||
from socket import AddressFamily, SocketKind, socket
|
||||
from typing import (
|
||||
IO,
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
ContextManager,
|
||||
Sequence,
|
||||
TypeVar,
|
||||
overload,
|
||||
)
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typing import Literal
|
||||
|
||||
from .._core._synchronization import CapacityLimiter, Event
|
||||
from .._core._tasks import CancelScope
|
||||
from .._core._testing import TaskInfo
|
||||
from ..from_thread import BlockingPortal
|
||||
from ._sockets import (
|
||||
ConnectedUDPSocket,
|
||||
ConnectedUNIXDatagramSocket,
|
||||
IPSockAddrType,
|
||||
SocketListener,
|
||||
SocketStream,
|
||||
UDPSocket,
|
||||
UNIXDatagramSocket,
|
||||
UNIXSocketStream,
|
||||
)
|
||||
from ._subprocesses import Process
|
||||
from ._tasks import TaskGroup
|
||||
from ._testing import TestRunner
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
PosArgsT = TypeVarTuple("PosArgsT")
|
||||
|
||||
|
||||
class AsyncBackend(metaclass=ABCMeta):
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def run(
|
||||
cls,
|
||||
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
|
||||
args: tuple[Unpack[PosArgsT]],
|
||||
kwargs: dict[str, Any],
|
||||
options: dict[str, Any],
|
||||
) -> T_Retval:
|
||||
"""
|
||||
Run the given coroutine function in an asynchronous event loop.
|
||||
|
||||
The current thread must not be already running an event loop.
|
||||
|
||||
:param func: a coroutine function
|
||||
:param args: positional arguments to ``func``
|
||||
:param kwargs: positional arguments to ``func``
|
||||
:param options: keyword arguments to call the backend ``run()`` implementation
|
||||
with
|
||||
:return: the return value of the coroutine function
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def current_token(cls) -> object:
|
||||
"""
|
||||
|
||||
:return:
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def current_time(cls) -> float:
|
||||
"""
|
||||
Return the current value of the event loop's internal clock.
|
||||
|
||||
:return: the clock value (seconds)
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def cancelled_exception_class(cls) -> type[BaseException]:
|
||||
"""Return the exception class that is raised in a task if it's cancelled."""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
async def checkpoint(cls) -> None:
|
||||
"""
|
||||
Check if the task has been cancelled, and allow rescheduling of other tasks.
|
||||
|
||||
This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
|
||||
:meth:`cancel_shielded_checkpoint`.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
async def checkpoint_if_cancelled(cls) -> None:
|
||||
"""
|
||||
Check if the current task group has been cancelled.
|
||||
|
||||
This will check if the task has been cancelled, but will not allow other tasks
|
||||
to be scheduled if not.
|
||||
|
||||
"""
|
||||
if cls.current_effective_deadline() == -math.inf:
|
||||
await cls.checkpoint()
|
||||
|
||||
@classmethod
|
||||
async def cancel_shielded_checkpoint(cls) -> None:
|
||||
"""
|
||||
Allow the rescheduling of other tasks.
|
||||
|
||||
This will give other tasks the opportunity to run, but without checking if the
|
||||
current task group has been cancelled, unlike with :meth:`checkpoint`.
|
||||
|
||||
"""
|
||||
with cls.create_cancel_scope(shield=True):
|
||||
await cls.sleep(0)
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
async def sleep(cls, delay: float) -> None:
|
||||
"""
|
||||
Pause the current task for the specified duration.
|
||||
|
||||
:param delay: the duration, in seconds
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def create_cancel_scope(
|
||||
cls, *, deadline: float = math.inf, shield: bool = False
|
||||
) -> CancelScope:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def current_effective_deadline(cls) -> float:
|
||||
"""
|
||||
Return the nearest deadline among all the cancel scopes effective for the
|
||||
current task.
|
||||
|
||||
:return:
|
||||
- a clock value from the event loop's internal clock
|
||||
- ``inf`` if there is no deadline in effect
|
||||
- ``-inf`` if the current scope has been cancelled
|
||||
:rtype: float
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def create_task_group(cls) -> TaskGroup:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def create_event(cls) -> Event:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
async def run_sync_in_worker_thread(
|
||||
cls,
|
||||
func: Callable[[Unpack[PosArgsT]], T_Retval],
|
||||
args: tuple[Unpack[PosArgsT]],
|
||||
abandon_on_cancel: bool = False,
|
||||
limiter: CapacityLimiter | None = None,
|
||||
) -> T_Retval:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def check_cancelled(cls) -> None:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def run_async_from_thread(
|
||||
cls,
|
||||
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
|
||||
args: tuple[Unpack[PosArgsT]],
|
||||
token: object,
|
||||
) -> T_Retval:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def run_sync_from_thread(
|
||||
cls,
|
||||
func: Callable[[Unpack[PosArgsT]], T_Retval],
|
||||
args: tuple[Unpack[PosArgsT]],
|
||||
token: object,
|
||||
) -> T_Retval:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def create_blocking_portal(cls) -> BlockingPortal:
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
@overload
|
||||
async def open_process(
|
||||
cls,
|
||||
command: str | bytes,
|
||||
*,
|
||||
shell: Literal[True],
|
||||
stdin: int | IO[Any] | None,
|
||||
stdout: int | IO[Any] | None,
|
||||
stderr: int | IO[Any] | None,
|
||||
cwd: str | bytes | PathLike[str] | None = None,
|
||||
        env: Mapping[str, str] | None = None,
        start_new_session: bool = False,
    ) -> Process:
        pass

    @classmethod
    @overload
    async def open_process(
        cls,
        command: Sequence[str | bytes],
        *,
        shell: Literal[False],
        stdin: int | IO[Any] | None,
        stdout: int | IO[Any] | None,
        stderr: int | IO[Any] | None,
        cwd: str | bytes | PathLike[str] | None = None,
        env: Mapping[str, str] | None = None,
        start_new_session: bool = False,
    ) -> Process:
        pass

    @classmethod
    @abstractmethod
    async def open_process(
        cls,
        command: str | bytes | Sequence[str | bytes],
        *,
        shell: bool,
        stdin: int | IO[Any] | None,
        stdout: int | IO[Any] | None,
        stderr: int | IO[Any] | None,
        cwd: str | bytes | PathLike[str] | None = None,
        env: Mapping[str, str] | None = None,
        start_new_session: bool = False,
    ) -> Process:
        pass

    @classmethod
    @abstractmethod
    def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
        pass

    @classmethod
    @abstractmethod
    async def connect_tcp(
        cls, host: str, port: int, local_address: IPSockAddrType | None = None
    ) -> SocketStream:
        pass

    @classmethod
    @abstractmethod
    async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
        pass

    @classmethod
    @abstractmethod
    def create_tcp_listener(cls, sock: socket) -> SocketListener:
        pass

    @classmethod
    @abstractmethod
    def create_unix_listener(cls, sock: socket) -> SocketListener:
        pass

    @classmethod
    @abstractmethod
    async def create_udp_socket(
        cls,
        family: AddressFamily,
        local_address: IPSockAddrType | None,
        remote_address: IPSockAddrType | None,
        reuse_port: bool,
    ) -> UDPSocket | ConnectedUDPSocket:
        pass

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: None
    ) -> UNIXDatagramSocket: ...

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes
    ) -> ConnectedUNIXDatagramSocket: ...

    @classmethod
    @abstractmethod
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes | None
    ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
        pass

    @classmethod
    @abstractmethod
    async def getaddrinfo(
        cls,
        host: bytes | str | None,
        port: str | int | None,
        *,
        family: int | AddressFamily = 0,
        type: int | SocketKind = 0,
        proto: int = 0,
        flags: int = 0,
    ) -> list[
        tuple[
            AddressFamily,
            SocketKind,
            int,
            str,
            tuple[str, int] | tuple[str, int, int, int],
        ]
    ]:
        pass

    @classmethod
    @abstractmethod
    async def getnameinfo(
        cls, sockaddr: IPSockAddrType, flags: int = 0
    ) -> tuple[str, str]:
        pass

    @classmethod
    @abstractmethod
    async def wait_socket_readable(cls, sock: socket) -> None:
        pass

    @classmethod
    @abstractmethod
    async def wait_socket_writable(cls, sock: socket) -> None:
        pass

    @classmethod
    @abstractmethod
    def current_default_thread_limiter(cls) -> CapacityLimiter:
        pass

    @classmethod
    @abstractmethod
    def open_signal_receiver(
        cls, *signals: Signals
    ) -> ContextManager[AsyncIterator[Signals]]:
        pass

    @classmethod
    @abstractmethod
    def get_current_task(cls) -> TaskInfo:
        pass

    @classmethod
    @abstractmethod
    def get_running_tasks(cls) -> Sequence[TaskInfo]:
        pass

    @classmethod
    @abstractmethod
    async def wait_all_tasks_blocked(cls) -> None:
        pass

    @classmethod
    @abstractmethod
    def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
        pass
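The paired open_process overloads above encode the convention that a shell command is a single string (shell=True) while a direct exec is an argument sequence (shell=False). A minimal usage sketch against the high-level wrapper anyio.run_process re-exported by the package (hedged: capturing of stdout and check-by-default are assumed from the public API, not shown in this diff):

import anyio

async def main() -> None:
    # Sequence form maps to the shell=False overloads: executed directly
    result = await anyio.run_process(["python", "--version"])
    print(result.stdout.decode())

anyio.run(main)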
33
myenv/lib/python3.12/site-packages/anyio/abc/_resources.py
Normal file
@@ -0,0 +1,33 @@
from __future__ import annotations

from abc import ABCMeta, abstractmethod
from types import TracebackType
from typing import TypeVar

T = TypeVar("T")


class AsyncResource(metaclass=ABCMeta):
    """
    Abstract base class for all closeable asynchronous resources.

    Works as an asynchronous context manager which returns the instance itself on enter,
    and calls :meth:`aclose` on exit.
    """

    __slots__ = ()

    async def __aenter__(self: T) -> T:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.aclose()

    @abstractmethod
    async def aclose(self) -> None:
        """Close the resource."""
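A minimal sketch of how this base class is consumed: subclasses implement only aclose(), and async with handles entry and exit (the Resource class here is hypothetical):

import anyio
from anyio.abc import AsyncResource

class Resource(AsyncResource):
    async def aclose(self) -> None:
        print("released")

async def main() -> None:
    async with Resource() as res:  # __aenter__ returns res itself
        assert isinstance(res, Resource)
    # __aexit__ has awaited res.aclose() by this point

anyio.run(main)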
194
myenv/lib/python3.12/site-packages/anyio/abc/_sockets.py
Normal file
@@ -0,0 +1,194 @@
from __future__ import annotations

import socket
from abc import abstractmethod
from collections.abc import Callable, Collection, Mapping
from contextlib import AsyncExitStack
from io import IOBase
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily
from types import TracebackType
from typing import Any, Tuple, TypeVar, Union

from .._core._typedattr import (
    TypedAttributeProvider,
    TypedAttributeSet,
    typed_attribute,
)
from ._streams import ByteStream, Listener, UnreliableObjectStream
from ._tasks import TaskGroup

IPAddressType = Union[str, IPv4Address, IPv6Address]
IPSockAddrType = Tuple[str, int]
SockAddrType = Union[IPSockAddrType, str]
UDPPacketType = Tuple[bytes, IPSockAddrType]
UNIXDatagramPacketType = Tuple[bytes, str]
T_Retval = TypeVar("T_Retval")


class _NullAsyncContextManager:
    async def __aenter__(self) -> None:
        pass

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return None


class SocketAttribute(TypedAttributeSet):
    #: the address family of the underlying socket
    family: AddressFamily = typed_attribute()
    #: the local socket address of the underlying socket
    local_address: SockAddrType = typed_attribute()
    #: for IP addresses, the local port the underlying socket is bound to
    local_port: int = typed_attribute()
    #: the underlying stdlib socket object
    raw_socket: socket.socket = typed_attribute()
    #: the remote address the underlying socket is connected to
    remote_address: SockAddrType = typed_attribute()
    #: for IP addresses, the remote port the underlying socket is connected to
    remote_port: int = typed_attribute()


class _SocketProvider(TypedAttributeProvider):
    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        from .._core._sockets import convert_ipv6_sockaddr as convert

        attributes: dict[Any, Callable[[], Any]] = {
            SocketAttribute.family: lambda: self._raw_socket.family,
            SocketAttribute.local_address: lambda: convert(
                self._raw_socket.getsockname()
            ),
            SocketAttribute.raw_socket: lambda: self._raw_socket,
        }
        try:
            peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
        except OSError:
            peername = None

        # Provide the remote address for connected sockets
        if peername is not None:
            attributes[SocketAttribute.remote_address] = lambda: peername

        # Provide local and remote ports for IP based sockets
        if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
            attributes[SocketAttribute.local_port] = (
                lambda: self._raw_socket.getsockname()[1]
            )
            if peername is not None:
                remote_port = peername[1]
                attributes[SocketAttribute.remote_port] = lambda: remote_port

        return attributes

    @property
    @abstractmethod
    def _raw_socket(self) -> socket.socket:
        pass


class SocketStream(ByteStream, _SocketProvider):
    """
    Transports bytes over a socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """


class UNIXSocketStream(SocketStream):
    @abstractmethod
    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """
        Send file descriptors along with a message to the peer.

        :param message: a non-empty bytestring
        :param fds: a collection of files (either numeric file descriptors or open file
            or socket objects)
        """

    @abstractmethod
    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """
        Receive file descriptors along with a message from the peer.

        :param msglen: length of the message to expect from the peer
        :param maxfds: maximum number of file descriptors to expect from the peer
        :return: a tuple of (message, file descriptors)
        """


class SocketListener(Listener[SocketStream], _SocketProvider):
    """
    Listens to incoming socket connections.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    @abstractmethod
    async def accept(self) -> SocketStream:
        """Accept an incoming connection."""

    async def serve(
        self,
        handler: Callable[[SocketStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        from .. import create_task_group

        async with AsyncExitStack() as stack:
            if task_group is None:
                task_group = await stack.enter_async_context(create_task_group())

            while True:
                stream = await self.accept()
                task_group.start_soon(handler, stream)


class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
    """
    Represents an unconnected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, host: str, port: int) -> None:
        """
        Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).

        """
        return await self.send((data, (host, port)))


class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """


class UNIXDatagramSocket(
    UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
):
    """
    Represents an unconnected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, path: str) -> None:
        """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
        return await self.send((data, path))


class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """
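A usage sketch for SocketListener.serve() via the public create_tcp_listener helper (the port and echo handler are illustrative): serve() loops over accept() and starts one handler task per connection, exactly as in the method body above.

import anyio
from anyio.abc import SocketStream

async def echo(client: SocketStream) -> None:
    async with client:
        async for chunk in client:   # ByteReceiveStream is async-iterable
            await client.send(chunk)

async def main() -> None:
    listener = await anyio.create_tcp_listener(local_port=9999)
    await listener.serve(echo)       # runs until cancelled

anyio.run(main)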
203
myenv/lib/python3.12/site-packages/anyio/abc/_streams.py
Normal file
@@ -0,0 +1,203 @@
from __future__ import annotations

from abc import abstractmethod
from collections.abc import Callable
from typing import Any, Generic, TypeVar, Union

from .._core._exceptions import EndOfStream
from .._core._typedattr import TypedAttributeProvider
from ._resources import AsyncResource
from ._tasks import TaskGroup

T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)


class UnreliableObjectReceiveStream(
    Generic[T_co], AsyncResource, TypedAttributeProvider
):
    """
    An interface for receiving objects.

    This interface makes no guarantees that the received messages arrive in the order in
    which they were sent, or that no messages are missed.

    Asynchronously iterating over objects of this type will yield objects matching the
    given type parameter.
    """

    def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
        return self

    async def __anext__(self) -> T_co:
        try:
            return await self.receive()
        except EndOfStream:
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self) -> T_co:
        """
        Receive the next item.

        :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
            closed
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """


class UnreliableObjectSendStream(
    Generic[T_contra], AsyncResource, TypedAttributeProvider
):
    """
    An interface for sending objects.

    This interface makes no guarantees that the messages sent will reach the
    recipient(s) in the same order in which they were sent, or at all.
    """

    @abstractmethod
    async def send(self, item: T_contra) -> None:
        """
        Send an item to the peer(s).

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if the send stream has been explicitly
            closed
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """


class UnreliableObjectStream(
    UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
):
    """
    A bidirectional message stream which does not guarantee the order or reliability of
    message delivery.
    """


class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
    """
    A receive message stream which guarantees that messages are received in the same
    order in which they were sent, and that no messages are missed.
    """


class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
    """
    A send message stream which guarantees that messages are delivered in the same order
    in which they were sent, without missing any messages in the middle.
    """


class ObjectStream(
    ObjectReceiveStream[T_Item],
    ObjectSendStream[T_Item],
    UnreliableObjectStream[T_Item],
):
    """
    A bidirectional message stream which guarantees the order and reliability of message
    delivery.
    """

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """


class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
    """
    An interface for receiving bytes from a single peer.

    Iterating this byte stream will yield a byte string of arbitrary length, but no more
    than 65536 bytes.
    """

    def __aiter__(self) -> ByteReceiveStream:
        return self

    async def __anext__(self) -> bytes:
        try:
            return await self.receive()
        except EndOfStream:
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self, max_bytes: int = 65536) -> bytes:
        """
        Receive at most ``max_bytes`` bytes from the peer.

        .. note:: Implementors of this interface should not return an empty
            :class:`bytes` object, and users should ignore them.

        :param max_bytes: maximum number of bytes to receive
        :return: the received bytes
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        """


class ByteSendStream(AsyncResource, TypedAttributeProvider):
    """An interface for sending bytes to a single peer."""

    @abstractmethod
    async def send(self, item: bytes) -> None:
        """
        Send the given bytes to the peer.

        :param item: the bytes to send
        """


class ByteStream(ByteReceiveStream, ByteSendStream):
    """A bidirectional byte stream."""

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """


#: Type alias for all unreliable bytes-oriented receive streams.
AnyUnreliableByteReceiveStream = Union[
    UnreliableObjectReceiveStream[bytes], ByteReceiveStream
]
#: Type alias for all unreliable bytes-oriented send streams.
AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream]
#: Type alias for all unreliable bytes-oriented streams.
AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream]
#: Type alias for all bytes-oriented receive streams.
AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
#: Type alias for all bytes-oriented send streams.
AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream]
#: Type alias for all bytes-oriented streams.
AnyByteStream = Union[ObjectStream[bytes], ByteStream]


class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
    """An interface for objects that let you accept incoming connections."""

    @abstractmethod
    async def serve(
        self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
    ) -> None:
        """
        Accept incoming connections as they come in and start tasks to handle them.

        :param handler: a callable that will be used to handle each accepted connection
        :param task_group: the task group that will be used to start tasks for handling
            each accepted connection (if omitted, an ad-hoc task group will be created)
        """
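The ObjectStream hierarchy above is most commonly met through memory object streams, which implement the ordered, reliable ObjectSendStream/ObjectReceiveStream contracts. A brief sketch (the buffer size is arbitrary):

import anyio

async def main() -> None:
    send, receive = anyio.create_memory_object_stream(max_buffer_size=5)
    async with send, receive:
        await send.send("first")
        await send.send("second")
        assert await receive.receive() == "first"   # order preserved
        assert await receive.receive() == "second"  # nothing missed

anyio.run(main)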
@@ -0,0 +1,79 @@
from __future__ import annotations

from abc import abstractmethod
from signal import Signals

from ._resources import AsyncResource
from ._streams import ByteReceiveStream, ByteSendStream


class Process(AsyncResource):
    """An asynchronous version of :class:`subprocess.Popen`."""

    @abstractmethod
    async def wait(self) -> int:
        """
        Wait until the process exits.

        :return: the exit code of the process
        """

    @abstractmethod
    def terminate(self) -> None:
        """
        Terminates the process, gracefully if possible.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGTERM`` to the process.

        .. seealso:: :meth:`subprocess.Popen.terminate`
        """

    @abstractmethod
    def kill(self) -> None:
        """
        Kills the process.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGKILL`` to the process.

        .. seealso:: :meth:`subprocess.Popen.kill`
        """

    @abstractmethod
    def send_signal(self, signal: Signals) -> None:
        """
        Send a signal to the subprocess.

        .. seealso:: :meth:`subprocess.Popen.send_signal`

        :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
        """

    @property
    @abstractmethod
    def pid(self) -> int:
        """The process ID of the process."""

    @property
    @abstractmethod
    def returncode(self) -> int | None:
        """
        The return code of the process. If the process has not yet terminated, this will
        be ``None``.
        """

    @property
    @abstractmethod
    def stdin(self) -> ByteSendStream | None:
        """The stream for the standard input of the process."""

    @property
    @abstractmethod
    def stdout(self) -> ByteReceiveStream | None:
        """The stream for the standard output of the process."""

    @property
    @abstractmethod
    def stderr(self) -> ByteReceiveStream | None:
        """The stream for the standard error output of the process."""
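A sketch of the Process interface in use, via the public anyio.open_process wrapper (it is assumed here that stdin/stdout default to pipes, so the stream properties are not None):

import anyio

async def main() -> None:
    async with await anyio.open_process(["cat"]) as process:
        await process.stdin.send(b"ping\n")
        await process.stdin.aclose()            # EOF makes cat exit
        print(await process.stdout.receive())   # b'ping\n'
        print(await process.wait())             # exit code 0

anyio.run(main)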
95
myenv/lib/python3.12/site-packages/anyio/abc/_tasks.py
Normal file
@@ -0,0 +1,95 @@
from __future__ import annotations

import sys
from abc import ABCMeta, abstractmethod
from collections.abc import Awaitable, Callable
from types import TracebackType
from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

if TYPE_CHECKING:
    from .._core._tasks import CancelScope

T_Retval = TypeVar("T_Retval")
T_contra = TypeVar("T_contra", contravariant=True)
PosArgsT = TypeVarTuple("PosArgsT")


class TaskStatus(Protocol[T_contra]):
    @overload
    def started(self: TaskStatus[None]) -> None: ...

    @overload
    def started(self, value: T_contra) -> None: ...

    def started(self, value: T_contra | None = None) -> None:
        """
        Signal that the task has started.

        :param value: object passed back to the starter of the task
        """


class TaskGroup(metaclass=ABCMeta):
    """
    Groups several asynchronous tasks together.

    :ivar cancel_scope: the cancel scope inherited by all child tasks
    :vartype cancel_scope: CancelScope
    """

    cancel_scope: CancelScope

    @abstractmethod
    def start_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> None:
        """
        Start a new task in this task group.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def start(
        self,
        func: Callable[..., Awaitable[Any]],
        *args: object,
        name: object = None,
    ) -> Any:
        """
        Start a new task and wait until it signals for readiness.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging
        :return: the value passed to ``task_status.started()``
        :raises RuntimeError: if the task finishes without calling
            ``task_status.started()``

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def __aenter__(self) -> TaskGroup:
        """Enter the task group context and allow starting new tasks."""

    @abstractmethod
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        """Exit the task group context waiting for all tasks to finish."""
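A sketch contrasting start_soon() with start(): start() blocks until the child calls task_status.started() and returns the value passed to it (the worker function is illustrative):

import anyio
from anyio.abc import TaskStatus

async def worker(task_status: TaskStatus[str]) -> None:
    # ... acquire resources here ...
    task_status.started("ready")   # unblocks TaskGroup.start()
    await anyio.sleep_forever()

async def main() -> None:
    async with anyio.create_task_group() as tg:
        value = await tg.start(worker)    # waits for started()
        assert value == "ready"
        tg.start_soon(anyio.sleep, 0.1)   # fire-and-forget variant
        tg.cancel_scope.cancel()          # cancels both children

anyio.run(main)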
65
myenv/lib/python3.12/site-packages/anyio/abc/_testing.py
Normal file
@@ -0,0 +1,65 @@
from __future__ import annotations

import types
from abc import ABCMeta, abstractmethod
from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable
from typing import Any, TypeVar

_T = TypeVar("_T")


class TestRunner(metaclass=ABCMeta):
    """
    Encapsulates a running event loop. Every call made through this object will use the
    same event loop.
    """

    def __enter__(self) -> TestRunner:
        return self

    @abstractmethod
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> bool | None: ...

    @abstractmethod
    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[_T, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[_T]:
        """
        Run an async generator fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: an iterator yielding the value yielded from the async generator
        """

    @abstractmethod
    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, _T]],
        kwargs: dict[str, Any],
    ) -> _T:
        """
        Run an async fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: the return value of the fixture function
        """

    @abstractmethod
    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        """
        Run an async test function.

        :param test_func: the test function
        :param kwargs: keyword arguments to call the test function with
        """
530
myenv/lib/python3.12/site-packages/anyio/from_thread.py
Normal file
@@ -0,0 +1,530 @@
from __future__ import annotations

import sys
import threading
from collections.abc import Awaitable, Callable, Generator
from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait
from contextlib import AbstractContextManager, contextmanager
from dataclasses import dataclass, field
from inspect import isawaitable
from types import TracebackType
from typing import (
    Any,
    AsyncContextManager,
    ContextManager,
    Generic,
    Iterable,
    TypeVar,
    cast,
    overload,
)

from ._core import _eventloop
from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
from ._core._synchronization import Event
from ._core._tasks import CancelScope, create_task_group
from .abc import AsyncBackend
from .abc._tasks import TaskStatus

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

T_Retval = TypeVar("T_Retval")
T_co = TypeVar("T_co", covariant=True)
PosArgsT = TypeVarTuple("PosArgsT")


def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a coroutine function from a worker thread.

    :param func: a coroutine function
    :param args: positional arguments for the callable
    :return: the return value of the coroutine function

    """
    try:
        async_backend = threadlocals.current_async_backend
        token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return async_backend.run_async_from_thread(func, args, token=token)


def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a function in the event loop thread from a worker thread.

    :param func: a callable
    :param args: positional arguments for the callable
    :return: the return value of the callable

    """
    try:
        async_backend = threadlocals.current_async_backend
        token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return async_backend.run_sync_from_thread(func, args, token=token)
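Both functions above require being inside an AnyIO worker thread, hence the AttributeError guard. A sketch of the round trip, where to_thread.run_sync spawns the worker thread:

import anyio
from anyio import from_thread, to_thread

async def main() -> None:
    def blocking() -> str:
        from_thread.run(anyio.sleep, 0.01)           # await back in the loop
        return from_thread.run_sync(str.upper, "done")

    print(await to_thread.run_sync(blocking))        # DONE

anyio.run(main)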

class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
    _enter_future: Future[T_co]
    _exit_future: Future[bool | None]
    _exit_event: Event
    _exit_exc_info: tuple[
        type[BaseException] | None, BaseException | None, TracebackType | None
    ] = (None, None, None)

    def __init__(self, async_cm: AsyncContextManager[T_co], portal: BlockingPortal):
        self._async_cm = async_cm
        self._portal = portal

    async def run_async_cm(self) -> bool | None:
        try:
            self._exit_event = Event()
            value = await self._async_cm.__aenter__()
        except BaseException as exc:
            self._enter_future.set_exception(exc)
            raise
        else:
            self._enter_future.set_result(value)

        try:
            # Wait for the sync context manager to exit.
            # This next statement can raise `get_cancelled_exc_class()` if
            # something went wrong in a task group in this async context
            # manager.
            await self._exit_event.wait()
        finally:
            # In case of cancellation, it could be that we end up here before
            # `_BlockingAsyncContextManager.__exit__` is called, and an
            # `_exit_exc_info` has been set.
            result = await self._async_cm.__aexit__(*self._exit_exc_info)
            return result

    def __enter__(self) -> T_co:
        self._enter_future = Future()
        self._exit_future = self._portal.start_task_soon(self.run_async_cm)
        return self._enter_future.result()

    def __exit__(
        self,
        __exc_type: type[BaseException] | None,
        __exc_value: BaseException | None,
        __traceback: TracebackType | None,
    ) -> bool | None:
        self._exit_exc_info = __exc_type, __exc_value, __traceback
        self._portal.call(self._exit_event.set)
        return self._exit_future.result()


class _BlockingPortalTaskStatus(TaskStatus):
    def __init__(self, future: Future):
        self._future = future

    def started(self, value: object = None) -> None:
        self._future.set_result(value)


class BlockingPortal:
    """An object that lets external threads run code in an asynchronous event loop."""

    def __new__(cls) -> BlockingPortal:
        return get_async_backend().create_blocking_portal()

    def __init__(self) -> None:
        self._event_loop_thread_id: int | None = threading.get_ident()
        self._stop_event = Event()
        self._task_group = create_task_group()
        self._cancelled_exc_class = get_cancelled_exc_class()

    async def __aenter__(self) -> BlockingPortal:
        await self._task_group.__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        await self.stop()
        return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)

    def _check_running(self) -> None:
        if self._event_loop_thread_id is None:
            raise RuntimeError("This portal is not running")
        if self._event_loop_thread_id == threading.get_ident():
            raise RuntimeError(
                "This method cannot be called from the event loop thread"
            )

    async def sleep_until_stopped(self) -> None:
        """Sleep until :meth:`stop` is called."""
        await self._stop_event.wait()

    async def stop(self, cancel_remaining: bool = False) -> None:
        """
        Signal the portal to shut down.

        This marks the portal as no longer accepting new calls and exits from
        :meth:`sleep_until_stopped`.

        :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
            to let them finish before returning

        """
        self._event_loop_thread_id = None
        self._stop_event.set()
        if cancel_remaining:
            self._task_group.cancel_scope.cancel()

    async def _call_func(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        future: Future[T_Retval],
    ) -> None:
        def callback(f: Future[T_Retval]) -> None:
            if f.cancelled() and self._event_loop_thread_id not in (
                None,
                threading.get_ident(),
            ):
                self.call(scope.cancel)

        try:
            retval_or_awaitable = func(*args, **kwargs)
            if isawaitable(retval_or_awaitable):
                with CancelScope() as scope:
                    if future.cancelled():
                        scope.cancel()
                    else:
                        future.add_done_callback(callback)

                    retval = await retval_or_awaitable
            else:
                retval = retval_or_awaitable
        except self._cancelled_exc_class:
            future.cancel()
            future.set_running_or_notify_cancel()
        except BaseException as exc:
            if not future.cancelled():
                future.set_exception(exc)

            # Let base exceptions fall through
            if not isinstance(exc, Exception):
                raise
        else:
            if not future.cancelled():
                future.set_result(retval)
        finally:
            scope = None  # type: ignore[assignment]

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        """
        Spawn a new task using the given callable.

        Implementors must ensure that the future is resolved when the task finishes.

        :param func: a callable
        :param args: positional arguments to be passed to the callable
        :param kwargs: keyword arguments to be passed to the callable
        :param name: name of the task (will be coerced to a string if not ``None``)
        :param future: a future that will resolve to the return value of the callable,
            or the exception raised during its execution

        """
        raise NotImplementedError

    @overload
    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
    ) -> T_Retval: ...

    @overload
    def call(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval: ...

    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
    ) -> T_Retval:
        """
        Call the given function in the event loop thread.

        If the callable returns a coroutine object, it is awaited on.

        :param func: any callable
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread

        """
        return cast(T_Retval, self.start_task_soon(func, *args).result())

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]:
        """
        Start a task in the portal's task group.

        The task will be run inside a cancel scope which can be cancelled by cancelling
        the returned future.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a future that resolves with the return value of the callable if the
            task completes successfully, or with the exception raised in the task
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread
        :rtype: concurrent.futures.Future[T_Retval]

        .. versionadded:: 3.0

        """
        self._check_running()
        f: Future[T_Retval] = Future()
        self._spawn_task_from_thread(func, args, {}, name, f)
        return f

    def start_task(
        self,
        func: Callable[..., Awaitable[T_Retval]],
        *args: object,
        name: object = None,
    ) -> tuple[Future[T_Retval], Any]:
        """
        Start a task in the portal's task group and wait until it signals for readiness.

        This method works the same way as :meth:`.abc.TaskGroup.start`.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a tuple of (future, task_status_value) where the ``task_status_value``
            is the value passed to ``task_status.started()`` from within the target
            function
        :rtype: tuple[concurrent.futures.Future[T_Retval], Any]

        .. versionadded:: 3.0

        """

        def task_done(future: Future[T_Retval]) -> None:
            if not task_status_future.done():
                if future.cancelled():
                    task_status_future.cancel()
                elif future.exception():
                    task_status_future.set_exception(future.exception())
                else:
                    exc = RuntimeError(
                        "Task exited without calling task_status.started()"
                    )
                    task_status_future.set_exception(exc)

        self._check_running()
        task_status_future: Future = Future()
        task_status = _BlockingPortalTaskStatus(task_status_future)
        f: Future = Future()
        f.add_done_callback(task_done)
        self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
        return f, task_status_future.result()

    def wrap_async_context_manager(
        self, cm: AsyncContextManager[T_co]
    ) -> ContextManager[T_co]:
        """
        Wrap an async context manager as a synchronous context manager via this portal.

        Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
        in the middle until the synchronous context manager exits.

        :param cm: an asynchronous context manager
        :return: a synchronous context manager

        .. versionadded:: 2.1

        """
        return _BlockingAsyncContextManager(cm, self)
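A sketch of portal.call() from a foreign thread; the portal must first be entered in the event loop thread, and _check_running() rejects calls made from that same thread:

import anyio
from anyio import to_thread
from anyio.from_thread import BlockingPortal

async def main() -> None:
    async with BlockingPortal() as portal:
        def in_thread() -> float:
            # call() hops back into the event loop and awaits the coroutine
            return portal.call(anyio.current_time)

        print(await to_thread.run_sync(in_thread))

anyio.run(main)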
@dataclass
class BlockingPortalProvider:
    """
    A manager for a blocking portal. Used as a context manager. The first thread to
    enter this context manager causes a blocking portal to be started with the specific
    parameters, and the last thread to exit causes the portal to be shut down. Thus,
    there will be exactly one blocking portal running in this context as long as at
    least one thread has entered this context manager.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options

    .. versionadded:: 4.4
    """

    backend: str = "asyncio"
    backend_options: dict[str, Any] | None = None
    _lock: threading.Lock = field(init=False, default_factory=threading.Lock)
    _leases: int = field(init=False, default=0)
    _portal: BlockingPortal = field(init=False)
    _portal_cm: AbstractContextManager[BlockingPortal] | None = field(
        init=False, default=None
    )

    def __enter__(self) -> BlockingPortal:
        with self._lock:
            if self._portal_cm is None:
                self._portal_cm = start_blocking_portal(
                    self.backend, self.backend_options
                )
                self._portal = self._portal_cm.__enter__()

            self._leases += 1
            return self._portal

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        portal_cm: AbstractContextManager[BlockingPortal] | None = None
        with self._lock:
            assert self._portal_cm
            assert self._leases > 0
            self._leases -= 1
            if not self._leases:
                portal_cm = self._portal_cm
                self._portal_cm = None
                del self._portal

        if portal_cm:
            portal_cm.__exit__(None, None, None)


@contextmanager
def start_blocking_portal(
    backend: str = "asyncio", backend_options: dict[str, Any] | None = None
) -> Generator[BlockingPortal, Any, None]:
    """
    Start a new event loop in a new thread and run a blocking portal in its main task.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options
    :return: a context manager that yields a blocking portal

    .. versionchanged:: 3.0
        Usage as a context manager is now required.

    """

    async def run_portal() -> None:
        async with BlockingPortal() as portal_:
            if future.set_running_or_notify_cancel():
                future.set_result(portal_)
                await portal_.sleep_until_stopped()

    future: Future[BlockingPortal] = Future()
    with ThreadPoolExecutor(1) as executor:
        run_future = executor.submit(
            _eventloop.run,  # type: ignore[arg-type]
            run_portal,
            backend=backend,
            backend_options=backend_options,
        )
        try:
            wait(
                cast(Iterable[Future], [run_future, future]),
                return_when=FIRST_COMPLETED,
            )
        except BaseException:
            future.cancel()
            run_future.cancel()
            raise

        if future.done():
            portal = future.result()
            cancel_remaining_tasks = False
            try:
                yield portal
            except BaseException:
                cancel_remaining_tasks = True
                raise
            finally:
                try:
                    portal.call(portal.stop, cancel_remaining_tasks)
                except RuntimeError:
                    pass

        run_future.result()


def check_cancelled() -> None:
    """
    Check if the cancel scope of the host task running the current worker thread has
    been cancelled.

    If the host task's current cancel scope has indeed been cancelled, the
    backend-specific cancellation exception will be raised.

    :raises RuntimeError: if the current thread was not spawned by
        :func:`.to_thread.run_sync`

    """
    try:
        async_backend: AsyncBackend = threadlocals.current_async_backend
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    async_backend.check_cancelled()
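start_blocking_portal() is the entry point for purely synchronous callers: it runs a fresh event loop in a single-worker ThreadPoolExecutor and hands back the portal. A sketch:

import anyio
from anyio.from_thread import start_blocking_portal

async def query() -> str:
    await anyio.sleep(0.01)
    return "result"

with start_blocking_portal(backend="asyncio") as portal:
    print(portal.call(query))               # blocks until "result"
    future = portal.start_task_soon(query)  # concurrent.futures.Future
    print(future.result())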
161
myenv/lib/python3.12/site-packages/anyio/lowlevel.py
Normal file
@@ -0,0 +1,161 @@
from __future__ import annotations

import enum
from dataclasses import dataclass
from typing import Any, Generic, Literal, TypeVar, overload
from weakref import WeakKeyDictionary

from ._core._eventloop import get_async_backend

T = TypeVar("T")
D = TypeVar("D")


async def checkpoint() -> None:
    """
    Check for cancellation and allow the scheduler to switch to another task.

    Equivalent to (but more efficient than)::

        await checkpoint_if_cancelled()
        await cancel_shielded_checkpoint()


    .. versionadded:: 3.0

    """
    await get_async_backend().checkpoint()


async def checkpoint_if_cancelled() -> None:
    """
    Enter a checkpoint if the enclosing cancel scope has been cancelled.

    This does not allow the scheduler to switch to a different task.

    .. versionadded:: 3.0

    """
    await get_async_backend().checkpoint_if_cancelled()


async def cancel_shielded_checkpoint() -> None:
    """
    Allow the scheduler to switch to another task but without checking for cancellation.

    Equivalent to (but potentially more efficient than)::

        with CancelScope(shield=True):
            await checkpoint()


    .. versionadded:: 3.0

    """
    await get_async_backend().cancel_shielded_checkpoint()


def current_token() -> object:
    """
    Return a backend specific token object that can be used to get back to the event
    loop.

    """
    return get_async_backend().current_token()


_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
_token_wrappers: dict[Any, _TokenWrapper] = {}


@dataclass(frozen=True)
class _TokenWrapper:
    __slots__ = "_token", "__weakref__"
    _token: object


class _NoValueSet(enum.Enum):
    NO_VALUE_SET = enum.auto()


class RunvarToken(Generic[T]):
    __slots__ = "_var", "_value", "_redeemed"

    def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
        self._var = var
        self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
        self._redeemed = False


class RunVar(Generic[T]):
    """
    Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
    """

    __slots__ = "_name", "_default"

    NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET

    _token_wrappers: set[_TokenWrapper] = set()

    def __init__(
        self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ):
        self._name = name
        self._default = default

    @property
    def _current_vars(self) -> dict[str, T]:
        token = current_token()
        try:
            return _run_vars[token]
        except KeyError:
            run_vars = _run_vars[token] = {}
            return run_vars

    @overload
    def get(self, default: D) -> T | D: ...

    @overload
    def get(self) -> T: ...

    def get(
        self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ) -> T | D:
        try:
            return self._current_vars[self._name]
        except KeyError:
            if default is not RunVar.NO_VALUE_SET:
                return default
            elif self._default is not RunVar.NO_VALUE_SET:
                return self._default

        raise LookupError(
            f'Run variable "{self._name}" has no value and no default set'
        )

    def set(self, value: T) -> RunvarToken[T]:
        current_vars = self._current_vars
        token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
        current_vars[self._name] = value
        return token

    def reset(self, token: RunvarToken[T]) -> None:
        if token._var is not self:
            raise ValueError("This token does not belong to this RunVar")

        if token._redeemed:
            raise ValueError("This token has already been used")

        if token._value is _NoValueSet.NO_VALUE_SET:
            try:
                del self._current_vars[self._name]
            except KeyError:
                pass
        else:
            self._current_vars[self._name] = token._value

        token._redeemed = True

    def __repr__(self) -> str:
        return f"<RunVar name={self._name!r}>"
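A sketch of RunVar semantics: unlike a ContextVar, the value is keyed on the current event loop (via current_token() above), so each anyio.run() invocation starts from the default:

import anyio
from anyio.lowlevel import RunVar

hits = RunVar[int]("hits", default=0)

async def main() -> None:
    token = hits.set(hits.get() + 1)
    assert hits.get() == 1
    hits.reset(token)           # back to the default
    assert hits.get() == 0

anyio.run(main)    # a second anyio.run(main) would start at 0 again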
0
myenv/lib/python3.12/site-packages/anyio/py.typed
Normal file
149
myenv/lib/python3.12/site-packages/anyio/pytest_plugin.py
Normal file
@@ -0,0 +1,149 @@
from __future__ import annotations

from collections.abc import Iterator
from contextlib import ExitStack, contextmanager
from inspect import isasyncgenfunction, iscoroutinefunction
from typing import Any, Dict, Tuple, cast

import pytest
import sniffio

from ._core._eventloop import get_all_backends, get_async_backend
from .abc import TestRunner

_current_runner: TestRunner | None = None
_runner_stack: ExitStack | None = None
_runner_leases = 0


def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
    if isinstance(backend, str):
        return backend, {}
    elif isinstance(backend, tuple) and len(backend) == 2:
        if isinstance(backend[0], str) and isinstance(backend[1], dict):
            return cast(Tuple[str, Dict[str, Any]], backend)

    raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")


@contextmanager
def get_runner(
    backend_name: str, backend_options: dict[str, Any]
) -> Iterator[TestRunner]:
    global _current_runner, _runner_leases, _runner_stack
    if _current_runner is None:
        asynclib = get_async_backend(backend_name)
        _runner_stack = ExitStack()
        if sniffio.current_async_library_cvar.get(None) is None:
            # Since we're in control of the event loop, we can cache the name of the
            # async library
            token = sniffio.current_async_library_cvar.set(backend_name)
            _runner_stack.callback(sniffio.current_async_library_cvar.reset, token)

        backend_options = backend_options or {}
        _current_runner = _runner_stack.enter_context(
            asynclib.create_test_runner(backend_options)
        )

    _runner_leases += 1
    try:
        yield _current_runner
    finally:
        _runner_leases -= 1
        if not _runner_leases:
            assert _runner_stack is not None
            _runner_stack.close()
            _runner_stack = _current_runner = None


def pytest_configure(config: Any) -> None:
    config.addinivalue_line(
        "markers",
        "anyio: mark the (coroutine function) test to be run "
        "asynchronously via anyio.",
    )


def pytest_fixture_setup(fixturedef: Any, request: Any) -> None:
    def wrapper(*args, anyio_backend, **kwargs):  # type: ignore[no-untyped-def]
        backend_name, backend_options = extract_backend_and_options(anyio_backend)
        if has_backend_arg:
            kwargs["anyio_backend"] = anyio_backend

        with get_runner(backend_name, backend_options) as runner:
            if isasyncgenfunction(func):
                yield from runner.run_asyncgen_fixture(func, kwargs)
            else:
                yield runner.run_fixture(func, kwargs)

    # Only apply this to coroutine functions and async generator functions in requests
    # that involve the anyio_backend fixture
    func = fixturedef.func
    if isasyncgenfunction(func) or iscoroutinefunction(func):
        if "anyio_backend" in request.fixturenames:
            has_backend_arg = "anyio_backend" in fixturedef.argnames
            fixturedef.func = wrapper
            if not has_backend_arg:
                fixturedef.argnames += ("anyio_backend",)


@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
    if collector.istestfunction(obj, name):
        inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
        if iscoroutinefunction(inner_func):
            marker = collector.get_closest_marker("anyio")
            own_markers = getattr(obj, "pytestmark", ())
            if marker or any(marker.name == "anyio" for marker in own_markers):
                pytest.mark.usefixtures("anyio_backend")(obj)


@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
    def run_with_hypothesis(**kwargs: Any) -> None:
        with get_runner(backend_name, backend_options) as runner:
            runner.run_test(original_func, kwargs)

    backend = pyfuncitem.funcargs.get("anyio_backend")
    if backend:
        backend_name, backend_options = extract_backend_and_options(backend)

        if hasattr(pyfuncitem.obj, "hypothesis"):
            # Wrap the inner test function unless it's already wrapped
            original_func = pyfuncitem.obj.hypothesis.inner_test
            if original_func.__qualname__ != run_with_hypothesis.__qualname__:
                if iscoroutinefunction(original_func):
                    pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis

            return None

        if iscoroutinefunction(pyfuncitem.obj):
            funcargs = pyfuncitem.funcargs
            testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
            with get_runner(backend_name, backend_options) as runner:
                runner.run_test(pyfuncitem.obj, testargs)

            return True

    return None


@pytest.fixture(scope="module", params=get_all_backends())
def anyio_backend(request: Any) -> Any:
    return request.param


@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
    if isinstance(anyio_backend, str):
        return anyio_backend
    else:
        return anyio_backend[0]


@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
    if isinstance(anyio_backend, str):
        return {}
    else:
        return anyio_backend[1]
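From the test author's side, the hooks above reduce to marking coroutine tests and, optionally, overriding the parametrized anyio_backend fixture to pin a single backend. A sketch:

import anyio
import pytest

@pytest.fixture
def anyio_backend() -> str:
    return "asyncio"   # override the module-scoped, parametrized default

@pytest.mark.anyio
async def test_sleep() -> None:
    await anyio.sleep(0)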
Binary file not shown.
119
myenv/lib/python3.12/site-packages/anyio/streams/buffered.py
Normal file
@@ -0,0 +1,119 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Callable, Mapping
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
|
||||
from ..abc import AnyByteReceiveStream, ByteReceiveStream
|
||||
|
||||
|
||||
@dataclass(eq=False)
|
||||
class BufferedByteReceiveStream(ByteReceiveStream):
    """
    Wraps any bytes-based receive stream and uses a buffer to provide sophisticated
    receiving capabilities in the form of a byte stream.
    """

    receive_stream: AnyByteReceiveStream
    _buffer: bytearray = field(init=False, default_factory=bytearray)
    _closed: bool = field(init=False, default=False)

    async def aclose(self) -> None:
        await self.receive_stream.aclose()
        self._closed = True

    @property
    def buffer(self) -> bytes:
        """The bytes currently in the buffer."""
        return bytes(self._buffer)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.receive_stream.extra_attributes

    async def receive(self, max_bytes: int = 65536) -> bytes:
        if self._closed:
            raise ClosedResourceError

        if self._buffer:
            chunk = bytes(self._buffer[:max_bytes])
            del self._buffer[:max_bytes]
            return chunk
        elif isinstance(self.receive_stream, ByteReceiveStream):
            return await self.receive_stream.receive(max_bytes)
        else:
            # With a bytes-oriented object stream, we need to handle any surplus bytes
            # we get from the receive() call
            chunk = await self.receive_stream.receive()
            if len(chunk) > max_bytes:
                # Save the surplus bytes in the buffer
                self._buffer.extend(chunk[max_bytes:])
                return chunk[:max_bytes]
            else:
                return chunk

    async def receive_exactly(self, nbytes: int) -> bytes:
        """
        Read exactly the given number of bytes from the stream.

        :param nbytes: the number of bytes to read
        :return: the bytes read
        :raises ~anyio.IncompleteRead: if the stream was closed before the requested
            number of bytes could be read from the stream

        """
        while True:
            remaining = nbytes - len(self._buffer)
            if remaining <= 0:
                retval = self._buffer[:nbytes]
                del self._buffer[:nbytes]
                return bytes(retval)

            try:
                if isinstance(self.receive_stream, ByteReceiveStream):
                    chunk = await self.receive_stream.receive(remaining)
                else:
                    chunk = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            self._buffer.extend(chunk)

    async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
        """
        Read from the stream until the delimiter is found or max_bytes have been read.

        :param delimiter: the marker to look for in the stream
        :param max_bytes: maximum number of bytes that will be read before raising
            :exc:`~anyio.DelimiterNotFound`
        :return: the bytes read (not including the delimiter)
        :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
            was found
        :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
            bytes read up to the maximum allowed

        """
        delimiter_size = len(delimiter)
        offset = 0
        while True:
            # Check if the delimiter can be found in the current buffer
            index = self._buffer.find(delimiter, offset)
            if index >= 0:
                found = self._buffer[:index]
                del self._buffer[: index + len(delimiter) :]
                return bytes(found)

            # Check if the buffer is already at or over the limit
            if len(self._buffer) >= max_bytes:
                raise DelimiterNotFound(max_bytes)

            # Read more data into the buffer from the socket
            try:
                data = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            # Move the offset forward and add the new data to the buffer
            offset = max(len(self._buffer) - delimiter_size + 1, 0)
            self._buffer.extend(data)
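A minimal usage sketch for BufferedByteReceiveStream (not part of the vendored file; it assumes this anyio 4.x copy is importable, and the payloads below are illustrative). A memory object stream of bytes stands in for a socket, and receive_until() parses line-delimited records while buffering any surplus:

import anyio
from anyio.streams.buffered import BufferedByteReceiveStream


async def main() -> None:
    # Any bytes-carrying receive stream works; a memory object stream is the
    # simplest stand-in for a socket here
    send, receive = anyio.create_memory_object_stream[bytes](10)
    buffered = BufferedByteReceiveStream(receive)

    await send.send(b"first line\nsecond")
    await send.send(b" line\n")
    await send.aclose()

    # receive_until() strips the delimiter; surplus bytes stay in the buffer
    assert await buffered.receive_until(b"\n", 100) == b"first line"
    assert await buffered.receive_until(b"\n", 100) == b"second line"
    await buffered.aclose()


anyio.run(main)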
148
myenv/lib/python3.12/site-packages/anyio/streams/file.py
Normal file
@@ -0,0 +1,148 @@
from __future__ import annotations

from collections.abc import Callable, Mapping
from io import SEEK_SET, UnsupportedOperation
from os import PathLike
from pathlib import Path
from typing import Any, BinaryIO, cast

from .. import (
    BrokenResourceError,
    ClosedResourceError,
    EndOfStream,
    TypedAttributeSet,
    to_thread,
    typed_attribute,
)
from ..abc import ByteReceiveStream, ByteSendStream


class FileStreamAttribute(TypedAttributeSet):
    #: the open file descriptor
    file: BinaryIO = typed_attribute()
    #: the path of the file on the file system, if available (file must be a real file)
    path: Path = typed_attribute()
    #: the file number, if available (file must be a real file or a TTY)
    fileno: int = typed_attribute()


class _BaseFileStream:
    def __init__(self, file: BinaryIO):
        self._file = file

    async def aclose(self) -> None:
        await to_thread.run_sync(self._file.close)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        attributes: dict[Any, Callable[[], Any]] = {
            FileStreamAttribute.file: lambda: self._file,
        }

        if hasattr(self._file, "name"):
            attributes[FileStreamAttribute.path] = lambda: Path(self._file.name)

        try:
            self._file.fileno()
        except UnsupportedOperation:
            pass
        else:
            attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno()

        return attributes


class FileReadStream(_BaseFileStream, ByteReceiveStream):
    """
    A byte stream that reads from a file in the file system.

    :param file: a file that has been opened for reading in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:
        """
        Create a file read stream by opening the given file.

        :param path: path of the file to read from

        """
        file = await to_thread.run_sync(Path(path).open, "rb")
        return cls(cast(BinaryIO, file))

    async def receive(self, max_bytes: int = 65536) -> bytes:
        try:
            data = await to_thread.run_sync(self._file.read, max_bytes)
        except ValueError:
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc

        if data:
            return data
        else:
            raise EndOfStream

    async def seek(self, position: int, whence: int = SEEK_SET) -> int:
        """
        Seek the file to the given position.

        .. seealso:: :meth:`io.IOBase.seek`

        .. note:: Not all file descriptors are seekable.

        :param position: position to seek the file to
        :param whence: controls how ``position`` is interpreted
        :return: the new absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.seek, position, whence)

    async def tell(self) -> int:
        """
        Return the current stream position.

        .. note:: Not all file descriptors are seekable.

        :return: the current absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.tell)


class FileWriteStream(_BaseFileStream, ByteSendStream):
    """
    A byte stream that writes to a file in the file system.

    :param file: a file that has been opened for writing in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(
        cls, path: str | PathLike[str], append: bool = False
    ) -> FileWriteStream:
        """
        Create a file write stream by opening the given file for writing.

        :param path: path of the file to write to
        :param append: if ``True``, open the file for appending; if ``False``, any
            existing file at the given path will be truncated

        """
        mode = "ab" if append else "wb"
        file = await to_thread.run_sync(Path(path).open, mode)
        return cls(cast(BinaryIO, file))

    async def send(self, item: bytes) -> None:
        try:
            await to_thread.run_sync(self._file.write, item)
        except ValueError:
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc
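A usage sketch for the file streams above (illustrative, not part of the vendored file; "demo.bin" is a made-up path). Both stream classes inherit async context manager support from anyio's AsyncResource, so they close themselves on exit:

import anyio
from anyio.streams.file import FileReadStream, FileWriteStream


async def main() -> None:
    path = "demo.bin"  # illustrative path
    async with await FileWriteStream.from_path(path) as send:
        await send.send(b"hello world")  # the write runs in a worker thread

    async with await FileReadStream.from_path(path) as receive:
        assert await receive.receive() == b"hello world"


anyio.run(main)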
311
myenv/lib/python3.12/site-packages/anyio/streams/memory.py
Normal file
@@ -0,0 +1,311 @@
from __future__ import annotations

import warnings
from collections import OrderedDict, deque
from dataclasses import dataclass, field
from types import TracebackType
from typing import Generic, NamedTuple, TypeVar

from .. import (
    BrokenResourceError,
    ClosedResourceError,
    EndOfStream,
    WouldBlock,
)
from .._core._testing import TaskInfo, get_current_task
from ..abc import Event, ObjectReceiveStream, ObjectSendStream
from ..lowlevel import checkpoint

T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)


class MemoryObjectStreamStatistics(NamedTuple):
    current_buffer_used: int  #: number of items stored in the buffer
    #: maximum number of items that can be stored on this stream (or :data:`math.inf`)
    max_buffer_size: float
    open_send_streams: int  #: number of unclosed clones of the send stream
    open_receive_streams: int  #: number of unclosed clones of the receive stream
    #: number of tasks blocked on :meth:`MemoryObjectSendStream.send`
    tasks_waiting_send: int
    #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive`
    tasks_waiting_receive: int


@dataclass(eq=False)
class MemoryObjectItemReceiver(Generic[T_Item]):
    task_info: TaskInfo = field(init=False, default_factory=get_current_task)
    item: T_Item = field(init=False)


@dataclass(eq=False)
class MemoryObjectStreamState(Generic[T_Item]):
    max_buffer_size: float = field()
    buffer: deque[T_Item] = field(init=False, default_factory=deque)
    open_send_channels: int = field(init=False, default=0)
    open_receive_channels: int = field(init=False, default=0)
    waiting_receivers: OrderedDict[Event, MemoryObjectItemReceiver[T_Item]] = field(
        init=False, default_factory=OrderedDict
    )
    waiting_senders: OrderedDict[Event, T_Item] = field(
        init=False, default_factory=OrderedDict
    )

    def statistics(self) -> MemoryObjectStreamStatistics:
        return MemoryObjectStreamStatistics(
            len(self.buffer),
            self.max_buffer_size,
            self.open_send_channels,
            self.open_receive_channels,
            len(self.waiting_senders),
            len(self.waiting_receivers),
        )


@dataclass(eq=False)
class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]):
    _state: MemoryObjectStreamState[T_co]
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        self._state.open_receive_channels += 1

    def receive_nowait(self) -> T_co:
        """
        Receive the next item if it can be done without waiting.

        :return: the received item
        :raises ~anyio.ClosedResourceError: if this receive stream has been closed
        :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been
            closed from the sending end
        :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks
            waiting to send

        """
        if self._closed:
            raise ClosedResourceError

        if self._state.waiting_senders:
            # Get the item from the next sender
            send_event, item = self._state.waiting_senders.popitem(last=False)
            self._state.buffer.append(item)
            send_event.set()

        if self._state.buffer:
            return self._state.buffer.popleft()
        elif not self._state.open_send_channels:
            raise EndOfStream

        raise WouldBlock

    async def receive(self) -> T_co:
        await checkpoint()
        try:
            return self.receive_nowait()
        except WouldBlock:
            # Add ourselves in the queue
            receive_event = Event()
            receiver = MemoryObjectItemReceiver[T_co]()
            self._state.waiting_receivers[receive_event] = receiver

            try:
                await receive_event.wait()
            finally:
                self._state.waiting_receivers.pop(receive_event, None)

            try:
                return receiver.item
            except AttributeError:
                raise EndOfStream

    def clone(self) -> MemoryObjectReceiveStream[T_co]:
        """
        Create a clone of this receive stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the receiving end of the memory stream be considered closed by the sending ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectReceiveStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_receive_channels -= 1
            if self._state.open_receive_channels == 0:
                send_events = list(self._state.waiting_senders.keys())
                for event in send_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectReceiveStream[T_co]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__}>",
                ResourceWarning,
                source=self,
            )


@dataclass(eq=False)
class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]):
    _state: MemoryObjectStreamState[T_contra]
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        self._state.open_send_channels += 1

    def send_nowait(self, item: T_contra) -> None:
        """
        Send an item immediately if it can be done without waiting.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end
        :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting
            to receive

        """
        if self._closed:
            raise ClosedResourceError
        if not self._state.open_receive_channels:
            raise BrokenResourceError

        while self._state.waiting_receivers:
            receive_event, receiver = self._state.waiting_receivers.popitem(last=False)
            if not receiver.task_info.has_pending_cancellation():
                receiver.item = item
                receive_event.set()
                return

        if len(self._state.buffer) < self._state.max_buffer_size:
            self._state.buffer.append(item)
        else:
            raise WouldBlock

    async def send(self, item: T_contra) -> None:
        """
        Send an item to the stream.

        If the buffer is full, this method blocks until there is again room in the
        buffer or the item can be sent directly to a receiver.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end

        """
        await checkpoint()
        try:
            self.send_nowait(item)
        except WouldBlock:
            # Wait until there's someone on the receiving end
            send_event = Event()
            self._state.waiting_senders[send_event] = item
            try:
                await send_event.wait()
            except BaseException:
                self._state.waiting_senders.pop(send_event, None)
                raise

            if send_event in self._state.waiting_senders:
                del self._state.waiting_senders[send_event]
                raise BrokenResourceError from None

    def clone(self) -> MemoryObjectSendStream[T_contra]:
        """
        Create a clone of this send stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the sending end of the memory stream be considered closed by the receiving ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectSendStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_send_channels -= 1
            if self._state.open_send_channels == 0:
                receive_events = list(self._state.waiting_receivers.keys())
                self._state.waiting_receivers.clear()
                for event in receive_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectSendStream[T_contra]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__}>",
                ResourceWarning,
                source=self,
            )
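A producer/consumer sketch for the memory object streams above (illustrative, not part of the vendored file). Closing the send side raises EndOfStream on the receiving end, which cleanly terminates the async-for loop:

import anyio
from anyio.streams.memory import MemoryObjectSendStream


async def producer(send: MemoryObjectSendStream[int]) -> None:
    async with send:  # closing the send side ends the consumer's loop below
        for i in range(3):
            await send.send(i)


async def main() -> None:
    send, receive = anyio.create_memory_object_stream[int](max_buffer_size=1)
    async with anyio.create_task_group() as tg:
        tg.start_soon(producer, send)
        async with receive:
            async for item in receive:
                print(item)  # prints 0, 1, 2, then exits on EndOfStream


anyio.run(main)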
141
myenv/lib/python3.12/site-packages/anyio/streams/stapled.py
Normal file
@@ -0,0 +1,141 @@
from __future__ import annotations

from collections.abc import Callable, Mapping, Sequence
from dataclasses import dataclass
from typing import Any, Generic, TypeVar

from ..abc import (
    ByteReceiveStream,
    ByteSendStream,
    ByteStream,
    Listener,
    ObjectReceiveStream,
    ObjectSendStream,
    ObjectStream,
    TaskGroup,
)

T_Item = TypeVar("T_Item")
T_Stream = TypeVar("T_Stream")


@dataclass(eq=False)
class StapledByteStream(ByteStream):
    """
    Combines two byte streams into a single, bidirectional byte stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ByteSendStream send_stream: the sending byte stream
    :param ByteReceiveStream receive_stream: the receiving byte stream
    """

    send_stream: ByteSendStream
    receive_stream: ByteReceiveStream

    async def receive(self, max_bytes: int = 65536) -> bytes:
        return await self.receive_stream.receive(max_bytes)

    async def send(self, item: bytes) -> None:
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        await self.send_stream.aclose()
        await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self.send_stream.extra_attributes,
            **self.receive_stream.extra_attributes,
        }


@dataclass(eq=False)
class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
    """
    Combines two object streams into a single, bidirectional object stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ObjectSendStream send_stream: the sending object stream
    :param ObjectReceiveStream receive_stream: the receiving object stream
    """

    send_stream: ObjectSendStream[T_Item]
    receive_stream: ObjectReceiveStream[T_Item]

    async def receive(self) -> T_Item:
        return await self.receive_stream.receive()

    async def send(self, item: T_Item) -> None:
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        await self.send_stream.aclose()
        await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self.send_stream.extra_attributes,
            **self.receive_stream.extra_attributes,
        }


@dataclass(eq=False)
class MultiListener(Generic[T_Stream], Listener[T_Stream]):
    """
    Combines multiple listeners into one, serving connections from all of them at once.

    Any MultiListeners in the given collection of listeners will have their listeners
    moved into this one.

    Extra attributes are provided from each listener, with each successive listener
    overriding any conflicting attributes from the previous one.

    :param listeners: listeners to serve
    :type listeners: Sequence[Listener[T_Stream]]
    """

    listeners: Sequence[Listener[T_Stream]]

    def __post_init__(self) -> None:
        listeners: list[Listener[T_Stream]] = []
        for listener in self.listeners:
            if isinstance(listener, MultiListener):
                listeners.extend(listener.listeners)
                del listener.listeners[:]  # type: ignore[attr-defined]
            else:
                listeners.append(listener)

        self.listeners = listeners

    async def serve(
        self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
    ) -> None:
        from .. import create_task_group

        async with create_task_group() as tg:
            for listener in self.listeners:
                tg.start_soon(listener.serve, handler, task_group)

    async def aclose(self) -> None:
        for listener in self.listeners:
            await listener.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        attributes: dict = {}
        for listener in self.listeners:
            attributes.update(listener.extra_attributes)

        return attributes
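A sketch of StapledObjectStream in use (illustrative, not part of the vendored file). Two one-way memory streams are stapled into a pair of bidirectional endpoints, which is a handy in-process stand-in for a connected socket pair:

import anyio
from anyio.streams.stapled import StapledObjectStream


async def main() -> None:
    # Two one-way memory streams stapled into a pair of bidirectional endpoints
    send_a, receive_a = anyio.create_memory_object_stream[str](1)
    send_b, receive_b = anyio.create_memory_object_stream[str](1)
    left = StapledObjectStream(send_a, receive_b)
    right = StapledObjectStream(send_b, receive_a)

    await left.send("ping")
    assert await right.receive() == "ping"
    await right.send("pong")
    assert await left.receive() == "pong"

    await left.aclose()
    await right.aclose()


anyio.run(main)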
147
myenv/lib/python3.12/site-packages/anyio/streams/text.py
Normal file
@@ -0,0 +1,147 @@
from __future__ import annotations

import codecs
from collections.abc import Callable, Mapping
from dataclasses import InitVar, dataclass, field
from typing import Any

from ..abc import (
    AnyByteReceiveStream,
    AnyByteSendStream,
    AnyByteStream,
    ObjectReceiveStream,
    ObjectSendStream,
    ObjectStream,
)


@dataclass(eq=False)
class TextReceiveStream(ObjectReceiveStream[str]):
    """
    Stream wrapper that decodes bytes to strings using the given encoding.

    Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any
    completely received unicode characters as soon as they come in.

    :param transport_stream: any bytes-based receive stream
    :param encoding: character encoding to use for decoding bytes to strings (defaults
        to ``utf-8``)
    :param errors: handling scheme for decoding errors (defaults to ``strict``; see the
        `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteReceiveStream
    encoding: InitVar[str] = "utf-8"
    errors: InitVar[str] = "strict"
    _decoder: codecs.IncrementalDecoder = field(init=False)

    def __post_init__(self, encoding: str, errors: str) -> None:
        decoder_class = codecs.getincrementaldecoder(encoding)
        self._decoder = decoder_class(errors=errors)

    async def receive(self) -> str:
        while True:
            chunk = await self.transport_stream.receive()
            decoded = self._decoder.decode(chunk)
            if decoded:
                return decoded

    async def aclose(self) -> None:
        await self.transport_stream.aclose()
        self._decoder.reset()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes


@dataclass(eq=False)
class TextSendStream(ObjectSendStream[str]):
    """
    Sends strings to the wrapped stream as bytes using the given encoding.

    :param AnyByteSendStream transport_stream: any bytes-based send stream
    :param str encoding: character encoding to use for encoding strings to bytes
        (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteSendStream
    encoding: InitVar[str] = "utf-8"
    errors: str = "strict"
    _encoder: Callable[..., tuple[bytes, int]] = field(init=False)

    def __post_init__(self, encoding: str) -> None:
        self._encoder = codecs.getencoder(encoding)

    async def send(self, item: str) -> None:
        encoded = self._encoder(item, self.errors)[0]
        await self.transport_stream.send(encoded)

    async def aclose(self) -> None:
        await self.transport_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes


@dataclass(eq=False)
class TextStream(ObjectStream[str]):
    """
    A bidirectional stream that decodes bytes to strings on receive and encodes strings
    to bytes on send.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param AnyByteStream transport_stream: any bytes-based stream
    :param str encoding: character encoding to use for encoding/decoding strings to/from
        bytes (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteStream
    encoding: InitVar[str] = "utf-8"
    errors: InitVar[str] = "strict"
    _receive_stream: TextReceiveStream = field(init=False)
    _send_stream: TextSendStream = field(init=False)

    def __post_init__(self, encoding: str, errors: str) -> None:
        self._receive_stream = TextReceiveStream(
            self.transport_stream, encoding=encoding, errors=errors
        )
        self._send_stream = TextSendStream(
            self.transport_stream, encoding=encoding, errors=errors
        )

    async def receive(self) -> str:
        return await self._receive_stream.receive()

    async def send(self, item: str) -> None:
        await self._send_stream.send(item)

    async def send_eof(self) -> None:
        await self.transport_stream.send_eof()

    async def aclose(self) -> None:
        await self._send_stream.aclose()
        await self._receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self._send_stream.extra_attributes,
            **self._receive_stream.extra_attributes,
        }
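A loopback sketch for TextStream (illustrative, not part of the vendored file; it combines the stapled and memory stream modules above so that whatever is sent comes straight back):

import anyio
from anyio.streams.stapled import StapledObjectStream
from anyio.streams.text import TextStream


async def main() -> None:
    # A loopback byte stream: whatever is sent comes back on receive
    send, receive = anyio.create_memory_object_stream[bytes](10)
    loopback = StapledObjectStream(send, receive)
    text = TextStream(loopback, encoding="utf-8")

    await text.send("héllo")  # encoded to UTF-8 bytes on the way out
    assert await text.receive() == "héllo"  # decoded incrementally on the way in
    await text.aclose()


anyio.run(main)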
338
myenv/lib/python3.12/site-packages/anyio/streams/tls.py
Normal file
@@ -0,0 +1,338 @@
from __future__ import annotations

import logging
import re
import ssl
import sys
from collections.abc import Callable, Mapping
from dataclasses import dataclass
from functools import wraps
from typing import Any, Tuple, TypeVar

from .. import (
    BrokenResourceError,
    EndOfStream,
    aclose_forcefully,
    get_cancelled_exc_class,
)
from .._core._typedattr import TypedAttributeSet, typed_attribute
from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
_PCTRTT = Tuple[Tuple[str, str], ...]
_PCTRTTT = Tuple[_PCTRTT, ...]


class TLSAttribute(TypedAttributeSet):
    """Contains Transport Layer Security related attributes."""

    #: the selected ALPN protocol
    alpn_protocol: str | None = typed_attribute()
    #: the channel binding for type ``tls-unique``
    channel_binding_tls_unique: bytes = typed_attribute()
    #: the selected cipher
    cipher: tuple[str, str, int] = typed_attribute()
    #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`
    #: for more information)
    peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()
    #: the peer certificate in binary form
    peer_certificate_binary: bytes | None = typed_attribute()
    #: ``True`` if this is the server side of the connection
    server_side: bool = typed_attribute()
    #: ciphers shared by the client during the TLS handshake (``None`` if this is the
    #: client side)
    shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()
    #: the :class:`~ssl.SSLObject` used for encryption
    ssl_object: ssl.SSLObject = typed_attribute()
    #: ``True`` if this stream does (and expects) a closing TLS handshake when the
    #: stream is being closed
    standard_compatible: bool = typed_attribute()
    #: the TLS protocol version (e.g. ``TLSv1.2``)
    tls_version: str = typed_attribute()


@dataclass(eq=False)
class TLSStream(ByteStream):
    """
    A stream wrapper that encrypts all sent data and decrypts received data.

    This class has no public initializer; use :meth:`wrap` instead.
    All extra attributes from :class:`~TLSAttribute` are supported.

    :var AnyByteStream transport_stream: the wrapped stream

    """

    transport_stream: AnyByteStream
    standard_compatible: bool
    _ssl_object: ssl.SSLObject
    _read_bio: ssl.MemoryBIO
    _write_bio: ssl.MemoryBIO

    @classmethod
    async def wrap(
        cls,
        transport_stream: AnyByteStream,
        *,
        server_side: bool | None = None,
        hostname: str | None = None,
        ssl_context: ssl.SSLContext | None = None,
        standard_compatible: bool = True,
    ) -> TLSStream:
        """
        Wrap an existing stream with Transport Layer Security.

        This performs a TLS handshake with the peer.

        :param transport_stream: a bytes-transporting stream to wrap
        :param server_side: ``True`` if this is the server side of the connection,
            ``False`` if this is the client side (if omitted, will be set to ``False``
            if ``hostname`` has been provided, ``True`` otherwise). Used only to create
            a default context when an explicit context has not been provided.
        :param hostname: host name of the peer (if host name checking is desired)
        :param ssl_context: the SSLContext object to use (if not provided, a secure
            default will be created)
        :param standard_compatible: if ``False``, skip the closing handshake when
            closing the connection, and don't raise an exception if the peer does the
            same
        :raises ~ssl.SSLError: if the TLS handshake fails

        """
        if server_side is None:
            server_side = not hostname

        if not ssl_context:
            purpose = (
                ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH
            )
            ssl_context = ssl.create_default_context(purpose)

            # Re-enable detection of unexpected EOFs if it was disabled by Python
            if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
                ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF

        bio_in = ssl.MemoryBIO()
        bio_out = ssl.MemoryBIO()
        ssl_object = ssl_context.wrap_bio(
            bio_in, bio_out, server_side=server_side, server_hostname=hostname
        )
        wrapper = cls(
            transport_stream=transport_stream,
            standard_compatible=standard_compatible,
            _ssl_object=ssl_object,
            _read_bio=bio_in,
            _write_bio=bio_out,
        )
        await wrapper._call_sslobject_method(ssl_object.do_handshake)
        return wrapper

    async def _call_sslobject_method(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval:
        while True:
            try:
                result = func(*args)
            except ssl.SSLWantReadError:
                try:
                    # Flush any pending writes first
                    if self._write_bio.pending:
                        await self.transport_stream.send(self._write_bio.read())

                    data = await self.transport_stream.receive()
                except EndOfStream:
                    self._read_bio.write_eof()
                except OSError as exc:
                    self._read_bio.write_eof()
                    self._write_bio.write_eof()
                    raise BrokenResourceError from exc
                else:
                    self._read_bio.write(data)
            except ssl.SSLWantWriteError:
                await self.transport_stream.send(self._write_bio.read())
            except ssl.SSLSyscallError as exc:
                self._read_bio.write_eof()
                self._write_bio.write_eof()
                raise BrokenResourceError from exc
            except ssl.SSLError as exc:
                self._read_bio.write_eof()
                self._write_bio.write_eof()
                if (
                    isinstance(exc, ssl.SSLEOFError)
                    or "UNEXPECTED_EOF_WHILE_READING" in exc.strerror
                ):
                    if self.standard_compatible:
                        raise BrokenResourceError from exc
                    else:
                        raise EndOfStream from None

                raise
            else:
                # Flush any pending writes first
                if self._write_bio.pending:
                    await self.transport_stream.send(self._write_bio.read())

                return result

    async def unwrap(self) -> tuple[AnyByteStream, bytes]:
        """
        Does the TLS closing handshake.

        :return: a tuple of (wrapped byte stream, bytes left in the read buffer)

        """
        await self._call_sslobject_method(self._ssl_object.unwrap)
        self._read_bio.write_eof()
        self._write_bio.write_eof()
        return self.transport_stream, self._read_bio.read()

    async def aclose(self) -> None:
        if self.standard_compatible:
            try:
                await self.unwrap()
            except BaseException:
                await aclose_forcefully(self.transport_stream)
                raise

        await self.transport_stream.aclose()

    async def receive(self, max_bytes: int = 65536) -> bytes:
        data = await self._call_sslobject_method(self._ssl_object.read, max_bytes)
        if not data:
            raise EndOfStream

        return data

    async def send(self, item: bytes) -> None:
        await self._call_sslobject_method(self._ssl_object.write, item)

    async def send_eof(self) -> None:
        tls_version = self.extra(TLSAttribute.tls_version)
        match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version)
        if match:
            major, minor = int(match.group(1)), int(match.group(2) or 0)
            if (major, minor) < (1, 3):
                raise NotImplementedError(
                    f"send_eof() requires at least TLSv1.3; current "
                    f"session uses {tls_version}"
                )

        raise NotImplementedError(
            "send_eof() has not yet been implemented for TLS streams"
        )

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self.transport_stream.extra_attributes,
            TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol,
            TLSAttribute.channel_binding_tls_unique: (
                self._ssl_object.get_channel_binding
            ),
            TLSAttribute.cipher: self._ssl_object.cipher,
            TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False),
            TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert(
                True
            ),
            TLSAttribute.server_side: lambda: self._ssl_object.server_side,
            TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers()
            if self._ssl_object.server_side
            else None,
            TLSAttribute.standard_compatible: lambda: self.standard_compatible,
            TLSAttribute.ssl_object: lambda: self._ssl_object,
            TLSAttribute.tls_version: self._ssl_object.version,
        }


@dataclass(eq=False)
class TLSListener(Listener[TLSStream]):
    """
    A convenience listener that wraps another listener and auto-negotiates a TLS session
    on every accepted connection.

    If the TLS handshake times out or raises an exception,
    :meth:`handle_handshake_error` is called to do whatever post-mortem processing is
    deemed necessary.

    Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute.

    :param Listener listener: the listener to wrap
    :param ssl_context: the SSL context object
    :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap`
    :param handshake_timeout: time limit for the TLS handshake
        (passed to :func:`~anyio.fail_after`)
    """

    listener: Listener[Any]
    ssl_context: ssl.SSLContext
    standard_compatible: bool = True
    handshake_timeout: float = 30

    @staticmethod
    async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None:
        """
        Handle an exception raised during the TLS handshake.

        This method does 3 things:

        #. Forcefully closes the original stream
        #. Logs the exception (unless it was a cancellation exception) using the
           ``anyio.streams.tls`` logger
        #. Reraises the exception if it was a base exception or a cancellation exception

        :param exc: the exception
        :param stream: the original stream

        """
        await aclose_forcefully(stream)

        # Log all except cancellation exceptions
        if not isinstance(exc, get_cancelled_exc_class()):
            # CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using
            # any asyncio implementation, so we explicitly pass the exception to log
            # (https://github.com/python/cpython/issues/108668). Trio does not have this
            # issue because it works around the CPython bug.
            logging.getLogger(__name__).exception(
                "Error during TLS handshake", exc_info=exc
            )

        # Only reraise base exceptions and cancellation exceptions
        if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()):
            raise

    async def serve(
        self,
        handler: Callable[[TLSStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        @wraps(handler)
        async def handler_wrapper(stream: AnyByteStream) -> None:
            from .. import fail_after

            try:
                with fail_after(self.handshake_timeout):
                    wrapped_stream = await TLSStream.wrap(
                        stream,
                        ssl_context=self.ssl_context,
                        standard_compatible=self.standard_compatible,
                    )
            except BaseException as exc:
                await self.handle_handshake_error(exc, stream)
            else:
                await handler(wrapped_stream)

        await self.listener.serve(handler_wrapper, task_group)

    async def aclose(self) -> None:
        await self.listener.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            TLSAttribute.standard_compatible: lambda: self.standard_compatible,
        }
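A client-side sketch for TLSStream.wrap (illustrative and network-dependent, not part of the vendored file; "example.com" is a placeholder host). Since a hostname is given, the default context and server_side=False are derived automatically:

import anyio
from anyio.streams.tls import TLSAttribute, TLSStream


async def main() -> None:
    tcp = await anyio.connect_tcp("example.com", 443)  # illustrative host
    tls = await TLSStream.wrap(tcp, hostname="example.com")  # client side
    print(tls.extra(TLSAttribute.tls_version))  # e.g. "TLSv1.3"
    await tls.send(b"HEAD / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n")
    print(await tls.receive())
    await tls.aclose()


anyio.run(main)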
259
myenv/lib/python3.12/site-packages/anyio/to_process.py
Normal file
@@ -0,0 +1,259 @@
from __future__ import annotations

import os
import pickle
import subprocess
import sys
from collections import deque
from collections.abc import Callable
from importlib.util import module_from_spec, spec_from_file_location
from typing import TypeVar, cast

from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
from ._core._exceptions import BrokenWorkerProcess
from ._core._subprocesses import open_process
from ._core._synchronization import CapacityLimiter
from ._core._tasks import CancelScope, fail_after
from .abc import ByteReceiveStream, ByteSendStream, Process
from .lowlevel import RunVar, checkpoint_if_cancelled
from .streams.buffered import BufferedByteReceiveStream

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

WORKER_MAX_IDLE_TIME = 300  # 5 minutes

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
    "_process_pool_idle_workers"
)
_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")


async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    cancellable: bool = False,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker process.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the worker process running it will be abruptly terminated using SIGKILL
    (or ``TerminateProcess()`` on Windows).

    :param func: a callable
    :param args: positional arguments for the callable
    :param cancellable: ``True`` to allow cancellation of the operation while it's
        running
    :param limiter: capacity limiter to use to limit the total number of processes
        running (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """

    async def send_raw_command(pickled_cmd: bytes) -> object:
        try:
            await stdin.send(pickled_cmd)
            response = await buffered.receive_until(b"\n", 50)
            status, length = response.split(b" ")
            if status not in (b"RETURN", b"EXCEPTION"):
                raise RuntimeError(
                    f"Worker process returned unexpected response: {response!r}"
                )

            pickled_response = await buffered.receive_exactly(int(length))
        except BaseException as exc:
            workers.discard(process)
            try:
                process.kill()
                with CancelScope(shield=True):
                    await process.aclose()
            except ProcessLookupError:
                pass

            if isinstance(exc, get_cancelled_exc_class()):
                raise
            else:
                raise BrokenWorkerProcess from exc

        retval = pickle.loads(pickled_response)
        if status == b"EXCEPTION":
            assert isinstance(retval, BaseException)
            raise retval
        else:
            return retval

    # First pickle the request before trying to reserve a worker process
    await checkpoint_if_cancelled()
    request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)

    # If this is the first run in this event loop thread, set up the necessary variables
    try:
        workers = _process_pool_workers.get()
        idle_workers = _process_pool_idle_workers.get()
    except LookupError:
        workers = set()
        idle_workers = deque()
        _process_pool_workers.set(workers)
        _process_pool_idle_workers.set(idle_workers)
        get_async_backend().setup_process_pool_exit_at_shutdown(workers)

    async with limiter or current_default_process_limiter():
        # Pop processes from the pool (starting from the most recently used) until we
        # find one that hasn't exited yet
        process: Process
        while idle_workers:
            process, idle_since = idle_workers.pop()
            if process.returncode is None:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )

                # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
                # seconds or longer
                now = current_time()
                killed_processes: list[Process] = []
                while idle_workers:
                    if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
                        break

                    process_to_kill, idle_since = idle_workers.popleft()
                    process_to_kill.kill()
                    workers.remove(process_to_kill)
                    killed_processes.append(process_to_kill)

                with CancelScope(shield=True):
                    for killed_process in killed_processes:
                        await killed_process.aclose()

                break

            workers.remove(process)
        else:
            command = [sys.executable, "-u", "-m", __name__]
            process = await open_process(
                command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
            )
            try:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )
                with fail_after(20):
                    message = await buffered.receive(6)

                if message != b"READY\n":
                    raise BrokenWorkerProcess(
                        f"Worker process returned unexpected response: {message!r}"
                    )

                main_module_path = getattr(sys.modules["__main__"], "__file__", None)
                pickled = pickle.dumps(
                    ("init", sys.path, main_module_path),
                    protocol=pickle.HIGHEST_PROTOCOL,
                )
                await send_raw_command(pickled)
            except (BrokenWorkerProcess, get_cancelled_exc_class()):
                raise
            except BaseException as exc:
                process.kill()
                raise BrokenWorkerProcess(
                    "Error during worker process initialization"
                ) from exc

            workers.add(process)

        with CancelScope(shield=not cancellable):
            try:
                return cast(T_Retval, await send_raw_command(request))
            finally:
                if process in workers:
                    idle_workers.append((process, current_time()))


def current_default_process_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of worker
    processes.

    :return: a capacity limiter object

    """
    try:
        return _default_process_limiter.get()
    except LookupError:
        limiter = CapacityLimiter(os.cpu_count() or 2)
        _default_process_limiter.set(limiter)
        return limiter


def process_worker() -> None:
    # Redirect standard streams to os.devnull so that user code won't interfere with the
    # parent-worker communication
    stdin = sys.stdin
    stdout = sys.stdout
    sys.stdin = open(os.devnull)
    sys.stdout = open(os.devnull, "w")

    stdout.buffer.write(b"READY\n")
    while True:
        retval = exception = None
        try:
            command, *args = pickle.load(stdin.buffer)
        except EOFError:
            return
        except BaseException as exc:
            exception = exc
        else:
            if command == "run":
                func, args = args
                try:
                    retval = func(*args)
                except BaseException as exc:
                    exception = exc
            elif command == "init":
                main_module_path: str | None
                sys.path, main_module_path = args
                del sys.modules["__main__"]
                if main_module_path:
                    # Load the parent's main module but as __mp_main__ instead of
                    # __main__ (like multiprocessing does) to avoid infinite recursion
                    try:
                        spec = spec_from_file_location("__mp_main__", main_module_path)
                        if spec and spec.loader:
                            main = module_from_spec(spec)
                            spec.loader.exec_module(main)
                            sys.modules["__main__"] = main
                    except BaseException as exc:
                        exception = exc

        try:
            if exception is not None:
                status = b"EXCEPTION"
                pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
            else:
                status = b"RETURN"
                pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
        except BaseException as exc:
            exception = exc
            status = b"EXCEPTION"
            pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)

        stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
        stdout.buffer.write(pickled)

        # Respect SIGTERM
        if isinstance(exception, SystemExit):
            raise exception


if __name__ == "__main__":
    process_worker()
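A usage sketch for to_process.run_sync (illustrative, not part of the vendored file). Because the worker re-imports the parent's main module as __mp_main__, the __main__ guard is important so the import does not start another event loop:

import anyio
from anyio import to_process


def crunch(n: int) -> int:
    # CPU-bound work that would otherwise block the event loop
    return sum(i * i for i in range(n))


async def main() -> None:
    result = await to_process.run_sync(crunch, 10_000_000)
    print(result)


# The guard matters: the worker re-imports this module as __mp_main__,
# and the import must not start another event loop
if __name__ == "__main__":
    anyio.run(main)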
69
myenv/lib/python3.12/site-packages/anyio/to_thread.py
Normal file
@@ -0,0 +1,69 @@
from __future__ import annotations

import sys
from collections.abc import Callable
from typing import TypeVar
from warnings import warn

from ._core._eventloop import get_async_backend
from .abc import CapacityLimiter

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")


async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    abandon_on_cancel: bool = False,
    cancellable: bool | None = None,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker thread.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the thread will still run its course but its return value (or any raised
    exception) will be ignored.

    :param func: a callable
    :param args: positional arguments for the callable
    :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
        unchecked on its own) if the host task is cancelled, ``False`` to ignore
        cancellations in the host task until the operation has completed in the worker
        thread
    :param cancellable: deprecated alias of ``abandon_on_cancel``; will override
        ``abandon_on_cancel`` if both parameters are passed
    :param limiter: capacity limiter to use to limit the total number of threads running
        (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """
    if cancellable is not None:
        abandon_on_cancel = cancellable
        warn(
            "The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
            "deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
            DeprecationWarning,
            stacklevel=2,
        )

    return await get_async_backend().run_sync_in_worker_thread(
        func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
    )


def current_default_thread_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of
    concurrent threads.

    :return: a capacity limiter object

    """
    return get_async_backend().current_default_thread_limiter()
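A usage sketch for to_thread.run_sync (illustrative, not part of the vendored file; "demo.bin" is a made-up path). Any blocking callable works the same way, and the default capacity limiter caps the number of concurrent threads:

from pathlib import Path

import anyio
from anyio import to_thread


async def main() -> None:
    # Run a blocking call in a worker thread without stalling the event loop
    data = await to_thread.run_sync(Path("demo.bin").read_bytes)
    print(len(data))


anyio.run(main)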