Run black -C src/trio
A5rocks committed Dec 10, 2024
1 parent 95679fb commit 86b17b2
Showing 97 changed files with 683 additions and 1,680 deletions.
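
For context: `-C` is Black's short flag for `--skip-magic-trailing-comma`, so this commit re-runs the formatter with the "magic trailing comma" disabled and collapses signatures and calls that a trailing comma had previously kept exploded, whenever they fit on one line. The snippet below is an illustrative sketch with made-up names (not taken from this diff) showing the before/after effect:

# Hypothetical example, not from src/trio.
# Before: the trailing comma after "flags" makes Black keep the signature
# one-parameter-per-line, even though it would fit on a single line.
def resolve(
    host: str,
    port: int,
    flags: int,
) -> str: ...

# After `black -C` (--skip-magic-trailing-comma): the trailing comma no longer
# forces the multi-line layout, so Black collapses the signature as long as it
# fits within its default 88-column limit.
def resolve(host: str, port: int, flags: int) -> str: ...
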
4 changes: 1 addition & 3 deletions src/trio/_abc.py
@@ -198,9 +198,7 @@ async def getaddrinfo(
 
     @abstractmethod
     async def getnameinfo(
-        self,
-        sockaddr: tuple[str, int] | tuple[str, int, int, int],
-        flags: int,
+        self, sockaddr: tuple[str, int] | tuple[str, int, int, int], flags: int
     ) -> tuple[str, str]:
         """A custom implementation of :func:`~trio.socket.getnameinfo`.
13 changes: 3 additions & 10 deletions src/trio/_channel.py
@@ -2,10 +2,7 @@
 
 from collections import OrderedDict, deque
 from math import inf
-from typing import (
-    TYPE_CHECKING,
-    Generic,
-)
+from typing import TYPE_CHECKING, Generic
 
 import attrs
 from outcome import Error, Value
@@ -81,10 +78,7 @@ def _open_memory_channel(
     if max_buffer_size < 0:
         raise ValueError("max_buffer_size must be >= 0")
     state: MemoryChannelState[T] = MemoryChannelState(max_buffer_size)
-    return (
-        MemorySendChannel[T]._create(state),
-        MemoryReceiveChannel[T]._create(state),
-    )
+    return (MemorySendChannel[T]._create(state), MemoryReceiveChannel[T]._create(state))
 
 
 # This workaround requires python3.9+, once older python versions are not supported
@@ -94,8 +88,7 @@ def _open_memory_channel(
 # written as a class so you can say open_memory_channel[int](5)
 class open_memory_channel(tuple["MemorySendChannel[T]", "MemoryReceiveChannel[T]"]):
     def __new__(  # type: ignore[misc]  # "must return a subtype"
-        cls,
-        max_buffer_size: int | float,  # noqa: PYI041
+        cls, max_buffer_size: int | float  # noqa: PYI041
     ) -> tuple[MemorySendChannel[T], MemoryReceiveChannel[T]]:
         return _open_memory_channel(max_buffer_size)
 
20 changes: 6 additions & 14 deletions src/trio/_core/_asyncgens.py
@@ -34,10 +34,7 @@
 
 @_core.disable_ki_protection
 def _call_without_ki_protection(
-    f: Callable[_P, _R],
-    /,
-    *args: _P.args,
-    **kwargs: _P.kwargs,
+    f: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs
 ) -> _R:
     return f(*args, **kwargs)
 
@@ -81,8 +78,7 @@ def firstiter(agen: AsyncGeneratorType[object, NoReturn]) -> None:
                     self.prev_hooks.firstiter(agen)
 
         def finalize_in_trio_context(
-            agen: AsyncGeneratorType[object, NoReturn],
-            agen_name: str,
+            agen: AsyncGeneratorType[object, NoReturn], agen_name: str
         ) -> None:
             try:
                 runner.spawn_system_task(
@@ -110,9 +106,7 @@ def finalizer(agen: AsyncGeneratorType[object, NoReturn]) -> None:
             agen_name = name_asyncgen(agen)
             if is_ours:
                 runner.entry_queue.run_sync_soon(
-                    finalize_in_trio_context,
-                    agen,
-                    agen_name,
+                    finalize_in_trio_context, agen, agen_name
                 )
 
                 # Do this last, because it might raise an exception
@@ -151,7 +145,7 @@ def finalizer(agen: AsyncGeneratorType[object, NoReturn]) -> None:
                         raise RuntimeError(
                             f"Non-Trio async generator {agen_name!r} awaited something "
                             "during finalization; install a finalization hook to "
-                            "support this, or wrap it in 'async with aclosing(...):'",
+                            "support this, or wrap it in 'async with aclosing(...):'"
                         )
 
         self.prev_hooks = sys.get_asyncgen_hooks()
@@ -174,7 +168,7 @@ async def finalize_remaining(self, runner: _run.Runner) -> None:
         # them was an asyncgen finalizer that snuck in under the wire.
         runner.entry_queue.run_sync_soon(runner.reschedule, runner.init_task)
         await _core.wait_task_rescheduled(
-            lambda _: _core.Abort.FAILED,  # pragma: no cover
+            lambda _: _core.Abort.FAILED  # pragma: no cover
         )
         self.alive.update(self.trailing_needs_finalize)
         self.trailing_needs_finalize.clear()
@@ -221,9 +215,7 @@ def close(self) -> None:
         sys.set_asyncgen_hooks(*self.prev_hooks)
 
     async def _finalize_one(
-        self,
-        agen: AsyncGeneratorType[object, NoReturn],
-        name: object,
+        self, agen: AsyncGeneratorType[object, NoReturn], name: object
     ) -> None:
         try:
             # This shield ensures that finalize_asyncgen never exits
12 changes: 3 additions & 9 deletions src/trio/_core/_concat_tb.py
@@ -95,29 +95,23 @@ def controller(  # type: ignore[no-any-unimported]
             # no missing test we could add, and no value in coverage nagging
             # us about adding one.
             if (
-                operation.opname
-                in {
-                    "__getattribute__",
-                    "__getattr__",
-                }
+                operation.opname in {"__getattribute__", "__getattr__"}
                 and operation.args[0] == "tb_next"
             ) or TYPE_CHECKING:  # pragma: no cover
                 return tb_next
             # Delegate is reverting to original behaviour
             return operation.delegate()  # type: ignore[no-any-return]
 
         return cast(
-            TracebackType,
-            tputil.make_proxy(controller, type(base_tb), base_tb),
+            TracebackType, tputil.make_proxy(controller, type(base_tb), base_tb)
         )  # Returns proxy to traceback
 
 
 # this is used for collapsing single-exception ExceptionGroups when using
 # `strict_exception_groups=False`. Once that is retired this function and its helper can
 # be removed as well.
 def concat_tb(
-    head: TracebackType | None,
-    tail: TracebackType | None,
+    head: TracebackType | None, tail: TracebackType | None
 ) -> TracebackType | None:
     # We have to use an iterative algorithm here, because in the worst case
     # this might be a RecursionError stack that is by definition too deep to
2 changes: 1 addition & 1 deletion src/trio/_core/_entry_queue.py
@@ -81,7 +81,7 @@ async def kill_everything(  # noqa: RUF029  # await not used
                 parent_nursery = _core.current_task().parent_nursery
                 if parent_nursery is None:
                     raise AssertionError(
-                        "Internal error: `parent_nursery` should never be `None`",
+                        "Internal error: `parent_nursery` should never be `None`"
                     ) from exc  # pragma: no cover
                 parent_nursery.start_soon(kill_everything, exc)
 
11 changes: 3 additions & 8 deletions src/trio/_core/_generated_io_kqueue.py
@@ -45,8 +45,7 @@ def current_kqueue() -> select.kqueue:
 
 @enable_ki_protection
 def monitor_kevent(
-    ident: int,
-    filter: int,
+    ident: int, filter: int
 ) -> AbstractContextManager[_core.UnboundedQueue[select.kevent]]:
     """TODO: these are implemented, but are currently more of a sketch than
     anything real. See `#26
@@ -60,19 +59,15 @@ def monitor_kevent(
 
 @enable_ki_protection
 async def wait_kevent(
-    ident: int,
-    filter: int,
-    abort_func: Callable[[RaiseCancelT], Abort],
+    ident: int, filter: int, abort_func: Callable[[RaiseCancelT], Abort]
 ) -> Abort:
     """TODO: these are implemented, but are currently more of a sketch than
     anything real. See `#26
     <https://github.com/python-trio/trio/issues/26>`__.
     """
     try:
         return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_kevent(
-            ident,
-            filter,
-            abort_func,
+            ident, filter, abort_func
         )
     except AttributeError:
         raise RuntimeError("must be called from async context") from None
19 changes: 5 additions & 14 deletions src/trio/_core/_generated_io_windows.py
@@ -136,18 +136,15 @@ async def wait_overlapped(handle_: int | CData, lpOverlapped: CData | int) -> ob
     """
     try:
         return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_overlapped(
-            handle_,
-            lpOverlapped,
+            handle_, lpOverlapped
         )
     except AttributeError:
         raise RuntimeError("must be called from async context") from None
 
 
 @enable_ki_protection
 async def write_overlapped(
-    handle: int | CData,
-    data: Buffer,
-    file_offset: int = 0,
+    handle: int | CData, data: Buffer, file_offset: int = 0
 ) -> int:
     """TODO: these are implemented, but are currently more of a sketch than
     anything real. See `#26
@@ -156,19 +153,15 @@ async def write_overlapped(
     """
     try:
         return await GLOBAL_RUN_CONTEXT.runner.io_manager.write_overlapped(
-            handle,
-            data,
-            file_offset,
+            handle, data, file_offset
        )
    except AttributeError:
        raise RuntimeError("must be called from async context") from None
 
 
 @enable_ki_protection
 async def readinto_overlapped(
-    handle: int | CData,
-    buffer: Buffer,
-    file_offset: int = 0,
+    handle: int | CData, buffer: Buffer, file_offset: int = 0
 ) -> int:
     """TODO: these are implemented, but are currently more of a sketch than
     anything real. See `#26
@@ -177,9 +170,7 @@ async def readinto_overlapped(
     """
     try:
         return await GLOBAL_RUN_CONTEXT.runner.io_manager.readinto_overlapped(
-            handle,
-            buffer,
-            file_offset,
+            handle, buffer, file_offset
         )
     except AttributeError:
         raise RuntimeError("must be called from async context") from None
5 changes: 1 addition & 4 deletions src/trio/_core/_generated_run.py
@@ -186,10 +186,7 @@ def spawn_system_task(
     """
    try:
        return GLOBAL_RUN_CONTEXT.runner.spawn_system_task(
-            async_fn,
-            *args,
-            name=name,
-            context=context,
+            async_fn, *args, name=name, context=context
        )
    except AttributeError:
        raise RuntimeError("must be called from async context") from None
6 changes: 1 addition & 5 deletions src/trio/_core/_instrumentation.py
@@ -91,11 +91,7 @@ def remove_instrument(self, instrument: Instrument) -> None:
                 if not instruments:
                     del self[hookname]
 
-    def call(
-        self,
-        hookname: str,
-        *args: object,
-    ) -> None:
+    def call(self, hookname: str, *args: object) -> None:
         """Call hookname(*args) on each applicable instrument.
 
         You must first check whether there are any instruments installed for
4 changes: 2 additions & 2 deletions src/trio/_core/_io_epoll.py
@@ -205,7 +205,7 @@ class EpollIOManager:
     _epoll: select.epoll = attrs.Factory(lambda: select.epoll())
     # {fd: EpollWaiters}
     _registered: defaultdict[int, EpollWaiters] = attrs.Factory(
-        lambda: defaultdict(EpollWaiters),
+        lambda: defaultdict(EpollWaiters)
     )
     _force_wakeup: WakeupSocketpair = attrs.Factory(WakeupSocketpair)
     _force_wakeup_fd: int | None = None
@@ -298,7 +298,7 @@ async def _epoll_wait(self, fd: int | _HasFileNo, attr_name: str) -> None:
         waiters = self._registered[fd]
         if getattr(waiters, attr_name) is not None:
             raise _core.BusyResourceError(
-                "another task is already reading / writing this fd",
+                "another task is already reading / writing this fd"
             )
         setattr(waiters, attr_name, _core.current_task())
         self._update_registrations(fd)
25 changes: 7 additions & 18 deletions src/trio/_core/_io_kqueue.py
@@ -45,9 +45,7 @@ class KqueueIOManager:
 
     def __attrs_post_init__(self) -> None:
         force_wakeup_event = select.kevent(
-            self._force_wakeup.wakeup_sock,
-            select.KQ_FILTER_READ,
-            select.KQ_EV_ADD,
+            self._force_wakeup.wakeup_sock, select.KQ_FILTER_READ, select.KQ_EV_ADD
         )
         self._kqueue.control([force_wakeup_event], 0)
         self._force_wakeup_fd = self._force_wakeup.wakeup_sock.fileno()
@@ -122,9 +120,7 @@ def current_kqueue(self) -> select.kqueue:
     @contextmanager
     @_public
     def monitor_kevent(
-        self,
-        ident: int,
-        filter: int,
+        self, ident: int, filter: int
     ) -> Iterator[_core.UnboundedQueue[select.kevent]]:
         """TODO: these are implemented, but are currently more of a sketch than
         anything real. See `#26
@@ -133,7 +129,7 @@ def monitor_kevent(
         key = (ident, filter)
         if key in self._registered:
             raise _core.BusyResourceError(
-                "attempt to register multiple listeners for same ident/filter pair",
+                "attempt to register multiple listeners for same ident/filter pair"
             )
         q = _core.UnboundedQueue[select.kevent]()
         self._registered[key] = q
@@ -144,10 +140,7 @@
 
     @_public
     async def wait_kevent(
-        self,
-        ident: int,
-        filter: int,
-        abort_func: Callable[[RaiseCancelT], Abort],
+        self, ident: int, filter: int, abort_func: Callable[[RaiseCancelT], Abort]
     ) -> Abort:
         """TODO: these are implemented, but are currently more of a sketch than
         anything real. See `#26
@@ -156,7 +149,7 @@ async def wait_kevent(
         key = (ident, filter)
         if key in self._registered:
             raise _core.BusyResourceError(
-                "attempt to register multiple listeners for same ident/filter pair",
+                "attempt to register multiple listeners for same ident/filter pair"
             )
         self._registered[key] = _core.current_task()
 
@@ -169,11 +162,7 @@ def abort(raise_cancel: RaiseCancelT) -> Abort:
         # wait_task_rescheduled does not have its return type typed
         return await _core.wait_task_rescheduled(abort)  # type: ignore[no-any-return]
 
-    async def _wait_common(
-        self,
-        fd: int | _HasFileNo,
-        filter: int,
-    ) -> None:
+    async def _wait_common(self, fd: int | _HasFileNo, filter: int) -> None:
         if not isinstance(fd, int):
             fd = fd.fileno()
         flags = select.KQ_EV_ADD | select.KQ_EV_ONESHOT
@@ -290,5 +279,5 @@ def notify_closing(self, fd: int | _HasFileNo) -> None:
                 # XX this is an interesting example of a case where being able
                 # to close a queue would be useful...
                 raise NotImplementedError(
-                    "can't close an fd that monitor_kevent is using",
+                    "can't close an fd that monitor_kevent is using"
                 )