one-ring-loop 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- one_ring_loop/__init__.py +12 -0
- one_ring_loop/_utils.py +63 -0
- one_ring_loop/cancellation.py +49 -0
- one_ring_loop/exceptions.py +2 -0
- one_ring_loop/fileio/__init__.py +40 -0
- one_ring_loop/log.py +56 -0
- one_ring_loop/loop.py +246 -0
- one_ring_loop/lowlevel.py +36 -0
- one_ring_loop/operations.py +29 -0
- one_ring_loop/py.typed +0 -0
- one_ring_loop/socketio/__init__.py +108 -0
- one_ring_loop/streams/__init__.py +0 -0
- one_ring_loop/streams/buffered.py +97 -0
- one_ring_loop/streams/exceptions.py +14 -0
- one_ring_loop/streams/memory.py +199 -0
- one_ring_loop/streams/protocols.py +29 -0
- one_ring_loop/streams/tls.py +126 -0
- one_ring_loop/sync_primitives.py +139 -0
- one_ring_loop/task/__init__.py +383 -0
- one_ring_loop/task/state.py +35 -0
- one_ring_loop/timerio/__init__.py +17 -0
- one_ring_loop/typedefs.py +18 -0
- one_ring_loop-0.1.0.dist-info/METADATA +83 -0
- one_ring_loop-0.1.0.dist-info/RECORD +25 -0
- one_ring_loop-0.1.0.dist-info/WHEEL +4 -0
one_ring_loop/_utils.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import threading
|
|
4
|
+
from collections import deque
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import TYPE_CHECKING, cast
|
|
7
|
+
|
|
8
|
+
from one_ring_core.results import IOResult
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
from one_ring_core.operations import IOOperation
|
|
12
|
+
from one_ring_loop.loop import Loop
|
|
13
|
+
from one_ring_loop.typedefs import Coro, TaskID
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _get_new_operation_id() -> TaskID:
    """Hands out the next unused ID for submissions to the IO worker."""
    new_id = _local.free_operation_id
    _local.free_operation_id = new_id + 1
    return new_id
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _execute[T: IOResult](op: IOOperation[T]) -> Coro[T]:
|
|
25
|
+
"""Unwrap an IO completion into the expected result type."""
|
|
26
|
+
expected = op.result_type
|
|
27
|
+
completion = yield cast("IOOperation[IOResult]", op)
|
|
28
|
+
if completion is not None and isinstance(result := completion.unwrap(), expected):
|
|
29
|
+
return result
|
|
30
|
+
elif completion is None:
|
|
31
|
+
raise RuntimeError("Low level coroutine was sent None")
|
|
32
|
+
|
|
33
|
+
msg = f"Expected {expected.__name__}, got {type(completion)}. Expected {expected}"
|
|
34
|
+
raise TypeError(msg)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass(kw_only=True)
class _Local(threading.local):
    """Wrapper around threading.local for proper type annotations."""

    # The event loop owned by the current thread, if one is running.
    loop: Loop | None = None
    # Next unused id handed out by _get_new_operation_id().
    free_operation_id: int = 1

    # TODO: Move the below two to be attributes on Loop.
    # Task ids whose cancellation the loop still has to process.
    cancel_queue: deque[TaskID] = field(default_factory=deque)
    # Task ids that have been unparked and await a wakeup drive.
    unpark_queue: deque[TaskID] = field(default_factory=deque)

    def cleanup(self) -> None:
        """Resets all attributes to their initial values."""
        self.loop = None
        self.free_operation_id = 1

        self.cancel_queue = deque()
        self.unpark_queue = deque()
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
# Per-thread loop state; each thread running a loop gets its own instance.
_local = _Local()

__all__ = [
    "_execute",
    "_get_new_operation_id",
    "_local",
]
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from contextlib import contextmanager, suppress
|
|
2
|
+
from typing import TYPE_CHECKING
|
|
3
|
+
|
|
4
|
+
from one_ring_loop.exceptions import Cancelled
|
|
5
|
+
from one_ring_loop.log import get_logger
|
|
6
|
+
from one_ring_loop.task import CancelScope, _create_standalone_task
|
|
7
|
+
from one_ring_loop.timerio import sleep
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from collections.abc import Generator
|
|
11
|
+
|
|
12
|
+
from one_ring_loop.typedefs import Coro
|
|
13
|
+
|
|
14
|
+
logger = get_logger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@contextmanager
def fail_after(delay: float, *, shield: bool = False) -> Generator[CancelScope]:
    """Cancels cancel scope and throws Cancelled after delay.

    Args:
        delay: seconds to wait before cancelling the enclosed scope.
        shield: whether the opened scope is shielded from outer cancellation.
    """

    def cancellation_task(cancel_scope: CancelScope) -> Coro[None]:
        """Background task that sleeps for delay.

        Cancels the cancel scope if not finished after sleep.
        """
        with suppress(Cancelled):
            yield from sleep(delay)

            # `finished` is a closure over the flag below: once the body has
            # completed, a late wakeup must not cancel the scope.
            if not finished:
                cancel_scope.cancel()

    finished = False
    with CancelScope(shielded=shield) as scope:
        task = _create_standalone_task(cancellation_task(scope), None, None)
        yield scope
        # Body completed before the deadline: cancel the sleeping watchdog task.
        task.current_cancel_scope().cancel()
        finished = True
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@contextmanager
def move_on_after(delay: float, *, shield: bool = False) -> Generator[CancelScope]:
    """Runs the enclosed body with a deadline, swallowing its own timeout.

    A ``Cancelled`` raised because this scope's deadline fired is absorbed;
    a cancellation originating from an enclosing scope is re-raised.
    """
    with fail_after(delay, shield=shield) as scope:
        try:
            yield scope
        except Cancelled:
            if scope.cancelled:
                # Our own deadline fired: swallow and move on.
                return
            # Another scope cancelled from above. Re-raise.
            raise
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import TYPE_CHECKING
|
|
4
|
+
|
|
5
|
+
from one_ring_core.operations import Close, FileOpen, FileRead, FileWrite
|
|
6
|
+
from one_ring_loop._utils import _execute
|
|
7
|
+
|
|
8
|
+
if TYPE_CHECKING:
|
|
9
|
+
from one_ring_loop.typedefs import Coro
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass(slots=True, kw_only=True)
class File:
    """Convenience wrapper bundling file operations on one open descriptor."""

    fd: int

    def read(self) -> Coro[str]:
        """Reads from the file and decodes the bytes as UTF-8 text."""
        completion = yield from _execute(FileRead(fd=self.fd))
        return completion.content.decode()

    def write(self, data: bytes | str) -> Coro[int]:
        """Writes *data* (encoding text first) and returns the written size."""
        if isinstance(data, str):
            payload = data.encode()
        else:
            payload = data
        completion = yield from _execute(FileWrite(fd=self.fd, data=payload))
        return completion.size

    def close(self) -> Coro[None]:
        """Closes the underlying file descriptor."""
        yield from _execute(Close(fd=self.fd))
        return None
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def open_file(path: str | Path, mode: str = "r") -> Coro[File]:
    """Opens *path* with *mode* and wraps the resulting fd in a File."""
    raw = path if isinstance(path, str) else str(path)
    result = yield from _execute(FileOpen(path=raw.encode(), mode=mode))
    return File(fd=result.fd)
|
one_ring_loop/log.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
"""Structured logging configuration using structlog."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
import structlog
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def setup_logging() -> None:
    """Configure structlog with sensible defaults.

    Uses JSON output when ``LOG_FORMAT=json`` (e.g. production),
    otherwise uses colored console output for development.
    """
    use_json = os.environ.get("LOG_FORMAT") == "json"
    renderer: structlog.types.Processor = (
        structlog.processors.JSONRenderer()
        if use_json
        else structlog.dev.ConsoleRenderer()
    )

    processors: list[structlog.types.Processor] = [
        structlog.contextvars.merge_contextvars,
        structlog.stdlib.add_log_level,
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.UnicodeDecoder(),
        renderer,
    ]

    structlog.configure(
        processors=processors,
        # Level 0 filters nothing; all log levels pass through.
        wrapper_class=structlog.make_filtering_bound_logger(0),
        context_class=dict,
        logger_factory=structlog.PrintLoggerFactory(file=sys.stderr),
        cache_logger_on_first_use=True,
    )
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
# Module-level flag guarding one-time structlog configuration.
_configured = False


def get_logger(*args: object, **kwargs: object) -> structlog.stdlib.BoundLogger:
    """Return a structlog logger, configuring on first call.

    This avoids running ``setup_logging()`` at import time, which would
    interfere with tests and multi-process setups.

    Args:
        *args: forwarded unchanged to ``structlog.get_logger``.
        **kwargs: forwarded unchanged to ``structlog.get_logger``.
    """
    global _configured  # noqa: PLW0603
    if not _configured:
        setup_logging()
        _configured = True
    return structlog.get_logger(*args, **kwargs)
|
one_ring_loop/loop.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import errno
|
|
4
|
+
from collections import defaultdict
|
|
5
|
+
from contextlib import contextmanager
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from typing import TYPE_CHECKING
|
|
8
|
+
|
|
9
|
+
from one_ring_core.log import get_logger
|
|
10
|
+
from one_ring_core.operations import Cancel, IOOperation
|
|
11
|
+
from one_ring_core.worker import IOWorker
|
|
12
|
+
from one_ring_loop._utils import _get_new_operation_id, _local
|
|
13
|
+
from one_ring_loop.exceptions import Cancelled
|
|
14
|
+
from one_ring_loop.operations import Park, WaitsOn
|
|
15
|
+
from one_ring_loop.task.state import (
|
|
16
|
+
Ready,
|
|
17
|
+
Submitted,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
if TYPE_CHECKING:
|
|
21
|
+
from collections.abc import Generator
|
|
22
|
+
|
|
23
|
+
from one_ring_core.results import IOCompletion, IOResult
|
|
24
|
+
from one_ring_loop.task import Task
|
|
25
|
+
from one_ring_loop.typedefs import Coro, TaskID
|
|
26
|
+
|
|
27
|
+
logger = get_logger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass(slots=True, kw_only=True)
|
|
31
|
+
class Loop:
|
|
32
|
+
"""The one-ring-loop. Bask in it's glory."""
|
|
33
|
+
|
|
34
|
+
# TODO: See if there's a nice way to consolidate the below three attributes.
|
|
35
|
+
"""The tasks currently running"""
|
|
36
|
+
tasks: dict[TaskID, Task] = field(default_factory=dict, init=False)
|
|
37
|
+
|
|
38
|
+
"""Says which other tasks depends on a given task."""
|
|
39
|
+
task_dependencies: defaultdict[TaskID, set[TaskID]] = field(
|
|
40
|
+
default_factory=lambda: defaultdict(set), init=False
|
|
41
|
+
)
|
|
42
|
+
|
|
43
|
+
"""The task which is currently executing synchronously"""
|
|
44
|
+
_current_task: Task | None = None
|
|
45
|
+
|
|
46
|
+
"""Maps operation id to task id for in-flight operations"""
|
|
47
|
+
operation_to_task: dict[int, TaskID] = field(default_factory=dict, init=False)
|
|
48
|
+
|
|
49
|
+
def run_until_complete(self) -> None:
|
|
50
|
+
"""Runs the event loop until all tasks are complete."""
|
|
51
|
+
with IOWorker() as worker:
|
|
52
|
+
while self.tasks:
|
|
53
|
+
self._start_tasks() # Start new tasks
|
|
54
|
+
self._cancel_ready_tasks() # Cancel tasks ready to be submitted
|
|
55
|
+
self._register_io_cancellations(worker) # Register I/O cancellations
|
|
56
|
+
self._register_ready_tasks(worker) # Submit new I/O
|
|
57
|
+
self._drive_unparked_tasks() # Drive wakeups
|
|
58
|
+
self._drive_completed_tasks(worker) # Drive kernel completions
|
|
59
|
+
self._drive_checkpointed_tasks() # Drive checkpoints
|
|
60
|
+
self._remove_done_tasks() # Clean up and wake dependent tasks
|
|
61
|
+
|
|
62
|
+
def _start_tasks(self) -> None:
|
|
63
|
+
"""Starts new tasks."""
|
|
64
|
+
unstarted_tasks = [task for task in self.tasks.values() if not task.is_started]
|
|
65
|
+
for task in unstarted_tasks:
|
|
66
|
+
with self.set_current_task(task):
|
|
67
|
+
task.start()
|
|
68
|
+
|
|
69
|
+
def _register_io_cancellations(self, worker: IOWorker) -> None:
|
|
70
|
+
"""Drains cancellation queue and registers and submits IO cancellations ops."""
|
|
71
|
+
should_submit = False
|
|
72
|
+
while _local.cancel_queue:
|
|
73
|
+
task_id = _local.cancel_queue.popleft()
|
|
74
|
+
if task_id not in self.tasks:
|
|
75
|
+
continue
|
|
76
|
+
task = self.tasks[task_id]
|
|
77
|
+
if task.is_waiting_on:
|
|
78
|
+
continue
|
|
79
|
+
if task.should_cancel():
|
|
80
|
+
if task.is_parked:
|
|
81
|
+
# No kernel op, throw directly
|
|
82
|
+
with self.set_current_task(task):
|
|
83
|
+
task.throw(Cancelled())
|
|
84
|
+
elif (op_id := task.pending_cancel_op_id()) is not None:
|
|
85
|
+
should_submit = True
|
|
86
|
+
cancel_op = Cancel(target_identifier=op_id)
|
|
87
|
+
worker.register(cancel_op, _get_new_operation_id())
|
|
88
|
+
|
|
89
|
+
if should_submit:
|
|
90
|
+
worker.submit()
|
|
91
|
+
|
|
92
|
+
def _get_ready_tasks(self) -> list[Task]:
|
|
93
|
+
"""Gets the tasks ready to register with the IO worker."""
|
|
94
|
+
return [task for task in self.tasks.values() if task.is_ready]
|
|
95
|
+
|
|
96
|
+
def _cancel_ready_tasks(self) -> None:
|
|
97
|
+
"""Cancel tasks with cancelled scopes, otherwise ready for I/O submition."""
|
|
98
|
+
for task in self._get_ready_tasks():
|
|
99
|
+
# Check for cancelled, non-shielded cancel scopes
|
|
100
|
+
if task.should_cancel():
|
|
101
|
+
with self.set_current_task(task):
|
|
102
|
+
task.throw(Cancelled(f"Task {task.task_id} was cancelled"))
|
|
103
|
+
|
|
104
|
+
def _register_ready_tasks(self, worker: IOWorker) -> None:
|
|
105
|
+
"""Register ready tasks with the I/O worker."""
|
|
106
|
+
should_submit = False
|
|
107
|
+
for task in self._get_ready_tasks():
|
|
108
|
+
# If a .throw call finished the task, don't register it.
|
|
109
|
+
if task.is_done:
|
|
110
|
+
continue
|
|
111
|
+
|
|
112
|
+
match task.state:
|
|
113
|
+
case Ready(operation=IOOperation() as op):
|
|
114
|
+
should_submit = True
|
|
115
|
+
op_id = _get_new_operation_id()
|
|
116
|
+
worker.register(op, op_id)
|
|
117
|
+
self.operation_to_task[op_id] = task.task_id
|
|
118
|
+
task.state = Submitted(operation=op, op_id=op_id)
|
|
119
|
+
case Ready(operation=WaitsOn(task_ids=ids)):
|
|
120
|
+
for task_id in ids:
|
|
121
|
+
self.task_dependencies[task_id].add(task.task_id)
|
|
122
|
+
task.state = Submitted(operation=WaitsOn(task_ids=ids))
|
|
123
|
+
case Ready(operation=Park()):
|
|
124
|
+
task.state = Submitted(operation=Park())
|
|
125
|
+
case _:
|
|
126
|
+
continue
|
|
127
|
+
|
|
128
|
+
if should_submit:
|
|
129
|
+
worker.submit()
|
|
130
|
+
|
|
131
|
+
def _collect_completions(self, worker: IOWorker) -> set[IOCompletion[IOResult]]:
|
|
132
|
+
"""Waits for completions if all tasks are waiting, otherwise peeks."""
|
|
133
|
+
completions: set[IOCompletion] = set()
|
|
134
|
+
|
|
135
|
+
if all(task.is_submitted for task in self.tasks.values()):
|
|
136
|
+
if not any(task.has_pending_io for task in self.tasks.values()):
|
|
137
|
+
raise RuntimeError("Deadlock: all tasks blocked, no pending I/O")
|
|
138
|
+
completion = worker.wait()
|
|
139
|
+
completions.add(completion)
|
|
140
|
+
else:
|
|
141
|
+
# Peek until we get None.
|
|
142
|
+
while (completion := worker.peek()) is not None:
|
|
143
|
+
completions.add(completion)
|
|
144
|
+
|
|
145
|
+
return completions
|
|
146
|
+
|
|
147
|
+
def _drive_completed_tasks(self, worker: IOWorker) -> None:
|
|
148
|
+
completions = self._collect_completions(worker)
|
|
149
|
+
for completion in completions:
|
|
150
|
+
op_id = completion.user_data
|
|
151
|
+
task_id = self.operation_to_task.pop(op_id, None)
|
|
152
|
+
|
|
153
|
+
if task_id is None:
|
|
154
|
+
continue
|
|
155
|
+
task = self.tasks.get(task_id)
|
|
156
|
+
if task is None:
|
|
157
|
+
continue
|
|
158
|
+
|
|
159
|
+
with self.set_current_task(task):
|
|
160
|
+
if (
|
|
161
|
+
isinstance(oserror := completion.result, OSError)
|
|
162
|
+
and oserror.errno is not None
|
|
163
|
+
and oserror.errno == errno.ECANCELED
|
|
164
|
+
):
|
|
165
|
+
task.throw(Cancelled())
|
|
166
|
+
else:
|
|
167
|
+
task.drive(completion)
|
|
168
|
+
|
|
169
|
+
def _drive_unparked_tasks(self) -> None:
|
|
170
|
+
"""Drives tasks that have been unparked.
|
|
171
|
+
|
|
172
|
+
Needs to run before _drive_completed_tasks to avoid deadlocks.
|
|
173
|
+
"""
|
|
174
|
+
while _local.unpark_queue:
|
|
175
|
+
unparked_task_id = _local.unpark_queue.popleft()
|
|
176
|
+
unparked_task = self.tasks[unparked_task_id]
|
|
177
|
+
|
|
178
|
+
with self.set_current_task(unparked_task):
|
|
179
|
+
unparked_task.drive(None)
|
|
180
|
+
|
|
181
|
+
def _drive_checkpointed_tasks(self) -> None:
|
|
182
|
+
"""Drives tasks that have been checkpointed."""
|
|
183
|
+
checkpointed_tasks = [
|
|
184
|
+
task for task in self.tasks.values() if task.is_checkpointed
|
|
185
|
+
]
|
|
186
|
+
|
|
187
|
+
for task in checkpointed_tasks:
|
|
188
|
+
with self.set_current_task(task):
|
|
189
|
+
task.drive(None)
|
|
190
|
+
|
|
191
|
+
def _remove_done_tasks(self) -> None:
|
|
192
|
+
done_tasks = [task for task in self.tasks.values() if task.is_done]
|
|
193
|
+
for done_task in done_tasks:
|
|
194
|
+
# Now drive tasks that were dependant on the done tasks.
|
|
195
|
+
while self.task_dependencies[done_task.task_id]:
|
|
196
|
+
waiting_task_id = self.task_dependencies[done_task.task_id].pop()
|
|
197
|
+
waiting_task = self.tasks[waiting_task_id]
|
|
198
|
+
if waiting_task.is_done:
|
|
199
|
+
# The loop has already driven this forward, for example if all tasks
|
|
200
|
+
# waiting_task depended on finished in the same loop iteration.
|
|
201
|
+
continue
|
|
202
|
+
with self.set_current_task(waiting_task):
|
|
203
|
+
waiting_task.drive(None)
|
|
204
|
+
|
|
205
|
+
self.task_dependencies.pop(done_task.task_id)
|
|
206
|
+
|
|
207
|
+
self.tasks = {
|
|
208
|
+
task_id: task for task_id, task in self.tasks.items() if not task.is_done
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
@property
|
|
212
|
+
def current_task(self) -> Task:
|
|
213
|
+
"""Gets currently executing task."""
|
|
214
|
+
if self._current_task is None:
|
|
215
|
+
raise RuntimeError("No task currently executing")
|
|
216
|
+
|
|
217
|
+
return self._current_task
|
|
218
|
+
|
|
219
|
+
@contextmanager
|
|
220
|
+
def set_current_task(self, task: Task) -> Generator[None]:
|
|
221
|
+
"""Utility wrapper for setting and removing currently executing task."""
|
|
222
|
+
self._current_task = task
|
|
223
|
+
try:
|
|
224
|
+
yield
|
|
225
|
+
finally:
|
|
226
|
+
self._current_task = None
|
|
227
|
+
|
|
228
|
+
def add_task(self, task: Task) -> None:
|
|
229
|
+
"""Adds a task to be run by the event loop."""
|
|
230
|
+
self.tasks[task.task_id] = task
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def run(gen: Coro) -> None:
    """Entry point for running the event loop.

    Creates a Task from the generator on the event loop, and runs the loop.

    Args:
        gen: the entry coroutine
    """
    from one_ring_loop.task import _create_standalone_task  # noqa: PLC0415

    _local.loop = Loop()
    try:
        _create_standalone_task(gen, None, None)
        _local.loop.run_until_complete()
    finally:
        # Always reset thread-local state, even when the loop raises, so a
        # failed run does not poison the next run() on this thread with a
        # stale loop reference or leftover cancel/unpark queue entries.
        _local.cleanup()
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
from typing import TYPE_CHECKING
|
|
2
|
+
|
|
3
|
+
from one_ring_loop._utils import _local
|
|
4
|
+
from one_ring_loop.operations import Checkpoint
|
|
5
|
+
|
|
6
|
+
if TYPE_CHECKING:
|
|
7
|
+
from one_ring_loop.loop import Loop
|
|
8
|
+
from one_ring_loop.task import Task
|
|
9
|
+
from one_ring_loop.typedefs import Coro, TaskID
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def get_running_loop() -> Loop:
    """Gets the currently running event loop. I don't want to expose this easily."""
    loop = _local.loop
    if loop is None:
        raise RuntimeError("No event loop running")
    return loop
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_current_task() -> Task:
    """Gets the currently executing task from the loop."""
    loop = get_running_loop()
    return loop.current_task
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def unpark(task_id: TaskID) -> None:
    """Queues a parked task to be resumed on the loop's next wakeup pass.

    Args:
        task_id: The id of the task to unpark
    """
    _local.unpark_queue.append(task_id)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def checkpoint() -> Coro[None]:
    """No-op operation that hands control back to the event loop once."""
    yield Checkpoint()
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""This namespace includes extra operations for the event loop.
|
|
2
|
+
|
|
3
|
+
This extends the operations provided by one_ring_core.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from typing import TYPE_CHECKING
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from one_ring_loop.typedefs import TaskID
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass(slots=True, kw_only=True)
|
|
16
|
+
class WaitsOn:
|
|
17
|
+
"""For dependency relationships between tasks."""
|
|
18
|
+
|
|
19
|
+
task_ids: tuple[TaskID, ...]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass(slots=True, kw_only=True)
|
|
23
|
+
class Park:
|
|
24
|
+
"""Parks the yielding task until resumed by another task."""
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass(slots=True, kw_only=True)
|
|
28
|
+
class Checkpoint:
|
|
29
|
+
"""Sentinel that yields control back to event loop."""
|
one_ring_loop/py.typed
ADDED
|
File without changes
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
from dataclasses import dataclass
|
|
2
|
+
from typing import TYPE_CHECKING
|
|
3
|
+
|
|
4
|
+
from one_ring_core.operations import (
|
|
5
|
+
Close,
|
|
6
|
+
SocketAccept,
|
|
7
|
+
SocketBind,
|
|
8
|
+
SocketConnect,
|
|
9
|
+
SocketCreate,
|
|
10
|
+
SocketListen,
|
|
11
|
+
SocketRecv,
|
|
12
|
+
SocketSend,
|
|
13
|
+
SocketSetOpt,
|
|
14
|
+
)
|
|
15
|
+
from one_ring_loop._utils import _execute
|
|
16
|
+
|
|
17
|
+
if TYPE_CHECKING:
|
|
18
|
+
from one_ring_loop.typedefs import Coro
|
|
19
|
+
|
|
20
|
+
# TODO: Clean up
|
|
21
|
+
# Better names
|
|
22
|
+
# Centralize "close" together with fileio
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _create() -> Coro[int]:
    """Creates a new socket and returns its file descriptor."""
    completion = yield from _execute(SocketCreate())
    return completion.fd
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _set_options(fd: int) -> Coro[None]:
    """Sets socket options on *fd*."""
    yield from _execute(SocketSetOpt(fd=fd))
    return None
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _bind(fd: int, host: bytes, port: int) -> Coro[None]:
    """Binds socket *fd* to the given host and port."""
    yield from _execute(SocketBind(fd=fd, ip=host, port=port))
    return None
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _listen(fd: int) -> Coro[None]:
    """Marks socket *fd* as a listening socket."""
    yield from _execute(SocketListen(fd=fd))
    return None
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _connect(fd: int, host: bytes, port: int) -> Coro[None]:
    """Connects socket *fd* to the given host and port."""
    yield from _execute(SocketConnect(fd=fd, ip=host, port=port))
    return None
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def create_server(host: bytes, port: int) -> Coro[Server]:
    """Creates a server socket: set options, bind, listen, wrap in Server."""
    server_fd = yield from _create()
    yield from _set_options(server_fd)
    yield from _bind(server_fd, host, port)
    yield from _listen(server_fd)
    return Server(fd=server_fd)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def connect(host: bytes, port: int) -> Coro[Connection]:
    """Creates a client socket and connects it to a listening socket."""
    client_fd = yield from _create()
    yield from _connect(client_fd, host, port)
    return Connection(fd=client_fd)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
@dataclass(slots=True, kw_only=True)
class Server:
    """A listening socket used to accept new connections."""

    # The listening socket's file descriptor.
    fd: int

    def accept(self) -> Coro[Connection]:
        """Waits until there's a connection to accept.

        Returns:
            the accepted connection, wrapped in a Connection
        """
        completion = yield from _execute(SocketAccept(fd=self.fd))
        return Connection(fd=completion.fd)

    def close(self) -> Coro[None]:
        """Closes the listening socket."""
        yield from _execute(Close(fd=self.fd))
        return None
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
@dataclass(slots=True, kw_only=True)
class Connection:
    """One established socket connection, on either the server or client side."""

    # File descriptor of the connected socket (server- or client-side).
    fd: int

    def receive(self, max_bytes: int = 65536) -> Coro[bytes]:
        """Receives at most *max_bytes* of data from the socket."""
        completion = yield from _execute(SocketRecv(fd=self.fd, size=max_bytes))
        return completion.content

    def send(self, data: bytes, /) -> Coro[None]:
        """Sends *data* over the socket."""
        yield from _execute(SocketSend(fd=self.fd, data=data))
        return None

    def close(self) -> Coro[None]:
        """Closes the socket."""
        yield from _execute(Close(fd=self.fd))
        return None
|
|
File without changes
|