mm-std 0.3.13-py3-none-any.whl → 0.3.15-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mm_std/__init__.py +7 -6
- mm_std/concurrency/__init__.py +0 -0
- mm_std/concurrency/async_decorators.py +19 -0
- mm_std/{async_concurrency.py → concurrency/async_scheduler.py} +5 -25
- mm_std/concurrency/async_task_runner.py +113 -0
- mm_std/concurrency/sync_decorators.py +35 -0
- mm_std/concurrency/sync_scheduler.py +73 -0
- mm_std/concurrency/sync_task_runner.py +45 -0
- mm_std/http_.py +28 -25
- mm_std/types_.py +5 -1
- {mm_std-0.3.13.dist-info → mm_std-0.3.15.dist-info}/METADATA +2 -2
- {mm_std-0.3.13.dist-info → mm_std-0.3.15.dist-info}/RECORD +13 -8
- mm_std/concurrency.py +0 -152
- {mm_std-0.3.13.dist-info → mm_std-0.3.15.dist-info}/WHEEL +0 -0
mm_std/__init__.py
CHANGED
@@ -1,12 +1,13 @@
-from .async_concurrency import AsyncScheduler as AsyncScheduler
-from .async_concurrency import async_synchronized as async_synchronized
 from .command import CommandResult as CommandResult
 from .command import run_command as run_command
 from .command import run_ssh_command as run_ssh_command
-from .concurrency import
-from .concurrency import
-from .concurrency import
-from .concurrency import
+from .concurrency.async_decorators import async_synchronized as async_synchronized
+from .concurrency.async_scheduler import AsyncScheduler as AsyncScheduler
+from .concurrency.async_task_runner import AsyncTaskRunner as AsyncTaskRunner
+from .concurrency.sync_decorators import synchronized as synchronized
+from .concurrency.sync_decorators import synchronized_parameter as synchronized_parameter
+from .concurrency.sync_scheduler import Scheduler as Scheduler
+from .concurrency.sync_task_runner import ConcurrentTasks as ConcurrentTasks
 from .config import BaseConfig as BaseConfig
 from .crypto import fernet_decrypt as fernet_decrypt
 from .crypto import fernet_encrypt as fernet_encrypt
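Because every symbol is re-exported in the explicit `X as X` form, the package root stays the stable import surface after the move to the concurrency subpackage. A small sketch of downstream imports against the new list (the grouping is illustrative):

# The internal layout changed, but the public names did not:
from mm_std import (
    AsyncScheduler,
    AsyncTaskRunner,  # new in this release
    ConcurrentTasks,
    Scheduler,
    async_synchronized,
    synchronized,
    synchronized_parameter,
)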
mm_std/concurrency/__init__.py
ADDED
File without changes (new empty file)

mm_std/concurrency/async_decorators.py
ADDED
@@ -0,0 +1,19 @@
+import functools
+from collections.abc import Awaitable, Callable
+from typing import ParamSpec, TypeVar
+
+import anyio
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+def async_synchronized(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]:
+    lock = anyio.Lock()
+
+    @functools.wraps(func)
+    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+        async with lock:
+            return await func(*args, **kwargs)
+
+    return wrapper
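A usage sketch of the decorator added above: concurrent callers queue on the shared anyio.Lock, so the read-modify-write below cannot interleave. The bump coroutine and counts are illustrative, not part of the package.

import anyio

from mm_std import async_synchronized

counter = 0

@async_synchronized
async def bump() -> None:
    # The shared anyio.Lock serializes this read-modify-write,
    # so concurrent callers cannot interleave.
    global counter
    current = counter
    await anyio.sleep(0.01)
    counter = current + 1

async def main() -> None:
    async with anyio.create_task_group() as tg:
        for _ in range(10):
            tg.start_soon(bump)

anyio.run(main)
print(counter)  # 10: without the decorator this would likely be lower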
mm_std/{async_concurrency.py → concurrency/async_scheduler.py}
RENAMED
@@ -1,17 +1,12 @@
-import functools
 import threading
-from collections.abc import Awaitable, Callable
 from dataclasses import dataclass, field
-from datetime import
+from datetime import datetime
 from logging import Logger
-from typing import ParamSpec, TypeVar
 
 import anyio
 
-
-
-Args = tuple[object, ...]
-Kwargs = dict[str, object]
+from mm_std.date import utc_now
+from mm_std.types_ import Args, AsyncFunc, Kwargs
 
 
 class AsyncScheduler:
@@ -46,7 +41,7 @@ class AsyncScheduler:
         """Internal loop for running a single task repeatedly."""
         task = self.tasks[task_id]
         while self._running:
-            task.last_run =
+            task.last_run = utc_now()
             task.run_count += 1
             try:
                 await task.func(*task.args, **task.kwargs)
@@ -55,7 +50,7 @@ class AsyncScheduler:
                 self._logger.exception("AsyncScheduler exception")
 
             # Calculate elapsed time and sleep if needed so that tasks never overlap.
-            elapsed = (
+            elapsed = (utc_now() - task.last_run).total_seconds()
             sleep_time = task.interval - elapsed
             if sleep_time > 0:
                 try:
@@ -109,18 +104,3 @@ class AsyncScheduler:
         self._thread.join(timeout=5)
         self._thread = None
         self._logger.debug("AsyncScheduler stopped")
-
-
-P = ParamSpec("P")
-R = TypeVar("R")
-
-
-def async_synchronized(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]:
-    lock = anyio.Lock()
-
-    @functools.wraps(func)
-    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
-        async with lock:
-            return await func(*args, **kwargs)
-
-    return wrapper
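The constructor and task-registration API of AsyncScheduler fall outside the hunks shown here, so the sketch below is hypothetical: the AsyncScheduler(...) and add_task(...) signatures are assumptions inferred from the fields visible in the run loop (func, args, kwargs, interval, last_run, run_count).

import logging

from mm_std import AsyncScheduler

async def poll() -> None:
    print("tick")

scheduler = AsyncScheduler(logging.getLogger("app"))  # constructor signature assumed
scheduler.add_task("poll", interval=5.0, func=poll)   # hypothetical registration call
scheduler.start()  # the loop runs in a background thread until stop() is called
scheduler.stop()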
mm_std/concurrency/async_task_runner.py
ADDED
@@ -0,0 +1,113 @@
+from __future__ import annotations
+
+from collections.abc import Awaitable, Callable
+from dataclasses import dataclass
+from typing import Any
+
+import anyio
+
+
+class AsyncTaskRunner:
+    """
+    AsyncTaskRunner executes a batch of asynchronous tasks with controlled concurrency.
+    Note: This runner is designed for one-time use. Create a new instance for each batch of tasks.
+    """
+
+    @dataclass
+    class Result:
+        results: dict[str, Any]  # Maps task_id to result
+        exceptions: dict[str, Any]  # Maps task_id to exception (if any)
+        is_ok: bool  # True if no exception and no timeout occurred
+        is_timeout: bool  # True if at least one task was cancelled due to timeout
+
+    @dataclass
+    class Task:
+        """Individual task representation"""
+
+        task_id: str
+        async_func: Callable[..., Awaitable[Any]]
+        args: tuple[Any, ...]
+        kwargs: dict[str, Any]
+
+    def __init__(self, max_concurrent_tasks: int, timeout: float | None = None) -> None:
+        """
+        :param max_concurrent_tasks: Maximum number of tasks that can run concurrently.
+        :param timeout: Optional overall timeout in seconds for running all tasks.
+        """
+        if timeout is not None and timeout <= 0:
+            raise ValueError("Timeout must be positive if specified.")
+        self.max_concurrent_tasks: int = max_concurrent_tasks
+        self.timeout: float | None = timeout
+        self.limiter: anyio.CapacityLimiter = anyio.CapacityLimiter(max_concurrent_tasks)
+        self._tasks: list[AsyncTaskRunner.Task] = []
+        self._was_run: bool = False
+        self._task_ids: set[str] = set()
+
+    def add_task(
+        self,
+        task_id: str,
+        async_func: Callable[..., Awaitable[Any]],
+        *args: object,
+        **kwargs: object,
+    ) -> None:
+        """
+        Adds a task to the runner that will be executed when run() is called.
+
+        :param task_id: Unique identifier for the task.
+        :param async_func: The asynchronous function to execute.
+        :param args: Positional arguments for async_func.
+        :param kwargs: Keyword arguments for async_func.
+        :raises RuntimeError: If the runner has already been used.
+        :raises ValueError: If task_id is empty or already exists.
+        """
+        if self._was_run:
+            raise RuntimeError("This AsyncTaskRunner has already been used. Create a new instance for new tasks.")
+
+        if not task_id:
+            raise ValueError("Task ID cannot be empty")
+
+        if task_id in self._task_ids:
+            raise ValueError(f"Task ID '{task_id}' already exists. All task IDs must be unique.")
+
+        self._task_ids.add(task_id)
+        self._tasks.append(AsyncTaskRunner.Task(task_id, async_func, args, kwargs))
+
+    async def run(self) -> AsyncTaskRunner.Result:
+        """
+        Executes all added tasks with concurrency limited by the capacity limiter.
+        If a timeout is specified, non-finished tasks are cancelled.
+
+        :return: AsyncTaskRunner.Result containing task results, exceptions, and flags indicating overall status.
+        :raises RuntimeError: If the runner has already been used.
+        """
+        if self._was_run:
+            raise RuntimeError("This AsyncTaskRunner instance can only be run once. Create a new instance for new tasks.")
+
+        self._was_run = True
+        results: dict[str, Any] = {}
+        exceptions: dict[str, Any] = {}
+        is_timeout: bool = False
+
+        async def run_task(task: AsyncTaskRunner.Task) -> None:
+            async with self.limiter:
+                try:
+                    res: Any = await task.async_func(*task.args, **task.kwargs)
+                    results[task.task_id] = res
+                except Exception as e:
+                    exceptions[task.task_id] = e
+
+        try:
+            if self.timeout is not None:
+                with anyio.fail_after(self.timeout):
+                    async with anyio.create_task_group() as tg:
+                        for task in self._tasks:
+                            tg.start_soon(run_task, task)
+            else:
+                async with anyio.create_task_group() as tg:
+                    for task in self._tasks:
+                        tg.start_soon(run_task, task)
+        except TimeoutError:
+            is_timeout = True
+
+        is_ok: bool = (not exceptions) and (not is_timeout)
+        return AsyncTaskRunner.Result(results=results, exceptions=exceptions, is_ok=is_ok, is_timeout=is_timeout)
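A usage sketch assembled from the API added above (the fetch coroutine is illustrative): tasks are registered once, run() is awaited once, and the returned Result separates successful values from failures.

import anyio

from mm_std import AsyncTaskRunner

async def fetch(delay: float) -> float:
    await anyio.sleep(delay)
    return delay

async def main() -> None:
    runner = AsyncTaskRunner(max_concurrent_tasks=2, timeout=5)
    runner.add_task("a", fetch, 0.1)
    runner.add_task("b", fetch, 0.2)
    result = await runner.run()
    # result.results maps task_id -> return value; result.exceptions maps task_id -> error
    assert result.is_ok and result.results["a"] == 0.1

anyio.run(main)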
mm_std/concurrency/sync_decorators.py
ADDED
@@ -0,0 +1,35 @@
+import functools
+from collections import defaultdict
+from collections.abc import Callable
+from threading import Lock
+
+
+def synchronized_parameter[T, **P](arg_index: int = 0, skip_if_locked: bool = False) -> Callable[..., Callable[P, T | None]]:
+    locks: dict[object, Lock] = defaultdict(Lock)
+
+    def outer(func: Callable[P, T]) -> Callable[P, T | None]:
+        @functools.wraps(func)
+        def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | None:
+            if skip_if_locked and locks[args[arg_index]].locked():
+                return None
+            try:
+                with locks[args[arg_index]]:
+                    return func(*args, **kwargs)
+            finally:
+                locks.pop(args[arg_index], None)
+
+        wrapper.locks = locks  # type: ignore[attr-defined]
+        return wrapper
+
+    return outer
+
+
+def synchronized[T, **P](fn: Callable[P, T]) -> Callable[P, T]:
+    lock = Lock()
+
+    @functools.wraps(fn)
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
+        with lock:
+            return fn(*args, **kwargs)
+
+    return wrapper
mm_std/concurrency/sync_scheduler.py
ADDED
@@ -0,0 +1,73 @@
+import time
+from dataclasses import dataclass, field
+from datetime import datetime
+from logging import Logger
+from threading import Thread
+
+from mm_std.date import is_too_old, utc_now
+from mm_std.types_ import Func
+
+
+class Scheduler:
+    def __init__(self, log: Logger, loop_delay: float = 0.5, debug: bool = False) -> None:
+        self.log = log
+        self.debug = debug
+        self.loop_delay = loop_delay
+        self.stopped = False
+        self.jobs: list[Scheduler.Job] = []
+        self.run_immediately_jobs: list[Scheduler.Job] = []
+        self._debug("init")
+
+    @dataclass
+    class Job:
+        func: Func
+        args: tuple[object, ...]
+        interval: int
+        is_running: bool = False
+        last_at: datetime = field(default_factory=utc_now)
+
+        def __str__(self) -> str:
+            return str(self.func)
+
+    def add_job(self, func: Func, interval: int, args: tuple[object, ...] = (), run_immediately: bool = False) -> None:
+        job = Scheduler.Job(func, args, interval)
+        self.jobs.append(job)
+        if run_immediately:
+            self.run_immediately_jobs.append(job)
+
+    def _run_job(self, job: Job) -> None:
+        self._debug(f"_run_job: {job}")
+        if self.stopped:
+            return
+        try:
+            job.func(*job.args)
+            self._debug(f"_run_job: {job} done")
+        except Exception:
+            self.log.exception("scheduler error")
+            self._debug(f"_run_job: {job} error")
+        finally:
+            job.is_running = False
+
+    def _start(self) -> None:
+        self._debug(f"_start: jobs={len(self.jobs)}, run_immediately_jobs={len(self.run_immediately_jobs)}")
+        for j in self.run_immediately_jobs:
+            j.is_running = True
+            j.last_at = utc_now()
+            Thread(target=self._run_job, args=(j,)).start()
+        while not self.stopped:
+            for j in self.jobs:
+                if not j.is_running and is_too_old(j.last_at, j.interval):
+                    j.is_running = True
+                    j.last_at = utc_now()
+                    Thread(target=self._run_job, args=(j,)).start()
+            time.sleep(self.loop_delay)
+
+    def _debug(self, message: str) -> None:
+        if self.debug:
+            self.log.debug("Scheduler: %s", message)
+
+    def start(self) -> None:
+        Thread(target=self._start).start()
+
+    def stop(self) -> None:
+        self.stopped = True
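A brief usage sketch of the thread-based Scheduler added above (the heartbeat job is illustrative). Each due job runs on its own Thread; the polling loop sleeps loop_delay seconds between scans.

import logging
import time

from mm_std import Scheduler

def heartbeat() -> None:
    print("alive")

scheduler = Scheduler(logging.getLogger("jobs"), loop_delay=0.5)
scheduler.add_job(heartbeat, interval=10, run_immediately=True)
scheduler.start()   # runs the polling loop in a background thread
time.sleep(30)
scheduler.stop()    # the loop exits after the current iteration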
mm_std/concurrency/sync_task_runner.py
ADDED
@@ -0,0 +1,45 @@
+import concurrent
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass
+
+from mm_std.types_ import Args, Func, Kwargs
+
+
+class ConcurrentTasks:
+    def __init__(self, max_workers: int = 5, timeout: int | None = None, thread_name_prefix: str = "concurrent_tasks") -> None:
+        self.max_workers = max_workers
+        self.timeout = timeout
+        self.thread_name_prefix = thread_name_prefix
+        self.tasks: list[ConcurrentTasks.Task] = []
+        self.exceptions: dict[str, Exception] = {}
+        self.error = False
+        self.timeout_error = False
+        self.result: dict[str, object] = {}
+
+    @dataclass
+    class Task:
+        key: str
+        func: Func
+        args: Args
+        kwargs: Kwargs
+
+    def add_task(self, key: str, func: Func, args: Args = (), kwargs: Kwargs | None = None) -> None:
+        if kwargs is None:
+            kwargs = {}
+        self.tasks.append(ConcurrentTasks.Task(key, func, args, kwargs))
+
+    def execute(self) -> None:
+        with ThreadPoolExecutor(self.max_workers, thread_name_prefix=self.thread_name_prefix) as executor:
+            future_to_key = {executor.submit(task.func, *task.args, **task.kwargs): task.key for task in self.tasks}
+            try:
+                result_map = concurrent.futures.as_completed(future_to_key, timeout=self.timeout)
+                for future in result_map:
+                    key = future_to_key[future]
+                    try:
+                        self.result[key] = future.result()
+                    except Exception as err:
+                        self.error = True
+                        self.exceptions[key] = err
+            except concurrent.futures.TimeoutError:
+                self.error = True
+                self.timeout_error = True
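A usage sketch of ConcurrentTasks as added above (the download function is illustrative): results and exceptions are collected per task key, and a pool-level timeout sets both error and timeout_error.

from mm_std import ConcurrentTasks

def download(url: str) -> str:
    return f"body of {url}"

tasks = ConcurrentTasks(max_workers=5, timeout=30)
tasks.add_task("home", download, args=("https://example.com",))
tasks.add_task("docs", download, args=("https://example.com/docs",))
tasks.execute()

# Results and failures are keyed by task name; a timeout flips both flags.
if not tasks.error:
    print(tasks.result["home"])
else:
    print(tasks.exceptions, tasks.timeout_error)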
mm_std/http_.py
CHANGED
@@ -3,6 +3,7 @@ from dataclasses import asdict, dataclass, field
 from typing import Any
 from urllib.parse import urlencode
 
+import anyio
 import httpx
 import pydash
 import requests
@@ -170,31 +171,33 @@ async def async_hrequest(
     else:
         data = params
 
-    (old lines 173-197: removed implementation not rendered by the diff source)
+    with anyio.move_on_after(timeout):
+        try:
+            async with httpx.AsyncClient(
+                proxy=proxy,
+                timeout=timeout,
+                cookies=cookies,
+                auth=auth,
+                verify=verify,
+            ) as client:
+                r = await client.request(
+                    method,
+                    url,
+                    headers=headers,
+                    params=query_params,
+                    json=json_,
+                    data=data,
+                )
+                return HResponse(code=r.status_code, body=r.text, headers=dict(r.headers))
+        except httpx.TimeoutException:
+            return HResponse(error="timeout")
+        except httpx.ProxyError:
+            return HResponse(error="proxy_error")
+        except httpx.RequestError as err:
+            return HResponse(error=f"connection_error: {err}")
+        except Exception as err:
+            return HResponse(error=f"exception: {err}")
+    return HResponse(error="timeout")
 
 
 def add_query_params_to_url(url: str, params: dict[str, object]) -> str:
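Note the two timeout layers in the rewritten function: httpx's timeout applies to individual network operations, while the enclosing anyio.move_on_after(timeout) caps total wall-clock time and, when it expires, falls through to the trailing HResponse(error="timeout"). A minimal sketch of that cancellation pattern in isolation (slow_operation is illustrative):

import anyio

async def slow_operation() -> str:
    await anyio.sleep(10)
    return "done"

async def capped() -> str:
    # move_on_after cancels the enclosed work once the deadline passes,
    # then execution continues after the with-block instead of raising.
    with anyio.move_on_after(2):
        return await slow_operation()
    return "timeout"

print(anyio.run(capped))  # prints "timeout" after about 2 seconds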
mm_std/types_.py
CHANGED
@@ -1,4 +1,8 @@
-from collections.abc import Callable
+from collections.abc import Awaitable, Callable
 from typing import Any
 
 type CallableAny = Callable[..., Any]
+type Func = Callable[..., object]
+type AsyncFunc = Callable[..., Awaitable[object]]
+type Args = tuple[object, ...]
+type Kwargs = dict[str, object]
{mm_std-0.3.13.dist-info → mm_std-0.3.15.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mm-std
-Version: 0.3.13
+Version: 0.3.15
 Requires-Python: >=3.12
 Requires-Dist: anyio>=4.9.0
 Requires-Dist: cryptography~=44.0.2
@@ -8,7 +8,7 @@ Requires-Dist: httpx[socks]>=0.28.1
 Requires-Dist: pydantic-settings>=2.8.1
 Requires-Dist: pydantic~=2.10.6
 Requires-Dist: pydash~=8.0.5
-Requires-Dist: python-dotenv~=1.0
+Requires-Dist: python-dotenv~=1.1.0
 Requires-Dist: requests[socks]~=2.32.3
 Requires-Dist: rich~=13.9.4
 Requires-Dist: tomlkit~=0.13.2
{mm_std-0.3.13.dist-info → mm_std-0.3.15.dist-info}/RECORD
RENAMED
@@ -1,14 +1,12 @@
-mm_std/__init__.py,sha256=
-mm_std/async_concurrency.py,sha256=q4I2NWbm6HkGLg2sWaUDitp4J02zMCopT5CLueEfdu8,4432
+mm_std/__init__.py,sha256=UkRzp_lm7yddXImD8nRlJtBnx9YlopWJKqukcbqjRyo,2881
 mm_std/command.py,sha256=ze286wjUjg0QSTgIu-2WZks53_Vclg69UaYYgPpQvCU,1283
-mm_std/concurrency.py,sha256=4kKLhde6YQYsjJJjH6K5eMQj6FtegEz55Mo5TmhQMM0,5242
 mm_std/config.py,sha256=4ox4D2CgGR76bvZ2n2vGQOYUDagFnlKEDb87to5zpxE,1871
 mm_std/crypto.py,sha256=jdk0_TCmeU0pPXMyz9xH6kQHSjjZ9GcGClBwQps5vBo,340
 mm_std/date.py,sha256=976eEkSONuNqHQBgSRu8hrtH23tJqztbmHFHLdbP2TY,1879
 mm_std/dict.py,sha256=6GkhJPXD0LiJDxPcYe6jPdEDw-MN7P7mKu6U5XxwYDk,675
 mm_std/env.py,sha256=5zaR9VeIfObN-4yfgxoFeU5IM1GDeZZj9SuYf7t9sOA,125
 mm_std/fs.py,sha256=RwarNRJq3tIMG6LVX_g03hasfYpjYFh_O27oVDt5IPQ,291
-mm_std/http_.py,sha256=
+mm_std/http_.py,sha256=x5d4wnQcPlB_IYaYNCQGz7iOTEft8cQmMggHt30o7xI,6193
 mm_std/json_.py,sha256=Naa6mBE4D0yiQGkPNRrFvndnUH3R7ovw3FeaejWV60o,1196
 mm_std/log.py,sha256=6ux6njNKc_ZCQlvWn1FZR6vcSY2Cem-mQzmNXvsg5IE,913
 mm_std/net.py,sha256=qdRCBIDneip6FaPNe5mx31UtYVmzqam_AoUF7ydEyjA,590
@@ -18,8 +16,15 @@ mm_std/random_.py,sha256=OuUX4VJeSd13NZBya4qrGpR2TfN7_87tfebOY6DBUnI,1113
 mm_std/result.py,sha256=KLnPWjICYFkP6CAhq7Ifs22XSD-PQ9RkG6n1-cZcXkM,7625
 mm_std/str.py,sha256=BEjJ1p5O4-uSYK0h-enasSSDdwzkBbiwdQ4_dsrlEE8,3257
 mm_std/toml.py,sha256=CNznWKR0bpOxS6e3VB5LGS-Oa9lW-wterkcPUFtPcls,610
-mm_std/types_.py,sha256=
+mm_std/types_.py,sha256=9FGd2q47a8M9QQgsWJR1Kq34jLxBAkYSoJuwih4PPqg,257
 mm_std/zip.py,sha256=axzF1BwcIygtfNNTefZH7hXKaQqwe-ZH3ChuRWr9dnk,396
-mm_std
-mm_std
-mm_std
+mm_std/concurrency/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mm_std/concurrency/async_decorators.py,sha256=tJ5u-_G9mAF3zrFr1aBUhYh03rB-K_6oiDKGdmGdlGk,449
+mm_std/concurrency/async_scheduler.py,sha256=NsR9nrxAEdiYjGL-lTdDlQAXJHJZBUYnm26h-0zaxCg,3932
+mm_std/concurrency/async_task_runner.py,sha256=D4fD_hwSVy55sM_-U1_sdVYlkahnQeigXdMfbIi8Rpc,4492
+mm_std/concurrency/sync_decorators.py,sha256=syCQBOmN7qPO55yzgJB2rbkh10CVww376hmyvs6e5tA,1080
+mm_std/concurrency/sync_scheduler.py,sha256=j4tBL_cBI1spr0cZplTA7N2CoYsznuORMeRN8rpR6gY,2407
+mm_std/concurrency/sync_task_runner.py,sha256=s5JPlLYLGQGHIxy4oDS-PN7O9gcy-yPZFoNm8RQwzcw,1780
+mm_std-0.3.15.dist-info/METADATA,sha256=HQY5RR2r6A77RPEc0_2wMT6QdKEFc6QyuzExC1f6fL8,410
+mm_std-0.3.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mm_std-0.3.15.dist-info/RECORD,,
mm_std/concurrency.py
DELETED
@@ -1,152 +0,0 @@
-import concurrent.futures
-import functools
-import time
-from collections import defaultdict
-from collections.abc import Callable
-from concurrent.futures.thread import ThreadPoolExecutor
-from dataclasses import dataclass, field
-from datetime import datetime
-from logging import Logger
-from threading import Lock, Thread
-
-from .date import is_too_old, utc_now
-
-type Func = Callable[..., object]
-type Args = tuple[object, ...]
-type Kwargs = dict[str, object]
-
-
-class ConcurrentTasks:
-    def __init__(self, max_workers: int = 5, timeout: int | None = None, thread_name_prefix: str = "concurrent_tasks") -> None:
-        self.max_workers = max_workers
-        self.timeout = timeout
-        self.thread_name_prefix = thread_name_prefix
-        self.tasks: list[ConcurrentTasks.Task] = []
-        self.exceptions: dict[str, Exception] = {}
-        self.error = False
-        self.timeout_error = False
-        self.result: dict[str, object] = {}
-
-    @dataclass
-    class Task:
-        key: str
-        func: Func
-        args: Args
-        kwargs: Kwargs
-
-    def add_task(self, key: str, func: Func, args: Args = (), kwargs: Kwargs | None = None) -> None:
-        if kwargs is None:
-            kwargs = {}
-        self.tasks.append(ConcurrentTasks.Task(key, func, args, kwargs))
-
-    def execute(self) -> None:
-        with ThreadPoolExecutor(self.max_workers, thread_name_prefix=self.thread_name_prefix) as executor:
-            future_to_key = {executor.submit(task.func, *task.args, **task.kwargs): task.key for task in self.tasks}
-            try:
-                result_map = concurrent.futures.as_completed(future_to_key, timeout=self.timeout)
-                for future in result_map:
-                    key = future_to_key[future]
-                    try:
-                        self.result[key] = future.result()
-                    except Exception as err:
-                        self.error = True
-                        self.exceptions[key] = err
-            except concurrent.futures.TimeoutError:
-                self.error = True
-                self.timeout_error = True
-
-
-def synchronized_parameter[T, **P](arg_index: int = 0, skip_if_locked: bool = False) -> Callable[..., Callable[P, T | None]]:
-    locks: dict[object, Lock] = defaultdict(Lock)
-
-    def outer(func: Callable[P, T]) -> Callable[P, T | None]:
-        @functools.wraps(func)
-        def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | None:
-            if skip_if_locked and locks[args[arg_index]].locked():
-                return None
-            try:
-                with locks[args[arg_index]]:
-                    return func(*args, **kwargs)
-            finally:
-                locks.pop(args[arg_index], None)
-
-        wrapper.locks = locks  # type: ignore[attr-defined]
-        return wrapper
-
-    return outer
-
-
-def synchronized[T, **P](fn: Callable[P, T]) -> Callable[P, T]:
-    lock = Lock()
-
-    @functools.wraps(fn)
-    def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
-        with lock:
-            return fn(*args, **kwargs)
-
-    return wrapper
-
-
-class Scheduler:
-    def __init__(self, log: Logger, loop_delay: float = 0.5, debug: bool = False) -> None:
-        self.log = log
-        self.debug = debug
-        self.loop_delay = loop_delay
-        self.stopped = False
-        self.jobs: list[Scheduler.Job] = []
-        self.run_immediately_jobs: list[Scheduler.Job] = []
-        self._debug("init")
-
-    @dataclass
-    class Job:
-        func: Func
-        args: tuple[object, ...]
-        interval: int
-        is_running: bool = False
-        last_at: datetime = field(default_factory=utc_now)
-
-        def __str__(self) -> str:
-            return str(self.func)
-
-    def add_job(self, func: Func, interval: int, args: tuple[object, ...] = (), run_immediately: bool = False) -> None:
-        job = Scheduler.Job(func, args, interval)
-        self.jobs.append(job)
-        if run_immediately:
-            self.run_immediately_jobs.append(job)
-
-    def _run_job(self, job: Job) -> None:
-        self._debug(f"_run_job: {job}")
-        if self.stopped:
-            return
-        try:
-            job.func(*job.args)
-            self._debug(f"_run_job: {job} done")
-        except Exception:
-            self.log.exception("scheduler error")
-            self._debug(f"_run_job: {job} error")
-        finally:
-            job.is_running = False
-
-    def _start(self) -> None:
-        self._debug(f"_start: jobs={len(self.jobs)}, run_immediately_jobs={len(self.run_immediately_jobs)}")
-        for j in self.run_immediately_jobs:
-            j.is_running = True
-            j.last_at = utc_now()
-            Thread(target=self._run_job, args=(j,)).start()
-        while not self.stopped:
-            for j in self.jobs:
-                if not j.is_running and is_too_old(j.last_at, j.interval):
-                    j.is_running = True
-                    j.last_at = utc_now()
-                    Thread(target=self._run_job, args=(j,)).start()
-            time.sleep(self.loop_delay)
-
-    def _debug(self, message: str) -> None:
-        if self.debug:
-            self.log.debug("Scheduler: %s", message)
-
-    def start(self) -> None:
-        Thread(target=self._start).start()
-
-    def stop(self) -> None:
-        self.stopped = True
{mm_std-0.3.13.dist-info → mm_std-0.3.15.dist-info}/WHEEL
RENAMED
File without changes