dbos 0.7.0a1__py3-none-any.whl → 0.7.0a8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- dbos/__init__.py +2 -0
- dbos/application_database.py +6 -11
- dbos/context.py +3 -2
- dbos/core.py +74 -54
- dbos/dbos.py +57 -69
- dbos/dbos_config.py +1 -1
- dbos/fastapi.py +46 -2
- dbos/kafka.py +27 -12
- dbos/migrations/versions/eab0cc1d9a14_job_queue.py +55 -0
- dbos/queue.py +36 -0
- dbos/recovery.py +1 -1
- dbos/scheduler/scheduler.py +8 -10
- dbos/schemas/system_database.py +23 -0
- dbos/system_database.py +106 -68
- {dbos-0.7.0a1.dist-info → dbos-0.7.0a8.dist-info}/METADATA +2 -2
- {dbos-0.7.0a1.dist-info → dbos-0.7.0a8.dist-info}/RECORD +19 -17
- {dbos-0.7.0a1.dist-info → dbos-0.7.0a8.dist-info}/WHEEL +0 -0
- {dbos-0.7.0a1.dist-info → dbos-0.7.0a8.dist-info}/entry_points.txt +0 -0
- {dbos-0.7.0a1.dist-info → dbos-0.7.0a8.dist-info}/licenses/LICENSE +0 -0
dbos/fastapi.py
CHANGED

@@ -1,8 +1,13 @@
 import uuid
-from typing import Any, Callable
+from typing import Any, Callable, cast
 
 from fastapi import FastAPI
 from fastapi import Request as FastAPIRequest
+from fastapi.responses import JSONResponse
+from starlette.types import ASGIApp, Message, Receive, Scope, Send
+
+from dbos import DBOS
+from dbos.error import DBOSException
 
 from .context import (
     EnterDBOSHandler,

@@ -35,7 +40,46 @@ def make_request(request: FastAPIRequest) -> Request:
     )
 
 
-def
+async def dbos_error_handler(request: FastAPIRequest, gexc: Exception) -> JSONResponse:
+    exc: DBOSException = cast(DBOSException, gexc)
+    status_code = 500
+    if exc.status_code is not None:
+        status_code = exc.status_code
+    return JSONResponse(
+        status_code=status_code,
+        content={
+            "message": str(exc.message),
+            "dbos_error_code": str(exc.dbos_error_code),
+            "dbos_error": str(exc.__class__.__name__),
+        },
+    )
+
+
+class LifespanMiddleware:
+    def __init__(self, app: ASGIApp, dbos: DBOS):
+        self.app = app
+        self.dbos = dbos
+
+    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
+        if scope["type"] == "lifespan":
+            while True:
+                message = await receive()
+                if message["type"] == "lifespan.startup":
+                    self.dbos._launch()
+                    await send({"type": "lifespan.startup.complete"})
+                elif message["type"] == "lifespan.shutdown":
+                    self.dbos._destroy()
+                    await send({"type": "lifespan.shutdown.complete"})
+                    break
+        else:
+            await self.app(scope, receive, send)
+
+
+def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
+
+    app.add_middleware(LifespanMiddleware, dbos=dbos)
+    app.add_exception_handler(DBOSException, dbos_error_handler)
+
     @app.middleware("http")
     async def dbos_fastapi_middleware(
         request: FastAPIRequest, call_next: Callable[..., Any]
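Together these changes hook DBOS launch and teardown into the ASGI lifespan protocol and render DBOSException as a structured JSON response. A minimal wiring sketch; the DBOS(fastapi=app) constructor form and the @DBOS.workflow() decorator are assumed from the package's public API rather than shown in this diff:

from fastapi import FastAPI

from dbos import DBOS

app = FastAPI()
DBOS(fastapi=app)  # assumed public entry point that calls setup_fastapi_middleware(app, dbos)


@app.get("/greeting/{name}")
@DBOS.workflow()  # assumed decorator from the public API
def greeting(name: str) -> str:
    return f"Hello, {name}!"


# Under an ASGI server such as uvicorn, LifespanMiddleware intercepts
# "lifespan.startup" to call dbos._launch() and "lifespan.shutdown" to call
# dbos._destroy(), so application code never launches DBOS explicitly. Any
# DBOSException escaping a handler is returned by dbos_error_handler as JSON
# with "message", "dbos_error_code", and "dbos_error" fields.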
dbos/kafka.py
CHANGED

@@ -1,26 +1,30 @@
 import threading
-import
-from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Callable, Generator, NoReturn, Optional, Union
+from typing import TYPE_CHECKING, Any, Callable, NoReturn
 
 from confluent_kafka import Consumer, KafkaError, KafkaException
-
+
+from dbos.queue import Queue
 
 if TYPE_CHECKING:
     from dbos.dbos import _DBOSRegistry
 
 from .context import SetWorkflowID
+from .error import DBOSInitializationError
 from .kafka_message import KafkaMessage
 from .logger import dbos_logger
 
 KafkaConsumerWorkflow = Callable[[KafkaMessage], None]
 
+kafka_queue: Queue
+in_order_kafka_queues: dict[str, Queue] = {}
+
 
 def _kafka_consumer_loop(
     func: KafkaConsumerWorkflow,
     config: dict[str, Any],
     topics: list[str],
     stop_event: threading.Event,
+    in_order: bool,
 ) -> None:
 
     def on_error(err: KafkaError) -> NoReturn:

@@ -70,24 +74,35 @@ def _kafka_consumer_loop(
             with SetWorkflowID(
                 f"kafka-unique-id-{msg.topic}-{msg.partition}-{msg.offset}"
             ):
-
-
-
-
-
-                )
+                if in_order:
+                    assert msg.topic is not None
+                    queue = in_order_kafka_queues[msg.topic]
+                    queue.enqueue(func, msg)
+                else:
+                    kafka_queue.enqueue(func, msg)
 
     finally:
         consumer.close()
 
 
 def kafka_consumer(
-    dbosreg: "_DBOSRegistry", config: dict[str, Any], topics: list[str]
+    dbosreg: "_DBOSRegistry", config: dict[str, Any], topics: list[str], in_order: bool
 ) -> Callable[[KafkaConsumerWorkflow], KafkaConsumerWorkflow]:
     def decorator(func: KafkaConsumerWorkflow) -> KafkaConsumerWorkflow:
+        if in_order:
+            for topic in topics:
+                if topic.startswith("^"):
+                    raise DBOSInitializationError(
+                        f"Error: in-order processing is not supported for regular expression topic selectors ({topic})"
+                    )
+                queue = Queue(f"_dbos_kafka_queue_topic_{topic}", concurrency=1)
+                in_order_kafka_queues[topic] = queue
+        else:
+            global kafka_queue
+            kafka_queue = Queue("_dbos_internal_queue")
        stop_event = threading.Event()
        dbosreg.register_poller(
-            stop_event, _kafka_consumer_loop, func, config, topics, stop_event
+            stop_event, _kafka_consumer_loop, func, config, topics, stop_event, in_order
        )
        return func
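The new in_order flag changes dispatch: instead of running the workflow inline, each message is enqueued, either on the shared "_dbos_internal_queue" or, for in-order consumption, on a per-topic Queue with concurrency=1 so messages from a topic are processed serially in offset order (regex topic selectors beginning with "^" are rejected at registration). A consumer sketch, assuming the public @DBOS.kafka_consumer decorator forwards the new flag; the broker address, group id, and topic name are placeholders:

from dbos import DBOS
from dbos.kafka_message import KafkaMessage

config = {"bootstrap.servers": "localhost:9092", "group.id": "example-group"}


# in_order=True registers Queue("_dbos_kafka_queue_topic_orders", concurrency=1),
# so each message from "orders" finishes before the next one starts.
@DBOS.kafka_consumer(config, ["orders"], in_order=True)
@DBOS.workflow()
def process_order(msg: KafkaMessage) -> None:
    DBOS.logger.info(f"Got {msg.topic}:{msg.partition}@{msg.offset}")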
dbos/migrations/versions/eab0cc1d9a14_job_queue.py
ADDED

@@ -0,0 +1,55 @@
+"""job_queue
+
+Revision ID: eab0cc1d9a14
+Revises: a3b18ad34abe
+Create Date: 2024-09-13 14:50:00.531294
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "eab0cc1d9a14"
+down_revision: Union[str, None] = "a3b18ad34abe"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    op.create_table(
+        "job_queue",
+        sa.Column("workflow_uuid", sa.Text(), nullable=False),
+        sa.Column("queue_name", sa.Text(), nullable=False),
+        sa.Column(
+            "created_at_epoch_ms",
+            sa.BigInteger(),
+            server_default=sa.text(
+                "(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"
+            ),
+            nullable=False,
+            primary_key=True,
+        ),
+        sa.ForeignKeyConstraint(
+            ["workflow_uuid"],
+            ["dbos.workflow_status.workflow_uuid"],
+            onupdate="CASCADE",
+            ondelete="CASCADE",
+        ),
+        schema="dbos",
+    )
+    op.add_column(
+        "workflow_status",
+        sa.Column(
+            "queue_name",
+            sa.Text(),
+        ),
+        schema="dbos",
+    )
+
+
+def downgrade() -> None:
+    op.drop_table("job_queue", schema="dbos")
+    op.drop_column("workflow_status", "queue_name", schema="dbos")
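The created_at_epoch_ms server default, (EXTRACT(epoch FROM now()) * 1000::numeric)::bigint, stamps each queued job with integer milliseconds since the Unix epoch; start_queued_workflows (later in this diff) orders on this column to get FIFO dequeueing. An illustrative client-side equivalent of the value Postgres computes:

import time

# Seconds since the epoch, scaled to integer milliseconds, matching the
# job_queue.created_at_epoch_ms server default.
created_at_epoch_ms = int(time.time() * 1000)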
dbos/queue.py
ADDED

@@ -0,0 +1,36 @@
+import threading
+import time
+from typing import TYPE_CHECKING, Optional
+
+from dbos.core import P, R, _execute_workflow_id, _start_workflow
+from dbos.error import DBOSInitializationError
+
+if TYPE_CHECKING:
+    from dbos.dbos import DBOS, Workflow, WorkflowHandle
+
+
+class Queue:
+    def __init__(self, name: str, concurrency: Optional[int] = None) -> None:
+        self.name = name
+        self.concurrency = concurrency
+        from dbos.dbos import _get_or_create_dbos_registry
+
+        registry = _get_or_create_dbos_registry()
+        registry.queue_info_map[self.name] = self
+
+    def enqueue(
+        self, func: "Workflow[P, R]", *args: P.args, **kwargs: P.kwargs
+    ) -> "WorkflowHandle[R]":
+        from dbos.dbos import _get_dbos_instance
+
+        dbos = _get_dbos_instance()
+        return _start_workflow(dbos, func, self.name, False, *args, **kwargs)
+
+
+def queue_thread(stop_event: threading.Event, dbos: "DBOS") -> None:
+    while not stop_event.is_set():
+        time.sleep(1)
+        for queue_name, queue in dbos._registry.queue_info_map.items():
+            wf_ids = dbos._sys_db.start_queued_workflows(queue_name, queue.concurrency)
+            for id in wf_ids:
+                _execute_workflow_id(dbos, id)
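Constructing a Queue registers it in the registry's queue_info_map, and enqueue() hands the workflow to _start_workflow under the queue's name rather than running it directly; queue_thread then polls roughly once per second and starts whatever the system database dequeues. A usage sketch; the @DBOS.workflow() decorator and the handle's get_result() method are assumed from the package's public API:

from dbos import DBOS
from dbos.queue import Queue

queue = Queue("example_queue", concurrency=10)  # each polling pass considers at most the 10 oldest entries


@DBOS.workflow()
def process_task(task_id: int) -> str:
    return f"processed {task_id}"


# enqueue() returns a handle immediately; the workflow runs once a poller
# flips its status from ENQUEUED to PENDING and executes it.
handle = queue.enqueue(process_task, 42)
result = handle.get_result()  # assumed blocking call on WorkflowHandle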
dbos/recovery.py
CHANGED

@@ -41,7 +41,7 @@ def _recover_pending_workflows(
             f"Skip local recovery because it's running in a VM: {os.environ.get('DBOS__VMID')}"
         )
     dbos.logger.debug(f"Recovering pending workflows for executor: {executor_id}")
-    workflow_ids = dbos.
+    workflow_ids = dbos._sys_db.get_pending_workflows(executor_id)
     dbos.logger.debug(f"Pending workflows: {workflow_ids}")
 
     for workflowID in workflow_ids:
dbos/scheduler/scheduler.py
CHANGED

@@ -1,41 +1,39 @@
 import threading
-import traceback
 from datetime import datetime, timezone
 from typing import TYPE_CHECKING, Callable
 
+from dbos.queue import Queue
+
 if TYPE_CHECKING:
     from dbos.dbos import _DBOSRegistry
 
 from ..context import SetWorkflowID
-from ..logger import dbos_logger
 from .croniter import croniter  # type: ignore
 
 ScheduledWorkflow = Callable[[datetime, datetime], None]
 
+scheduler_queue: Queue
+
 
 def scheduler_loop(
     func: ScheduledWorkflow, cron: str, stop_event: threading.Event
 ) -> None:
-    iter = croniter(cron, datetime.now(timezone.utc))
+    iter = croniter(cron, datetime.now(timezone.utc), second_at_beginning=True)
     while not stop_event.is_set():
         nextExecTime = iter.get_next(datetime)
         sleepTime = nextExecTime - datetime.now(timezone.utc)
         if stop_event.wait(timeout=sleepTime.total_seconds()):
             return
         with SetWorkflowID(f"sched-{func.__qualname__}-{nextExecTime.isoformat()}"):
-
-            func(nextExecTime, datetime.now(timezone.utc))
-        except Exception as e:
-            dbos_logger.error(
-                f"Exception encountered in scheduled workflow: {traceback.format_exc()}"
-            )
-            pass  # Let the thread keep running
+            scheduler_queue.enqueue(func, nextExecTime, datetime.now(timezone.utc))
 
 
 def scheduled(
     dbosreg: "_DBOSRegistry", cron: str
 ) -> Callable[[ScheduledWorkflow], ScheduledWorkflow]:
     def decorator(func: ScheduledWorkflow) -> ScheduledWorkflow:
+        global scheduler_queue
+        scheduler_queue = Queue("_dbos_internal_queue")
        stop_event = threading.Event()
        dbosreg.register_poller(stop_event, scheduler_loop, func, cron, stop_event)
        return func
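Two behavior changes here: croniter is now constructed with second_at_beginning=True, so six-field cron expressions with a leading seconds field are accepted, and each firing is enqueued on the internal DBOS queue instead of being invoked (with exceptions swallowed) inline on the scheduler thread. A sketch, assuming the public @DBOS.scheduled decorator wires up the scheduled() registration shown above:

from datetime import datetime

from dbos import DBOS


# Six-field cron: with second_at_beginning=True the leading */10 is a seconds
# field, so this fires every ten seconds rather than every ten minutes.
@DBOS.scheduled("*/10 * * * * *")
@DBOS.workflow()
def heartbeat(scheduled_time: datetime, actual_time: datetime) -> None:
    DBOS.logger.info(f"Scheduled at {scheduled_time}, ran at {actual_time}")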
dbos/schemas/system_database.py
CHANGED

@@ -1,5 +1,6 @@
 from sqlalchemy import (
     BigInteger,
+    Boolean,
     Column,
     ForeignKey,
     Index,

@@ -53,6 +54,7 @@ class SystemSchema:
             nullable=True,
             server_default=text("'0'::bigint"),
         ),
+        Column("queue_name", Text),
         Index("workflow_status_created_at_index", "created_at"),
         Index("workflow_status_executor_id_index", "executor_id"),
     )

@@ -139,3 +141,24 @@ class SystemSchema:
         Column("workflow_fn_name", Text, primary_key=True, nullable=False),
         Column("last_run_time", BigInteger, nullable=False),
     )
+
+    job_queue = Table(
+        "job_queue",
+        metadata_obj,
+        Column(
+            "workflow_uuid",
+            Text,
+            ForeignKey(
+                "workflow_status.workflow_uuid", onupdate="CASCADE", ondelete="CASCADE"
+            ),
+            nullable=False,
+            primary_key=True,
+        ),
+        Column("queue_name", Text, nullable=False),
+        Column(
+            "created_at_epoch_ms",
+            BigInteger,
+            nullable=False,
+            server_default=text("(EXTRACT(epoch FROM now()) * 1000::numeric)::bigint"),
+        ),
+    )
dbos/system_database.py
CHANGED

@@ -6,7 +6,7 @@ import time
 from enum import Enum
 from typing import Any, Dict, List, Literal, Optional, Sequence, Set, TypedDict, cast
 
-import psycopg2
+import psycopg
 import sqlalchemy as sa
 import sqlalchemy.dialects.postgresql as pg
 from alembic import command

@@ -33,10 +33,11 @@ class WorkflowStatusString(Enum):
     ERROR = "ERROR"
     RETRIES_EXCEEDED = "RETRIES_EXCEEDED"
     CANCELLED = "CANCELLED"
+    ENQUEUED = "ENQUEUED"
 
 
 WorkflowStatuses = Literal[
-    "PENDING", "SUCCESS", "ERROR", "RETRIES_EXCEEDED", "CANCELLED"
+    "PENDING", "SUCCESS", "ERROR", "RETRIES_EXCEEDED", "CANCELLED", "ENQUEUED"
 ]
 

@@ -61,6 +62,7 @@ class WorkflowStatusInternal(TypedDict):
     authenticated_user: Optional[str]
     assumed_role: Optional[str]
     authenticated_roles: Optional[str]  # JSON list of roles.
+    queue_name: Optional[str]
 
 
 class RecordedResult(TypedDict):

@@ -154,7 +156,7 @@ class SystemDatabase:
 
         # If the system database does not already exist, create it
         postgres_db_url = sa.URL.create(
-            "postgresql",
+            "postgresql+psycopg",
             username=config["database"]["username"],
             password=config["database"]["password"],
             host=config["database"]["hostname"],

@@ -172,7 +174,7 @@ class SystemDatabase:
         engine.dispose()
 
         system_db_url = sa.URL.create(
-            "postgresql",
+            "postgresql+psycopg",
             username=config["database"]["username"],
             password=config["database"]["password"],
             host=config["database"]["hostname"],

@@ -196,7 +198,7 @@ class SystemDatabase:
         )
         command.upgrade(alembic_cfg, "head")
 
-        self.notification_conn: Optional[
+        self.notification_conn: Optional[psycopg.connection.Connection] = None
         self.notifications_map: Dict[str, threading.Condition] = {}
         self.workflow_events_map: Dict[str, threading.Condition] = {}
 

@@ -247,6 +249,7 @@ class SystemDatabase:
             authenticated_user=status["authenticated_user"],
             authenticated_roles=status["authenticated_roles"],
             assumed_role=status["assumed_role"],
+            queue_name=status["queue_name"],
         )
         if replace:
             cmd = cmd.on_conflict_do_update(

@@ -320,6 +323,7 @@ class SystemDatabase:
                     SystemSchema.workflow_status.c.authenticated_user,
                     SystemSchema.workflow_status.c.authenticated_roles,
                     SystemSchema.workflow_status.c.assumed_role,
+                    SystemSchema.workflow_status.c.queue_name,
                 ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
             ).fetchone()
             if row is None:

@@ -340,6 +344,7 @@ class SystemDatabase:
                 "authenticated_user": row[6],
                 "authenticated_roles": row[7],
                 "assumed_role": row[8],
+                "queue_name": row[9],
             }
             return status
 

@@ -379,6 +384,7 @@ class SystemDatabase:
                     SystemSchema.workflow_status.c.authenticated_user,
                     SystemSchema.workflow_status.c.authenticated_roles,
                     SystemSchema.workflow_status.c.assumed_role,
+                    SystemSchema.workflow_status.c.queue_name,
                 ).where(SystemSchema.workflow_status.c.workflow_uuid == workflow_uuid)
             ).fetchone()
             if row is None:

@@ -399,6 +405,7 @@ class SystemDatabase:
                 "authenticated_user": row[7],
                 "authenticated_roles": row[8],
                 "assumed_role": row[9],
+                "queue_name": row[10],
             }
             return status
 

@@ -565,11 +572,9 @@ class SystemDatabase:
             with self.engine.begin() as c:
                 c.execute(sql)
         except DBAPIError as dbapi_error:
-            if dbapi_error.orig.
+            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
                 raise DBOSWorkflowConflictIDError(result["workflow_uuid"])
-            raise
-        except Exception as e:
-            raise e
+            raise
 
     def check_operation_execution(
         self, workflow_uuid: str, function_id: int, conn: Optional[sa.Connection] = None

@@ -623,11 +628,9 @@ class SystemDatabase:
             )
         except DBAPIError as dbapi_error:
             # Foreign key violation
-            if dbapi_error.orig.
+            if dbapi_error.orig.sqlstate == "23503":  # type: ignore
                 raise DBOSNonExistentWorkflowError(destination_uuid)
-            raise
-        except Exception as e:
-            raise e
+            raise
         output: OperationResultInternal = {
             "workflow_uuid": workflow_uuid,
             "function_id": function_id,

@@ -729,69 +732,59 @@ class SystemDatabase:
         return message
 
     def _notification_listener(self) -> None:
-        notification_cursor: Optional[psycopg2.extensions.cursor] = None
         while self._run_background_processes:
             try:
-                #
-
-                self.engine.url.
+                # since we're using the psycopg connection directly, we need a url without the "+pycopg" suffix
+                url = sa.URL.create(
+                    "postgresql", **self.engine.url.translate_connect_args()
                 )
-
-
+                # Listen to notifications
+                self.notification_conn = psycopg.connect(
+                    url.render_as_string(hide_password=False), autocommit=True
                 )
-                notification_cursor = self.notification_conn.cursor()
 
-
-
+                self.notification_conn.execute("LISTEN dbos_notifications_channel")
+                self.notification_conn.execute("LISTEN dbos_workflow_events_channel")
+
                 while self._run_background_processes:
-                    …
-                    notify.payload
-                    …
-                    notify.payload
-                    …
-                    condition.acquire()
-                    condition.notify_all()
-                    condition.release()
-                    dbos_logger.debug(
-                        f"Signaled workflow_events condition for {notify.payload}"
-                    )
-                    else:
-                        dbos_logger.error(f"Unknown channel: {channel}")
+                    gen = self.notification_conn.notifies(timeout=60)
+                    for notify in gen:
+                        channel = notify.channel
+                        dbos_logger.debug(
+                            f"Received notification on channel: {channel}, payload: {notify.payload}"
+                        )
+                        if channel == "dbos_notifications_channel":
+                            if (
+                                notify.payload
+                                and notify.payload in self.notifications_map
+                            ):
+                                condition = self.notifications_map[notify.payload]
+                                condition.acquire()
+                                condition.notify_all()
+                                condition.release()
+                                dbos_logger.debug(
+                                    f"Signaled notifications condition for {notify.payload}"
+                                )
+                        elif channel == "dbos_workflow_events_channel":
+                            if (
+                                notify.payload
+                                and notify.payload in self.workflow_events_map
+                            ):
+                                condition = self.workflow_events_map[notify.payload]
+                                condition.acquire()
+                                condition.notify_all()
+                                condition.release()
+                                dbos_logger.debug(
+                                    f"Signaled workflow_events condition for {notify.payload}"
+                                )
+                        else:
+                            dbos_logger.error(f"Unknown channel: {channel}")
             except Exception as e:
                 if self._run_background_processes:
                     dbos_logger.error(f"Notification listener error: {e}")
                     time.sleep(1)
                     # Then the loop will try to reconnect and restart the listener
             finally:
-                if notification_cursor is not None:
-                    notification_cursor.close()
                 if self.notification_conn is not None:
                     self.notification_conn.close()
 

@@ -848,11 +841,9 @@ class SystemDatabase:
                 )
             )
         except DBAPIError as dbapi_error:
-            if dbapi_error.orig.
+            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
                 raise DBOSDuplicateWorkflowEventError(workflow_uuid, key)
-            raise
-        except Exception as e:
-            raise e
+            raise
         output: OperationResultInternal = {
             "workflow_uuid": workflow_uuid,
             "function_id": function_id,

@@ -1026,3 +1017,50 @@ class SystemDatabase:
             len(self._workflow_status_buffer) == 0
             and len(self._workflow_inputs_buffer) == 0
         )
+
+    def enqueue(self, workflow_id: str, queue_name: str) -> None:
+        with self.engine.begin() as c:
+            c.execute(
+                pg.insert(SystemSchema.job_queue)
+                .values(
+                    workflow_uuid=workflow_id,
+                    queue_name=queue_name,
+                )
+                .on_conflict_do_nothing()
+            )
+
+    def start_queued_workflows(
+        self, queue_name: str, concurrency: Optional[int]
+    ) -> List[str]:
+        with self.engine.begin() as c:
+            query = sa.select(SystemSchema.job_queue.c.workflow_uuid).where(
+                SystemSchema.job_queue.c.queue_name == queue_name
+            )
+            if concurrency is not None:
+                query = query.order_by(
+                    SystemSchema.job_queue.c.created_at_epoch_ms.asc()
+                ).limit(concurrency)
+            rows = c.execute(query).fetchall()
+            dequeued_ids: List[str] = [row[0] for row in rows]
+            ret_ids = []
+            for id in dequeued_ids:
+                result = c.execute(
+                    SystemSchema.workflow_status.update()
+                    .where(SystemSchema.workflow_status.c.workflow_uuid == id)
+                    .where(
+                        SystemSchema.workflow_status.c.status
+                        == WorkflowStatusString.ENQUEUED.value
+                    )
+                    .values(status=WorkflowStatusString.PENDING.value)
+                )
+                if result.rowcount > 0:
+                    ret_ids.append(id)
+            return ret_ids
+
+    def remove_from_queue(self, workflow_id: str) -> None:
+        with self.engine.begin() as c:
+            c.execute(
+                sa.delete(SystemSchema.job_queue).where(
+                    SystemSchema.job_queue.c.workflow_uuid == workflow_id
+                )
+            )
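The guarded UPDATE inside start_queued_workflows is what makes polling safe with multiple executors: a workflow id is returned only when this caller's UPDATE ... WHERE status = 'ENQUEUED' actually matched (rowcount > 0), so two pollers racing on the same row cannot both claim it. A simplified sketch of one polling loop mirroring queue_thread; the sys_db argument and the hand-off placeholder stand in for the real wiring in dbos/queue.py:

import threading
import time

from dbos.system_database import SystemDatabase


def poll_queue(sys_db: SystemDatabase, queue_name: str, stop: threading.Event) -> None:
    while not stop.is_set():
        time.sleep(1)
        # Claim up to one of the oldest ENQUEUED workflows; only ids whose
        # ENQUEUED -> PENDING flip succeeded are returned to this poller.
        claimed = sys_db.start_queued_workflows(queue_name, concurrency=1)
        for workflow_id in claimed:
            ...  # hand off to _execute_workflow_id(dbos, workflow_id)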
{dbos-0.7.0a1.dist-info → dbos-0.7.0a8.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dbos
-Version: 0.7.0a1
+Version: 0.7.0a8
 Summary: Ultra-lightweight durable execution in Python
 Author-Email: "DBOS, Inc." <contact@dbos.dev>
 License: MIT

@@ -8,7 +8,6 @@ Requires-Python: >=3.9
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: jsonschema>=4.23.0
 Requires-Dist: alembic>=1.13.2
-Requires-Dist: psycopg2-binary>=2.9.9
 Requires-Dist: typing-extensions>=4.12.2; python_version < "3.10"
 Requires-Dist: typer>=0.12.3
 Requires-Dist: jsonpickle>=3.2.2

@@ -19,6 +18,7 @@ Requires-Dist: python-dateutil>=2.9.0.post0
 Requires-Dist: fastapi[standard]>=0.112.1
 Requires-Dist: psutil>=6.0.0
 Requires-Dist: tomlkit>=0.13.2
+Requires-Dist: psycopg>=3.2.1
 Description-Content-Type: text/markdown