dbos 1.1.0a4__py3-none-any.whl → 1.2.0a4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbos/_admin_server.py +24 -4
- dbos/_app_db.py +0 -15
- dbos/_core.py +0 -1
- dbos/_dbos.py +22 -2
- dbos/_dbos_config.py +0 -23
- dbos/_event_loop.py +10 -7
- dbos/_queue.py +3 -0
- dbos/_sys_db.py +37 -61
- dbos/_workflow_commands.py +1 -10
- {dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/METADATA +1 -1
- {dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/RECORD +14 -14
- {dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/WHEEL +0 -0
- {dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/entry_points.txt +0 -0
- {dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/licenses/LICENSE +0 -0
dbos/_admin_server.py
CHANGED
@@ -5,8 +5,9 @@ import re
 import threading
 from functools import partial
 from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
-from typing import TYPE_CHECKING, Any, List, TypedDict
+from typing import TYPE_CHECKING, Any, List, Optional, TypedDict
 
+from ._context import SetWorkflowID
 from ._error import DBOSException
 from ._logger import dbos_logger
 from ._recovery import recover_pending_workflows
@@ -141,7 +142,11 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
        try:
            data = json.loads(post_data.decode("utf-8"))
            start_step: int = data.get("start_step", 1)
-           self._handle_fork(workflow_id, start_step)
+           new_workflow_id: Optional[str] = data.get("new_workflow_id")
+           application_version: Optional[str] = data.get("application_version")
+           self._handle_fork(
+               workflow_id, start_step, new_workflow_id, application_version
+           )
        except (json.JSONDecodeError, AttributeError) as e:
            self.send_response(500)
            self.send_header("Content-Type", "application/json")
@@ -191,9 +196,24 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
        self.end_headers()
        self.wfile.write(response_body)
 
-    def _handle_fork(self, workflow_id: str, start_step: int) -> None:
+    def _handle_fork(
+        self,
+        workflow_id: str,
+        start_step: int,
+        new_workflow_id: Optional[str],
+        application_version: Optional[str],
+    ) -> None:
        try:
-           handle = self.dbos.fork_workflow(workflow_id, start_step)
+           print(f"Forking workflow {workflow_id} from step {start_step}")
+           if new_workflow_id is not None:
+               with SetWorkflowID(new_workflow_id):
+                   handle = self.dbos.fork_workflow(
+                       workflow_id, start_step, application_version=application_version
+                   )
+           else:
+               handle = self.dbos.fork_workflow(
+                   workflow_id, start_step, application_version=application_version
+               )
            response_body = json.dumps(
                {
                    "workflow_id": handle.workflow_id,
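Taken together, the admin server's fork endpoint now accepts two optional JSON fields, new_workflow_id and application_version. A minimal client sketch follows; the admin port (3001) and the POST /workflows/<workflow_id>/fork route are assumptions based on DBOS's documented admin-server defaults, so verify them against your deployment.

    # Hedged sketch: fork a workflow through the admin server (stdlib only).
    import json
    from urllib.request import Request, urlopen

    def fork_via_admin(workflow_id: str) -> str:
        body = json.dumps(
            {
                "start_step": 2,  # defaults to 1 if omitted, per the handler above
                "new_workflow_id": "my-forked-run",  # applied via SetWorkflowID
                "application_version": "v2",  # forwarded to fork_workflow
            }
        ).encode("utf-8")
        req = Request(
            f"http://localhost:3001/workflows/{workflow_id}/fork",  # assumed route/port
            data=body,
            headers={"Content-Type": "application/json"},
            method="POST",
        )
        with urlopen(req) as resp:  # the handler responds with the new workflow's ID
            return json.loads(resp.read())["workflow_id"]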
dbos/_app_db.py
CHANGED
@@ -216,21 +216,6 @@ class ApplicationDatabase:
            for row in rows
        ]
 
-    def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
-        with self.engine.begin() as conn:
-            max_function_id_row = conn.execute(
-                sa.select(
-                    sa.func.max(ApplicationSchema.transaction_outputs.c.function_id)
-                ).where(
-                    ApplicationSchema.transaction_outputs.c.workflow_uuid
-                    == workflow_uuid
-                )
-            ).fetchone()
-
-            max_function_id = max_function_id_row[0] if max_function_id_row else None
-
-            return max_function_id
-
    def clone_workflow_transactions(
        self, src_workflow_id: str, forked_workflow_id: str, start_step: int
    ) -> None:
dbos/_core.py
CHANGED
@@ -602,7 +602,6 @@ async def start_workflow_async(
    *args: P.args,
    **kwargs: P.kwargs,
 ) -> "WorkflowHandleAsync[R]":
-
    # If the function has a class, add the class object as its first argument
    fself: Optional[object] = None
    if hasattr(func, "__self__"):
dbos/_dbos.py
CHANGED
@@ -90,7 +90,6 @@ from ._context import (
 from ._dbos_config import (
     ConfigFile,
     DBOSConfig,
-    check_config_consistency,
     overwrite_config,
     process_config,
     set_env_vars,
@@ -299,6 +298,7 @@ class DBOS:
 
        self._launched: bool = False
        self._debug_mode: bool = False
+       self._configured_threadpool: bool = False
        self._sys_db_field: Optional[SystemDatabase] = None
        self._app_db_field: Optional[ApplicationDatabase] = None
        self._registry: DBOSRegistry = _get_or_create_dbos_registry()
@@ -323,7 +323,6 @@ class DBOS:
        unvalidated_config = translate_dbos_config_to_config_file(config)
        if os.environ.get("DBOS__CLOUD") == "true":
            unvalidated_config = overwrite_config(unvalidated_config)
-           check_config_consistency(name=unvalidated_config["name"])
 
        if unvalidated_config is not None:
            self._config: ConfigFile = process_config(data=unvalidated_config)
@@ -719,6 +718,7 @@ class DBOS:
        **kwargs: P.kwargs,
    ) -> WorkflowHandleAsync[R]:
        """Invoke a workflow function on the event loop, returning a handle to the ongoing execution."""
+       await cls._configure_asyncio_thread_pool()
        return await start_workflow_async(
            _get_dbos_instance(), func, None, True, *args, **kwargs
        )
@@ -736,6 +736,7 @@ class DBOS:
    async def get_workflow_status_async(
        cls, workflow_id: str
    ) -> Optional[WorkflowStatus]:
+       await cls._configure_asyncio_thread_pool()
        """Return the status of a workflow execution."""
        return await asyncio.to_thread(cls.get_workflow_status, workflow_id)
 
@@ -757,6 +758,7 @@ class DBOS:
    ) -> WorkflowHandleAsync[R]:
        """Return a `WorkflowHandle` for a workflow execution."""
        dbos = _get_dbos_instance()
+       await cls._configure_asyncio_thread_pool()
        if existing_workflow:
            stat = await dbos.get_workflow_status_async(workflow_id)
            if stat is None:
@@ -775,6 +777,7 @@ class DBOS:
        cls, destination_id: str, message: Any, topic: Optional[str] = None
    ) -> None:
        """Send a message to a workflow execution."""
+       await cls._configure_asyncio_thread_pool()
        await asyncio.to_thread(lambda: DBOS.send(destination_id, message, topic))
 
    @classmethod
@@ -797,6 +800,7 @@ class DBOS:
        This function is to be called from within a workflow.
        `recv_async` will return the message sent on `topic`, asyncronously waiting if necessary.
        """
+       await cls._configure_asyncio_thread_pool()
        return await asyncio.to_thread(lambda: DBOS.recv(topic, timeout_seconds))
 
    @classmethod
@@ -835,6 +839,7 @@ class DBOS:
        It is important to use `DBOS.sleep` or `DBOS.sleep_async` (as opposed to any other sleep) within workflows,
        as the DBOS sleep methods are durable and completed sleeps will be skipped during recovery.
        """
+       await cls._configure_asyncio_thread_pool()
        await asyncio.to_thread(lambda: DBOS.sleep(seconds))
 
    @classmethod
@@ -869,6 +874,7 @@ class DBOS:
            value(Any): A serializable value to associate with the key
 
        """
+       await cls._configure_asyncio_thread_pool()
        await asyncio.to_thread(lambda: DBOS.set_event(key, value))
 
    @classmethod
@@ -901,6 +907,7 @@ class DBOS:
            timeout_seconds(float): The amount of time to wait, in case `set_event` has not yet been called byt the workflow
 
        """
+       await cls._configure_asyncio_thread_pool()
        return await asyncio.to_thread(
            lambda: DBOS.get_event(workflow_id, key, timeout_seconds)
        )
@@ -929,6 +936,19 @@ class DBOS:
            fn, "DBOS.cancelWorkflow"
        )
 
+   @classmethod
+   async def _configure_asyncio_thread_pool(cls) -> None:
+       """
+       Configure the thread pool for asyncio.to_thread.
+
+       This function is called before the first call to asyncio.to_thread.
+       """
+       if _get_dbos_instance()._configured_threadpool:
+           return
+       loop = asyncio.get_running_loop()
+       loop.set_default_executor(_get_dbos_instance()._executor)
+       _get_dbos_instance()._configured_threadpool = True
+
    @classmethod
    def resume_workflow(cls, workflow_id: str) -> WorkflowHandle[Any]:
        """Resume a workflow by ID."""
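Why _configure_asyncio_thread_pool matters: asyncio.to_thread runs on the event loop's default executor, which CPython caps at roughly min(32, cpu_count + 4) workers, and every async method above funnels through asyncio.to_thread. Installing DBOS's larger executor as the default lifts that cap. A standalone sketch of the mechanism, with illustrative numbers rather than DBOS internals:

    import asyncio
    import time
    from concurrent.futures import ThreadPoolExecutor

    async def main() -> None:
        # Install a larger default executor, as DBOS now does on first use.
        asyncio.get_running_loop().set_default_executor(
            ThreadPoolExecutor(max_workers=64)
        )
        start = time.monotonic()
        # All 64 blocking calls run concurrently instead of queueing
        # behind the small default worker cap.
        await asyncio.gather(*(asyncio.to_thread(time.sleep, 1) for _ in range(64)))
        print(f"elapsed: {time.monotonic() - start:.1f}s")  # ~1s

    asyncio.run(main())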
dbos/_dbos_config.py
CHANGED
@@ -529,26 +529,3 @@ def overwrite_config(provided_config: ConfigFile) -> ConfigFile:
        del provided_config["env"]
 
    return provided_config
-
-
-def check_config_consistency(
-    *,
-    name: str,
-    config_file_path: str = DBOS_CONFIG_PATH,
-) -> None:
-    # First load the config file and check whether it is present
-    try:
-        config = load_config(config_file_path, silent=True, run_process_config=False)
-    except FileNotFoundError:
-        dbos_logger.debug(
-            f"No configuration file {config_file_path} found. Skipping consistency check with provided config."
-        )
-        return
-    except Exception as e:
-        raise e
-
-    # Check the name
-    if name != config["name"]:
-        raise DBOSInitializationError(
-            f"Provided app name '{name}' does not match the app name '{config['name']}' in {config_file_path}."
-        )
dbos/_event_loop.py
CHANGED
@@ -1,5 +1,6 @@
 import asyncio
 import threading
+from concurrent.futures import ThreadPoolExecutor
 from typing import Any, Coroutine, Optional, TypeVar
 
 
@@ -33,15 +34,17 @@ class BackgroundEventLoop:
 
    def _run_event_loop(self) -> None:
        self._loop = asyncio.new_event_loop()
-       asyncio.set_event_loop(self._loop)
+       with ThreadPoolExecutor(max_workers=64) as thread_pool:
+           self._loop.set_default_executor(thread_pool)
+           asyncio.set_event_loop(self._loop)
 
-       self._running = True
-       self._ready.set()  # Signal that the loop is ready
+           self._running = True
+           self._ready.set()  # Signal that the loop is ready
 
-       try:
-           self._loop.run_forever()
-       finally:
-           self._loop.close()
+           try:
+               self._loop.run_forever()
+           finally:
+               self._loop.close()
 
    async def _shutdown(self) -> None:
        if self._loop is None:
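The with-block above ties the 64-worker pool's shutdown to the loop's exit. A minimal sketch of the same pattern in isolation, with only the worker count taken from the diff:

    import asyncio
    import threading
    from concurrent.futures import ThreadPoolExecutor

    def run_loop(loop: asyncio.AbstractEventLoop) -> None:
        with ThreadPoolExecutor(max_workers=64) as pool:  # joined when the loop stops
            loop.set_default_executor(pool)
            asyncio.set_event_loop(loop)
            try:
                loop.run_forever()
            finally:
                loop.close()

    loop = asyncio.new_event_loop()
    thread = threading.Thread(target=run_loop, args=(loop,), daemon=True)
    thread.start()
    # Work submitted from other threads lands on the background loop.
    future = asyncio.run_coroutine_threadsafe(asyncio.sleep(0.1, result="done"), loop)
    print(future.result())
    loop.call_soon_threadsafe(loop.stop)
    thread.join()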
dbos/_queue.py
CHANGED
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypedDict
 from psycopg import errors
 from sqlalchemy.exc import OperationalError
 
+from dbos._logger import dbos_logger
 from dbos._utils import GlobalParams
 
 from ._core import P, R, execute_workflow_by_id, start_workflow, start_workflow_async
@@ -56,6 +57,8 @@ class Queue:
        from ._dbos import _get_or_create_dbos_registry
 
        registry = _get_or_create_dbos_registry()
+       if self.name in registry.queue_info_map:
+           dbos_logger.warning(f"Queue {name} has already been declared")
        registry.queue_info_map[self.name] = self
 
    def enqueue(
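A small usage sketch of the new duplicate-declaration warning; the queue name and limit are illustrative. The second declaration logs the warning and then still overwrites the first entry in queue_info_map:

    from dbos import Queue

    checkout_queue = Queue("checkout", worker_concurrency=5)
    # Logs: "Queue checkout has already been declared"
    duplicate_queue = Queue("checkout")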
dbos/_sys_db.py
CHANGED
@@ -601,18 +601,6 @@ class SystemDatabase:
            )
        )
 
-    def get_max_function_id(self, workflow_uuid: str) -> Optional[int]:
-        with self.engine.begin() as conn:
-            max_function_id_row = conn.execute(
-                sa.select(
-                    sa.func.max(SystemSchema.operation_outputs.c.function_id)
-                ).where(SystemSchema.operation_outputs.c.workflow_uuid == workflow_uuid)
-            ).fetchone()
-
-            max_function_id = max_function_id_row[0] if max_function_id_row else None
-
-            return max_function_id
-
    def fork_workflow(
        self,
        original_workflow_id: str,
@@ -1722,13 +1710,8 @@ class SystemDatabase:
        if num_recent_queries >= queue.limiter["limit"]:
            return []
 
-       #
-
-       # functions, else select all of them.
-
-       # First lets figure out how many tasks are eligible for dequeue.
-       # This means figuring out how many unstarted tasks are within the local and global concurrency limits
-       running_tasks_query = (
+       # Count how many workflows on this queue are currently PENDING both locally and globally.
+       pending_tasks_query = (
            sa.select(
                SystemSchema.workflow_status.c.executor_id,
                sa.func.count().label("task_count"),
@@ -1742,41 +1725,37 @@ class SystemDatabase:
            )
            .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
            .where(
-               SystemSchema.workflow_queue.c.started_at_epoch_ms.isnot(
-                   None
-               )  # Task is started
-           )
-           .where(
-               SystemSchema.workflow_queue.c.completed_at_epoch_ms.is_(
-                   None
-               )  # Task is not completed.
+               SystemSchema.workflow_status.c.status
+               == WorkflowStatusString.PENDING.value
            )
            .group_by(SystemSchema.workflow_status.c.executor_id)
        )
-       running_tasks_result = c.execute(running_tasks_query).fetchall()
-       running_tasks_result_dict = {row[0]: row[1] for row in running_tasks_result}
-       running_tasks_for_this_worker = running_tasks_result_dict.get(
-           executor_id, 0
-       )  # Get count for current executor
+       pending_workflows = c.execute(pending_tasks_query).fetchall()
+       pending_workflows_dict = {row[0]: row[1] for row in pending_workflows}
+       local_pending_workflows = pending_workflows_dict.get(executor_id, 0)
 
+       # Compute max_tasks, the number of workflows that can be dequeued given local and global concurrency limits,
        max_tasks = float("inf")
        if queue.worker_concurrency is not None:
-           max_tasks = max(
-               0, queue.worker_concurrency - running_tasks_for_this_worker
-           )
+           # Print a warning if the local concurrency limit is violated
+           if local_pending_workflows > queue.worker_concurrency:
+               dbos_logger.warning(
+                   f"The number of local pending workflows ({local_pending_workflows}) on queue {queue.name} exceeds the local concurrency limit ({queue.worker_concurrency})"
+               )
+           max_tasks = max(0, queue.worker_concurrency - local_pending_workflows)
+
        if queue.concurrency is not None:
-           total_running_tasks = sum(running_tasks_result_dict.values())
-           #
-
-           if total_running_tasks > queue.concurrency:
+           global_pending_workflows = sum(pending_workflows_dict.values())
+           # Print a warning if the global concurrency limit is violated
+           if global_pending_workflows > queue.concurrency:
                dbos_logger.warning(
-                   f"
+                   f"The total number of pending workflows ({global_pending_workflows}) on queue {queue.name} exceeds the global concurrency limit ({queue.concurrency})"
                )
-           available_tasks = max(0, queue.concurrency -
+           available_tasks = max(0, queue.concurrency - global_pending_workflows)
            max_tasks = min(max_tasks, available_tasks)
 
        # Retrieve the first max_tasks workflows in the queue.
-       # Only retrieve workflows of the
+       # Only retrieve workflows of the local version (or without version set)
        query = (
            sa.select(
                SystemSchema.workflow_queue.c.workflow_uuid,
@@ -1789,8 +1768,10 @@ class SystemDatabase:
            )
        )
        .where(SystemSchema.workflow_queue.c.queue_name == queue.name)
-       .where(
-           SystemSchema.workflow_queue.c.started_at_epoch_ms.is_(None)
+       .where(
+           SystemSchema.workflow_status.c.status
+           == WorkflowStatusString.ENQUEUED.value
+       )
        .where(
            sa.or_(
                SystemSchema.workflow_status.c.application_version
@@ -1819,20 +1800,16 @@ class SystemDatabase:
        ret_ids: list[str] = []
 
        for id in dequeued_ids:
-           # If we have a limiter, stop dequeueing functions when the number
-           # of functions started this period exceeds the limit.
+           # If we have a limiter, stop dequeueing workflows when the number
+           # of workflows started this period exceeds the limit.
            if queue.limiter is not None:
                if len(ret_ids) + num_recent_queries >= queue.limiter["limit"]:
                    break
 
-           # To start a function, first set its status to PENDING and update its executor ID
-           res = c.execute(
+           # To start a workflow, first set its status to PENDING and update its executor ID
+           c.execute(
                SystemSchema.workflow_status.update()
                .where(SystemSchema.workflow_status.c.workflow_uuid == id)
-               .where(
-                   SystemSchema.workflow_status.c.status
-                   == WorkflowStatusString.ENQUEUED.value
-               )
                .values(
                    status=WorkflowStatusString.PENDING.value,
                    application_version=app_version,
@@ -1855,16 +1832,15 @@ class SystemDatabase:
                ),
            )
        )
-           if res.rowcount > 0:
-               # Then give it a start time
-               c.execute(
-                   SystemSchema.workflow_queue.update()
-                   .where(SystemSchema.workflow_queue.c.workflow_uuid == id)
-                   .values(started_at_epoch_ms=start_time_ms)
-               )
-               ret_ids.append(id)
+           # Then give it a start time
+           c.execute(
+               SystemSchema.workflow_queue.update()
+               .where(SystemSchema.workflow_queue.c.workflow_uuid == id)
+               .values(started_at_epoch_ms=start_time_ms)
+           )
+           ret_ids.append(id)
 
-       # If we have a limiter, garbage-collect all completed functions started
+       # If we have a limiter, garbage-collect all completed workflows started
        # before the period. If there's no limiter, there's no need--they were
        # deleted on completion.
        if queue.limiter is not None:
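Dequeue capacity is now computed from PENDING counts alone: a worker limit leaves worker_concurrency minus local pending slots, the global limit leaves concurrency minus global pending, and the smaller bound wins, floored at zero. A worked sketch of that arithmetic outside any database:

    from typing import Optional

    def max_dequeue(
        local_pending: int,
        global_pending: int,
        worker_concurrency: Optional[int],
        concurrency: Optional[int],
    ) -> float:
        # Mirrors the max_tasks computation above; float("inf") means unlimited.
        max_tasks = float("inf")
        if worker_concurrency is not None:
            max_tasks = max(0, worker_concurrency - local_pending)
        if concurrency is not None:
            max_tasks = min(max_tasks, max(0, concurrency - global_pending))
        return max_tasks

    assert max_dequeue(3, 8, worker_concurrency=5, concurrency=10) == 2
    assert max_dequeue(6, 12, worker_concurrency=5, concurrency=10) == 0  # over both limits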
dbos/_workflow_commands.py
CHANGED
@@ -103,16 +103,7 @@ def fork_workflow(
    *,
    application_version: Optional[str],
 ) -> str:
-    def get_max_function_id(workflow_uuid: str) -> int:
-        max_transactions = app_db.get_max_function_id(workflow_uuid) or 0
-        max_operations = sys_db.get_max_function_id(workflow_uuid) or 0
-        return max(max_transactions, max_operations)
-
-    max_function_id = get_max_function_id(workflow_id)
-    if max_function_id > 0 and start_step > max_function_id:
-        raise DBOSException(
-            f"Cannot fork workflow {workflow_id} from step {start_step}. The workflow has {max_function_id} steps."
-        )
+
    ctx = get_local_dbos_context()
    if ctx is not None and len(ctx.id_assigned_for_next_workflow) > 0:
        forked_workflow_id = ctx.id_assigned_for_next_workflow
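With the bounds check on start_step removed, forking from a step beyond the workflow's recorded progress no longer raises DBOSException here, so callers may want to validate the step themselves. A hedged sketch of forking from application code, assuming an already-launched DBOS app; the IDs and step number are illustrative:

    from dbos import DBOS, SetWorkflowID

    # Re-run an existing execution from step 3 under a chosen workflow ID.
    # Steps before start_step are copied from the original run.
    with SetWorkflowID("pipeline-retry-1"):
        handle = DBOS.fork_workflow("original-workflow-id", 3)
    print(handle.get_result())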
{dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/RECORD
CHANGED
@@ -1,24 +1,24 @@
-dbos-1.1.0a4.dist-info/METADATA,sha256=
-dbos-1.1.0a4.dist-info/WHEEL,sha256=
-dbos-1.1.0a4.dist-info/entry_points.txt,sha256=
-dbos-1.1.0a4.dist-info/licenses/LICENSE,sha256=
+dbos-1.2.0a4.dist-info/METADATA,sha256=Tx0t3cKXZZ4AFcoCMceTSlObY86rhSLkxAedOQw5V6c,13267
+dbos-1.2.0a4.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+dbos-1.2.0a4.dist-info/entry_points.txt,sha256=_QOQ3tVfEjtjBlr1jS4sHqHya9lI2aIEIWkz8dqYp14,58
+dbos-1.2.0a4.dist-info/licenses/LICENSE,sha256=VGZit_a5-kdw9WT6fY5jxAWVwGQzgLFyPWrcVVUhVNU,1067
 dbos/__init__.py,sha256=NssPCubaBxdiKarOWa-wViz1hdJSkmBGcpLX_gQ4NeA,891
 dbos/__main__.py,sha256=G7Exn-MhGrVJVDbgNlpzhfh8WMX_72t3_oJaFT9Lmt8,653
-dbos/_admin_server.py,sha256=
-dbos/_app_db.py,sha256=
+dbos/_admin_server.py,sha256=TWXi4drrzKFpKkUmEJpJkQBZxAtOalnhtYicEn2nDK0,10618
+dbos/_app_db.py,sha256=0PKqpxJ3EbIaak3Wl0lNl3hXvhBfz4EEHaCw1bUOvIM,9937
 dbos/_classproperty.py,sha256=f0X-_BySzn3yFDRKB2JpCbLYQ9tLwt1XftfshvY7CBs,626
 dbos/_client.py,sha256=-nK2GjS9D0qnD2DkRDs7gKxNECwYlsvW6hFCjADlnv0,14186
 dbos/_conductor/conductor.py,sha256=o0IaZjwnZ2TOyHeP2H4iSX6UnXLXQ4uODvWAKD9hHMs,21703
 dbos/_conductor/protocol.py,sha256=wgOFZxmS81bv0WCB9dAyg0s6QzldpzVKQDoSPeaX0Ws,6967
 dbos/_context.py,sha256=5ajoWAmToAfzzmMLylnJZoL4Ny9rBwZWuG05sXadMIA,24798
-dbos/_core.py,sha256=
+dbos/_core.py,sha256=7ukQH_KClBaMFy0sVTSR5tWylW-RqI9qaReBY-LDKrk,48316
 dbos/_croniter.py,sha256=XHAyUyibs_59sJQfSNWkP7rqQY6_XrlfuuCxk4jYqek,47559
-dbos/_dbos.py,sha256=
-dbos/_dbos_config.py,sha256=
+dbos/_dbos.py,sha256=1EhH7r6v2vwW3Z74nK6_Zw8InE1jSXedEsztz0I4ggA,47269
+dbos/_dbos_config.py,sha256=BFL2ol4nrqOPEiu1Dj-Nk3HRiVih0DecOgCdMyENOSQ,20233
 dbos/_debug.py,sha256=MNlQVZ6TscGCRQeEEL0VE8Uignvr6dPeDDDefS3xgIE,1823
 dbos/_docker_pg_helper.py,sha256=tLJXWqZ4S-ExcaPnxg_i6cVxL6ZxrYlZjaGsklY-s2I,6115
 dbos/_error.py,sha256=q0OQJZTbR8FFHV9hEpAGpz9oWBT5L509zUhmyff7FJw,8500
-dbos/_event_loop.py,sha256=
+dbos/_event_loop.py,sha256=ts2T1_imfQjdu6hPs7-WZHui4DtmsZ2HUsPgIJ1GXZg,2335
 dbos/_fastapi.py,sha256=m4SL3H9P-NBQ_ZrbFxAWMOqNyIi3HGEn2ODR7xAK038,3118
 dbos/_flask.py,sha256=Npnakt-a3W5OykONFRkDRnumaDhTQmA0NPdUCGRYKXE,1652
 dbos/_kafka.py,sha256=pz0xZ9F3X9Ky1k-VSbeF3tfPhP3UPr3lUUhUfE41__U,4198
@@ -38,7 +38,7 @@ dbos/_migrations/versions/d76646551a6c_workflow_queue.py,sha256=G942nophZ2uC2vc4
 dbos/_migrations/versions/eab0cc1d9a14_job_queue.py,sha256=uvhFOtqbBreCePhAxZfIT0qCAI7BiZTou9wt6QnbY7c,1412
 dbos/_migrations/versions/f4b9b32ba814_functionname_childid_op_outputs.py,sha256=m90Lc5YH0ZISSq1MyxND6oq3RZrZKrIqEsZtwJ1jWxA,1049
 dbos/_outcome.py,sha256=EXxBg4jXCVJsByDQ1VOCIedmbeq_03S6d-p1vqQrLFU,6810
-dbos/_queue.py,sha256=
+dbos/_queue.py,sha256=6cmqB1DoCJUh-y7DetneZRrL5jM5mw0xG9qj7jPu8EE,3687
 dbos/_recovery.py,sha256=jVMexjfCCNopzyn8gVQzJCmGJaP9G3C1EFaoCQ_Nh7g,2564
 dbos/_registrations.py,sha256=CZt1ElqDjCT7hz6iyT-1av76Yu-iuwu_c9lozO87wvM,7303
 dbos/_roles.py,sha256=iOsgmIAf1XVzxs3gYWdGRe1B880YfOw5fpU7Jwx8_A8,2271
@@ -47,7 +47,7 @@ dbos/_schemas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_schemas/application_database.py,sha256=SypAS9l9EsaBHFn9FR8jmnqt01M74d9AF1AMa4m2hhI,1040
 dbos/_schemas/system_database.py,sha256=3Z0L72bOgHnusK1hBaETWU9RfiLBP0QnS-fdu41i0yY,5835
 dbos/_serialization.py,sha256=bWuwhXSQcGmiazvhJHA5gwhrRWxtmFmcCFQSDJnqqkU,3666
-dbos/_sys_db.py,sha256=
+dbos/_sys_db.py,sha256=BZdUrFHG8Ze77hIuxwHpsnE--6UymjjhlH7cA3yP_-0,83230
 dbos/_templates/dbos-db-starter/README.md,sha256=GhxhBj42wjTt1fWEtwNriHbJuKb66Vzu89G4pxNHw2g,930
 dbos/_templates/dbos-db-starter/__package/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbos/_templates/dbos-db-starter/__package/main.py.dbos,sha256=aQnBPSSQpkB8ERfhf7gB7P9tsU6OPKhZscfeh0yiaD8,2702
@@ -60,11 +60,11 @@ dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py,sh
 dbos/_templates/dbos-db-starter/start_postgres_docker.py,sha256=lQVLlYO5YkhGPEgPqwGc7Y8uDKse9HsWv5fynJEFJHM,1681
 dbos/_tracer.py,sha256=yN6GRDKu_1p-EqtQLNarMocPfga2ZuqpzStzzSPYhzo,2732
 dbos/_utils.py,sha256=UbpMYRBSyvJqdXeWAnfSw8xXM1R1mfnyl1oTunhEjJM,513
-dbos/_workflow_commands.py,sha256=
+dbos/_workflow_commands.py,sha256=UCpHWvCEXjVZtf5FNanFvtJpgUJDSI1EFBqQP0x_2A0,3346
 dbos/cli/_github_init.py,sha256=Y_bDF9gfO2jB1id4FV5h1oIxEJRWyqVjhb7bNEa5nQ0,3224
 dbos/cli/_template_init.py,sha256=7JBcpMqP1r2mfCnvWatu33z8ctEGHJarlZYKgB83cXE,2972
 dbos/cli/cli.py,sha256=HinoCGrAUTiSeq7AAoCFfhdiE0uDw7vLMuDMN1_YTLI,20705
 dbos/dbos-config.schema.json,sha256=CjaspeYmOkx6Ip_pcxtmfXJTn_YGdSx_0pcPBF7KZmo,6060
 dbos/py.typed,sha256=QfzXT1Ktfk3Rj84akygc7_42z0lRpCq0Ilh8OXI6Zas,44
 version/__init__.py,sha256=L4sNxecRuqdtSFdpUGX3TtBi9KL3k7YsZVIvv-fv9-A,1678
-dbos-1.1.0a4.dist-info/RECORD,,
+dbos-1.2.0a4.dist-info/RECORD,,
{dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/WHEEL
File without changes
{dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/entry_points.txt
File without changes
{dbos-1.1.0a4.dist-info → dbos-1.2.0a4.dist-info}/licenses/LICENSE
File without changes