dbos 1.12.0a3__tar.gz → 1.13.0a3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dbos might be problematic.
- {dbos-1.12.0a3 → dbos-1.13.0a3}/PKG-INFO +1 -1
- dbos-1.13.0a3/dbos/_alembic_migrations/versions/471b60d64126_dbos_migrations.py +35 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_context.py +4 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_core.py +3 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_dbos.py +24 -6
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_dbos_config.py +6 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_fastapi.py +1 -1
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_logger.py +3 -1
- dbos-1.13.0a3/dbos/_migration.py +233 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_sys_db.py +10 -40
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_tracer.py +5 -1
- {dbos-1.12.0a3 → dbos-1.13.0a3}/pyproject.toml +1 -1
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_dbos.py +34 -1
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_schema_migration.py +55 -40
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_spans.py +58 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/LICENSE +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/README.md +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/__init__.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/__main__.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_admin_server.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/env.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/script.py.mako +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/01ce9f07bd10_streaming.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/27ac6900c6ad_add_queue_dedup.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/50f3227f0b4b_fix_job_queue.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/5c361fc04708_added_system_tables.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/66478e1b95e5_consolidate_queues.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/83f3732ae8e7_workflow_timeout.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/933e86bdac6a_add_queue_priority.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/a3b18ad34abe_added_triggers.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/d76646551a6b_job_queue_limiter.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/d76646551a6c_workflow_queue.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/d994145b47b6_consolidate_inputs.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/eab0cc1d9a14_job_queue.py +0 -0
- {dbos-1.12.0a3/dbos/_migrations → dbos-1.13.0a3/dbos/_alembic_migrations}/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_app_db.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_classproperty.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_client.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_conductor/conductor.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_conductor/protocol.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_croniter.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_debug.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_docker_pg_helper.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_error.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_event_loop.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_flask.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_kafka.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_kafka_message.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_outcome.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_queue.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_recovery.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_registrations.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_roles.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_scheduler.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_schemas/__init__.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_schemas/application_database.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_schemas/system_database.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_serialization.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/README.md +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/__package/__init__.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/__package/main.py.dbos +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/__package/schema.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/alembic.ini +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/dbos-config.yaml.dbos +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/migrations/env.py.dbos +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/migrations/script.py.mako +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/migrations/versions/2024_07_31_180642_init.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_templates/dbos-db-starter/start_postgres_docker.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_utils.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/_workflow_commands.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/cli/_github_init.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/cli/_template_init.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/cli/cli.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/cli/migration.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/dbos-config.schema.json +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/dbos/py.typed +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/__init__.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/atexit_no_ctor.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/atexit_no_launch.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/classdefs.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/client_collateral.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/client_worker.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/conftest.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/dupname_classdefs1.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/dupname_classdefsa.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/more_classdefs.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/queuedworkflow.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_admin_server.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_async.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_async_workflow_management.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_classdecorators.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_cli.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_client.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_concurrency.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_config.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_croniter.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_debug.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_docker_secrets.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_failures.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_fastapi.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_fastapi_roles.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_flask.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_kafka.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_migrate.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_outcome.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_package.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_queue.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_scheduler.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_singleton.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_sqlalchemy.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_streaming.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_workflow_introspection.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/tests/test_workflow_management.py +0 -0
- {dbos-1.12.0a3 → dbos-1.13.0a3}/version/__init__.py +0 -0
dbos/_alembic_migrations/versions/471b60d64126_dbos_migrations.py (new file):

```diff
@@ -0,0 +1,35 @@
+"""dbos_migrations
+
+Revision ID: 471b60d64126
+Revises: 01ce9f07bd10
+Create Date: 2025-08-21 14:22:31.455266
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "471b60d64126"
+down_revision: Union[str, None] = "01ce9f07bd10"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # Create dbos_migrations table
+    op.create_table(
+        "dbos_migrations",
+        sa.Column("version", sa.BigInteger(), nullable=False),
+        sa.PrimaryKeyConstraint("version"),
+        schema="dbos",
+    )
+
+    # Insert initial version 1
+    op.execute("INSERT INTO dbos.dbos_migrations (version) VALUES (1)")
+
+
+def downgrade() -> None:
+    op.drop_table("dbos_migrations", schema="dbos")
```
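This final Alembic revision bootstraps the new DBOS-managed migration scheme: instead of Alembic's `alembic_version` table, schema state is now tracked as a single integer row in `dbos.dbos_migrations`. As a rough illustration (not part of the package; the connection URL is a placeholder), the installed schema version could be inspected like this:

```python
import sqlalchemy as sa

# Placeholder URL; point this at your DBOS system database.
engine = sa.create_engine("postgresql://postgres:dbos@localhost:5432/app_dbos_sys")

with engine.connect() as conn:
    # The table holds at most one row: the last applied migration number.
    row = conn.execute(sa.text("SELECT version FROM dbos.dbos_migrations")).fetchone()
    print("DBOS system schema version:", row[0] if row else "not initialized")
```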
dbos/_context.py:

```diff
@@ -221,6 +221,8 @@ class DBOSContext:
         return None
 
     def _start_span(self, attributes: TracedAttributes) -> None:
+        if dbos_tracer.disable_otlp:
+            return
         attributes["operationUUID"] = (
             self.workflow_id if len(self.workflow_id) > 0 else None
         )
@@ -246,6 +248,8 @@ class DBOSContext:
         cm.__enter__()
 
     def _end_span(self, exc_value: Optional[BaseException]) -> None:
+        if dbos_tracer.disable_otlp:
+            return
         context_span = self.context_spans.pop()
         if exc_value is None:
             context_span.span.set_status(Status(StatusCode.OK))
```
dbos/_core.py:

```diff
@@ -356,6 +356,7 @@ def _get_wf_invoke_func(
             )
             return recorded_result
         try:
+            dbos._active_workflows_set.add(status["workflow_uuid"])
             output = func()
             if not dbos.debug_mode:
                 dbos._sys_db.update_workflow_outcome(
@@ -378,6 +379,8 @@
                     error=_serialization.serialize_exception(error),
                 )
             raise
+        finally:
+            dbos._active_workflows_set.discard(status["workflow_uuid"])
 
     return persist
 
```
dbos/_dbos.py:

```diff
@@ -293,16 +293,24 @@ class DBOS:
         return _dbos_global_instance
 
     @classmethod
-    def destroy(cls, *, destroy_registry: bool = False) -> None:
+    def destroy(
+        cls,
+        *,
+        destroy_registry: bool = False,
+        workflow_completion_timeout_sec: int = 0,
+    ) -> None:
         global _dbos_global_instance
         if _dbos_global_instance is not None:
-            _dbos_global_instance._destroy()
+            _dbos_global_instance._destroy(
+                workflow_completion_timeout_sec=workflow_completion_timeout_sec,
+            )
             _dbos_global_instance = None
         if destroy_registry:
             global _dbos_global_registry
             _dbos_global_registry = None
         GlobalParams.app_version = os.environ.get("DBOS__APPVERSION", "")
         GlobalParams.executor_id = os.environ.get("DBOS__VMID", "local")
+        dbos_logger.info("DBOS successfully shut down")
 
     def __init__(
         self,
@@ -337,6 +345,7 @@ class DBOS:
         self.conductor_key: Optional[str] = conductor_key
         self.conductor_websocket: Optional[ConductorWebsocket] = None
         self._background_event_loop: BackgroundEventLoop = BackgroundEventLoop()
+        self._active_workflows_set: set[str] = set()
 
         # Globally set the application version and executor ID.
         # In DBOS Cloud, instead use the values supplied through environment variables.
@@ -588,12 +597,23 @@ class DBOS:
 
         reset_system_database(pg_db_url, sysdb_name)
 
-    def _destroy(self) -> None:
+    def _destroy(self, *, workflow_completion_timeout_sec: int) -> None:
         self._initialized = False
         for event in self.poller_stop_events:
             event.set()
         for event in self.background_thread_stop_events:
             event.set()
+        if workflow_completion_timeout_sec > 0:
+            deadline = time.time() + workflow_completion_timeout_sec
+            while time.time() < deadline:
+                time.sleep(1)
+                active_workflows = len(self._active_workflows_set)
+                if active_workflows > 0:
+                    dbos_logger.info(
+                        f"Attempting to shut down DBOS. {active_workflows} workflows remain active. IDs: {self._active_workflows_set}"
+                    )
+                else:
+                    break
         self._background_event_loop.stop()
         if self._sys_db_field is not None:
             self._sys_db_field.destroy()
@@ -609,10 +629,8 @@ class DBOS:
             and self.conductor_websocket.websocket is not None
         ):
             self.conductor_websocket.websocket.close()
-        # CB - This needs work, some things ought to stop before DBs are tossed out,
-        # on the other hand it hangs to move it
         if self._executor_field is not None:
-            self._executor_field.shutdown(cancel_futures=True)
+            self._executor_field.shutdown(wait=False, cancel_futures=True)
         self._executor_field = None
         for bg_thread in self._background_threads:
             bg_thread.join()
```
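Together with the `_active_workflows_set` bookkeeping added in `_core.py`, this gives `destroy()` a best-effort graceful-shutdown mode: it can wait up to a caller-supplied number of seconds for in-flight workflows to drain before tearing down the executor. A minimal usage sketch, where `config` is a placeholder `DBOSConfig` defined elsewhere:

```python
from dbos import DBOS

DBOS(config=config)  # `config` is a placeholder DBOSConfig
DBOS.launch()

# ... start or enqueue workflows ...

# Wait up to 30 seconds for active workflows to finish before shutting down.
# With the default of 0, destroy() tears down immediately, as before.
DBOS.destroy(workflow_completion_timeout_sec=30)
```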
dbos/_dbos_config.py:

```diff
@@ -33,6 +33,9 @@ class DBOSConfig(TypedDict, total=False):
         admin_port (int): Admin port
         run_admin_server (bool): Whether to run the DBOS admin server
         otlp_attributes (dict[str, str]): A set of custom attributes to apply OTLP-exported logs and traces
+        application_version (str): Application version
+        executor_id (str): Executor ID, used to identify the application instance in distributed environments
+        disable_otlp (bool): If True, disables OTLP tracing and logging. Defaults to False.
     """
 
     name: str
@@ -49,6 +52,7 @@ class DBOSConfig(TypedDict, total=False):
     otlp_attributes: Optional[dict[str, str]]
     application_version: Optional[str]
     executor_id: Optional[str]
+    disable_otlp: Optional[bool]
 
 
 class RuntimeConfig(TypedDict, total=False):
@@ -91,6 +95,7 @@ class TelemetryConfig(TypedDict, total=False):
     logs: Optional[LoggerConfig]
     OTLPExporter: Optional[OTLPExporterConfig]
    otlp_attributes: Optional[dict[str, str]]
+    disable_otlp: Optional[bool]
 
 
 class ConfigFile(TypedDict, total=False):
@@ -157,6 +162,7 @@ def translate_dbos_config_to_config_file(config: DBOSConfig) -> ConfigFile:
     telemetry: TelemetryConfig = {
         "OTLPExporter": {"tracesEndpoint": [], "logsEndpoint": []},
         "otlp_attributes": config.get("otlp_attributes", {}),
+        "disable_otlp": config.get("disable_otlp", False),
     }
     # For mypy
     assert telemetry["OTLPExporter"] is not None
```
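The new `disable_otlp` flag is threaded from `DBOSConfig` into the internal `TelemetryConfig`, letting applications opt out of OTLP span creation and log export entirely. A rough sketch of enabling it (the app name and connection string are placeholders):

```python
from dbos import DBOS, DBOSConfig

config: DBOSConfig = {
    "name": "my-app",  # placeholder application name
    "database_url": "postgresql://postgres:dbos@localhost:5432/my_app",  # placeholder
    "disable_otlp": True,  # skip OTLP tracing and logging
}
DBOS(config=config)
DBOS.launch()
```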
dbos/_fastapi.py:

```diff
@@ -49,7 +49,7 @@ class LifespanMiddleware:
             if not self.dbos._launched:
                 self.dbos._launch()
         elif message["type"] == "lifespan.shutdown.complete":
-            self.dbos._destroy()
+            self.dbos.destroy()
         await send(message)
 
     # Call the original app with our wrapped functions
```
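This one-line fix routes ASGI lifespan shutdown through the public `destroy()` method, which now performs the full graceful teardown. A sketch of the integration it affects, with `config` as a placeholder `DBOSConfig`:

```python
from fastapi import FastAPI
from dbos import DBOS

app = FastAPI()
DBOS(fastapi=app, config=config)  # `config` is a placeholder DBOSConfig
# On the "lifespan.shutdown.complete" message, the middleware now calls
# DBOS.destroy(), so the DBOS runtime is torn down when the ASGI server stops.
```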
dbos/_logger.py:

```diff
@@ -77,7 +77,9 @@ def config_logger(config: "ConfigFile") -> None:
     otlp_logs_endpoints = (
         config.get("telemetry", {}).get("OTLPExporter", {}).get("logsEndpoint")  # type: ignore
     )
-    if otlp_logs_endpoints:
+    disable_otlp = config.get("telemetry", {}).get("disable_otlp", False)  # type: ignore
+
+    if not disable_otlp and otlp_logs_endpoints:
         log_provider = PatchedOTLPLoggerProvider(
             Resource.create(
                 attributes={
```
dbos/_migration.py (new file):

```diff
@@ -0,0 +1,233 @@
+import logging
+import os
+import re
+
+import sqlalchemy as sa
+from alembic import command
+from alembic.config import Config
+
+from ._logger import dbos_logger
+
+
+def ensure_dbos_schema(engine: sa.Engine) -> bool:
+    """
+    True if using DBOS migrations (DBOS schema and migrations table already exist or were created)
+    False if using Alembic migrations (DBOS schema exists, but dbos_migrations table doesn't)
+    """
+    with engine.begin() as conn:
+        # Check if dbos schema exists
+        schema_result = conn.execute(
+            sa.text(
+                "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'dbos'"
+            )
+        )
+        schema_existed = schema_result.fetchone() is not None
+
+        # Create schema if it doesn't exist
+        if not schema_existed:
+            conn.execute(sa.text("CREATE SCHEMA dbos"))
+
+        # Check if dbos_migrations table exists
+        table_result = conn.execute(
+            sa.text(
+                "SELECT table_name FROM information_schema.tables WHERE table_schema = 'dbos' AND table_name = 'dbos_migrations'"
+            )
+        )
+        table_exists = table_result.fetchone() is not None
+
+        if table_exists:
+            return True
+        elif schema_existed:
+            return False
+        else:
+            conn.execute(
+                sa.text(
+                    "CREATE TABLE dbos.dbos_migrations (version BIGINT NOT NULL PRIMARY KEY)"
+                )
+            )
+            return True
+
+
+def run_alembic_migrations(engine: sa.Engine) -> None:
+    """Run system database schema migrations with Alembic.
+    This is DEPRECATED in favor of DBOS-managed migrations.
+    It is retained only for backwards compatibility and
+    will be removed in the next major version."""
+    # Run a schema migration for the system database
+    migration_dir = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), "_alembic_migrations"
+    )
+    alembic_cfg = Config()
+    alembic_cfg.set_main_option("script_location", migration_dir)
+    logging.getLogger("alembic").setLevel(logging.WARNING)
+    # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
+    escaped_conn_string = re.sub(
+        r"%(?=[0-9A-Fa-f]{2})",
+        "%%",
+        engine.url.render_as_string(hide_password=False),
+    )
+    alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
+    try:
+        command.upgrade(alembic_cfg, "head")
+    except Exception as e:
+        dbos_logger.warning(
+            f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
+        )
+
+
+def run_dbos_migrations(engine: sa.Engine) -> None:
+    """Run DBOS-managed migrations by executing each SQL command in dbos_migrations."""
+    with engine.begin() as conn:
+        # Get current migration version
+        result = conn.execute(sa.text("SELECT version FROM dbos.dbos_migrations"))
+        current_version = result.fetchone()
+        last_applied = current_version[0] if current_version else 0
+
+        # Apply migrations starting from the next version
+        for i, migration_sql in enumerate(dbos_migrations, 1):
+            if i <= last_applied:
+                continue
+
+            # Execute the migration
+            dbos_logger.info(f"Applying DBOS system database schema migration {i}")
+            conn.execute(sa.text(migration_sql))
+
+            # Update the single row with the new version
+            if last_applied == 0:
+                conn.execute(
+                    sa.text(
+                        "INSERT INTO dbos.dbos_migrations (version) VALUES (:version)"
+                    ),
+                    {"version": i},
+                )
+            else:
+                conn.execute(
+                    sa.text("UPDATE dbos.dbos_migrations SET version = :version"),
+                    {"version": i},
+                )
+            last_applied = i
+
+
+dbos_migration_one = """
+-- Enable uuid extension for generating UUIDs
+CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+
+CREATE TABLE dbos.workflow_status (
+    workflow_uuid TEXT PRIMARY KEY,
+    status TEXT,
+    name TEXT,
+    authenticated_user TEXT,
+    assumed_role TEXT,
+    authenticated_roles TEXT,
+    request TEXT,
+    output TEXT,
+    error TEXT,
+    executor_id TEXT,
+    created_at BIGINT NOT NULL DEFAULT (EXTRACT(epoch FROM now()) * 1000::numeric)::bigint,
+    updated_at BIGINT NOT NULL DEFAULT (EXTRACT(epoch FROM now()) * 1000::numeric)::bigint,
+    application_version TEXT,
+    application_id TEXT,
+    class_name VARCHAR(255) DEFAULT NULL,
+    config_name VARCHAR(255) DEFAULT NULL,
+    recovery_attempts BIGINT DEFAULT 0,
+    queue_name TEXT,
+    workflow_timeout_ms BIGINT,
+    workflow_deadline_epoch_ms BIGINT,
+    inputs TEXT,
+    started_at_epoch_ms BIGINT,
+    deduplication_id TEXT,
+    priority INTEGER NOT NULL DEFAULT 0
+);
+
+CREATE INDEX workflow_status_created_at_index ON dbos.workflow_status (created_at);
+CREATE INDEX workflow_status_executor_id_index ON dbos.workflow_status (executor_id);
+CREATE INDEX workflow_status_status_index ON dbos.workflow_status (status);
+
+ALTER TABLE dbos.workflow_status
+    ADD CONSTRAINT uq_workflow_status_queue_name_dedup_id
+    UNIQUE (queue_name, deduplication_id);
+
+CREATE TABLE dbos.operation_outputs (
+    workflow_uuid TEXT NOT NULL,
+    function_id INTEGER NOT NULL,
+    function_name TEXT NOT NULL DEFAULT '',
+    output TEXT,
+    error TEXT,
+    child_workflow_id TEXT,
+    PRIMARY KEY (workflow_uuid, function_id),
+    FOREIGN KEY (workflow_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+CREATE TABLE dbos.notifications (
+    destination_uuid TEXT NOT NULL,
+    topic TEXT,
+    message TEXT NOT NULL,
+    created_at_epoch_ms BIGINT NOT NULL DEFAULT (EXTRACT(epoch FROM now()) * 1000::numeric)::bigint,
+    message_uuid TEXT NOT NULL DEFAULT gen_random_uuid(), -- Built-in function
+    FOREIGN KEY (destination_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+CREATE INDEX idx_workflow_topic ON dbos.notifications (destination_uuid, topic);
+
+-- Create notification function
+CREATE OR REPLACE FUNCTION dbos.notifications_function() RETURNS TRIGGER AS $$
+DECLARE
+    payload text := NEW.destination_uuid || '::' || NEW.topic;
+BEGIN
+    PERFORM pg_notify('dbos_notifications_channel', payload);
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Create notification trigger
+CREATE TRIGGER dbos_notifications_trigger
+    AFTER INSERT ON dbos.notifications
+    FOR EACH ROW EXECUTE FUNCTION dbos.notifications_function();
+
+CREATE TABLE dbos.workflow_events (
+    workflow_uuid TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    PRIMARY KEY (workflow_uuid, key),
+    FOREIGN KEY (workflow_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+-- Create events function
+CREATE OR REPLACE FUNCTION dbos.workflow_events_function() RETURNS TRIGGER AS $$
+DECLARE
+    payload text := NEW.workflow_uuid || '::' || NEW.key;
+BEGIN
+    PERFORM pg_notify('dbos_workflow_events_channel', payload);
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Create events trigger
+CREATE TRIGGER dbos_workflow_events_trigger
+    AFTER INSERT ON dbos.workflow_events
+    FOR EACH ROW EXECUTE FUNCTION dbos.workflow_events_function();
+
+CREATE TABLE dbos.streams (
+    workflow_uuid TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    "offset" INTEGER NOT NULL,
+    PRIMARY KEY (workflow_uuid, key, "offset"),
+    FOREIGN KEY (workflow_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+CREATE TABLE dbos.event_dispatch_kv (
+    service_name TEXT NOT NULL,
+    workflow_fn_name TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    update_seq NUMERIC(38,0),
+    update_time NUMERIC(38,15),
+    PRIMARY KEY (service_name, workflow_fn_name, key)
+);
+"""
+
+dbos_migrations = [dbos_migration_one]
```
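The runner in `run_dbos_migrations` applies list entries strictly in order and records only the highest applied index, so future schema changes are appended to `dbos_migrations` rather than added as new Alembic revisions. As a purely hypothetical illustration of the pattern (this second migration does not exist in the package):

```python
# Hypothetical second migration: each list entry is a SQL script applied in one
# transaction, after which the single row in dbos.dbos_migrations advances to
# the entry's 1-based index.
dbos_migration_two = """
ALTER TABLE dbos.workflow_status ADD COLUMN example_column TEXT;  -- illustrative only
"""

dbos_migrations = [dbos_migration_one, dbos_migration_two]
```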
dbos/_sys_db.py:

```diff
@@ -1,10 +1,7 @@
 import datetime
 import functools
 import json
-import logging
-import os
 import random
-import re
 import threading
 import time
 from enum import Enum
@@ -25,11 +22,14 @@ from typing import (
 import psycopg
 import sqlalchemy as sa
 import sqlalchemy.dialects.postgresql as pg
-from alembic import command
-from alembic.config import Config
 from sqlalchemy.exc import DBAPIError
 from sqlalchemy.sql import func
 
+from dbos._migration import (
+    ensure_dbos_schema,
+    run_alembic_migrations,
+    run_dbos_migrations,
+)
 from dbos._utils import INTERNAL_QUEUE_NAME, retriable_postgres_exception
 
 from . import _serialization
@@ -386,41 +386,11 @@ class SystemDatabase:
             conn.execute(sa.text(f"CREATE DATABASE {sysdb_name}"))
         engine.dispose()
 
-        # Run a schema migration for the system database
-        migration_dir = os.path.join(
-            os.path.dirname(os.path.realpath(__file__)), "_migrations"
-        )
-        alembic_cfg = Config()
-        alembic_cfg.set_main_option("script_location", migration_dir)
-        logging.getLogger("alembic").setLevel(logging.WARNING)
-        # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
-        escaped_conn_string = re.sub(
-            r"%(?=[0-9A-Fa-f]{2})",
-            "%%",
-            self.engine.url.render_as_string(hide_password=False),
-        )
-        alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
-        try:
-            command.upgrade(alembic_cfg, "head")
-        except Exception as e:
-            dbos_logger.warning(
-                f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
-            )
-        alembic_cfg = Config()
-        alembic_cfg.set_main_option("script_location", migration_dir)
-        # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
-        escaped_conn_string = re.sub(
-            r"%(?=[0-9A-Fa-f]{2})",
-            "%%",
-            self.engine.url.render_as_string(hide_password=False),
-        )
-        alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
-        try:
-            command.upgrade(alembic_cfg, "head")
-        except Exception as e:
-            dbos_logger.warning(
-                f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
-            )
+        using_dbos_migrations = ensure_dbos_schema(self.engine)
+        if not using_dbos_migrations:
+            # Complete the Alembic migrations, create the dbos_migrations table
+            run_alembic_migrations(self.engine)
+        run_dbos_migrations(self.engine)
 
     # Destroy the pool when finished
     def destroy(self) -> None:
```
dbos/_tracer.py:

```diff
@@ -24,10 +24,14 @@ class DBOSTracer:
     def __init__(self) -> None:
         self.app_id = os.environ.get("DBOS__APPID", None)
         self.provider: Optional[TracerProvider] = None
+        self.disable_otlp: bool = False
 
     def config(self, config: ConfigFile) -> None:
         self.otlp_attributes = config.get("telemetry", {}).get("otlp_attributes", {})  # type: ignore
-        if not isinstance(trace.get_tracer_provider(), TracerProvider):
+        self.disable_otlp = config.get("telemetry", {}).get("disable_otlp", False)  # type: ignore
+        if not self.disable_otlp and not isinstance(
+            trace.get_tracer_provider(), TracerProvider
+        ):
             resource = Resource(
                 attributes={
                     ResourceAttributes.SERVICE_NAME: config["name"],
```
tests/test_dbos.py:

```diff
@@ -27,7 +27,7 @@ from dbos._context import assert_current_dbos_context, get_local_dbos_context
 from dbos._error import (
     DBOSAwaitedWorkflowCancelledError,
     DBOSConflictingRegistrationError,
-
+    DBOSException,
 )
 from dbos._schemas.system_database import SystemSchema
 from dbos._sys_db import GetWorkflowsInput
@@ -1684,3 +1684,36 @@ def test_nested_steps(dbos: DBOS) -> None:
     steps = DBOS.list_workflow_steps(id)
     assert len(steps) == 1
     assert steps[0]["function_name"] == outer_step.__qualname__
+
+
+def test_destroy(dbos: DBOS, config: DBOSConfig) -> None:
+
+    @DBOS.workflow()
+    def unblocked_workflow() -> None:
+        return
+
+    blocking_event = threading.Event()
+
+    @DBOS.workflow()
+    def blocked_workflow() -> None:
+        blocking_event.wait()
+
+    unblocked_workflow()
+
+    # Destroy DBOS with no active workflows, verify it is destroyed immediately
+    start = time.time()
+    DBOS.destroy(workflow_completion_timeout_sec=60)
+    assert time.time() - start < 5
+
+    DBOS(config=config)
+    DBOS.launch()
+
+    handle = DBOS.start_workflow(blocked_workflow)
+
+    # Destroy DBOS with an active workflow, verify it waits out the timeout
+    start = time.time()
+    DBOS.destroy(workflow_completion_timeout_sec=3)
+    assert time.time() - start > 3
+    blocking_event.set()
+    with pytest.raises(DBOSException):
+        handle.get_result()
```
tests/test_schema_migration.py:

```diff
@@ -1,16 +1,13 @@
-import os
-import re
-
 import pytest
 import sqlalchemy as sa
-from alembic import command
-from alembic.config import Config
 
 # Public API
 from dbos import DBOS, DBOSConfig
+from dbos._migration import dbos_migrations, run_alembic_migrations
 
 # Private API because this is a unit test
 from dbos._schemas.system_database import SystemSchema
+from dbos._sys_db import SystemDatabase
 
 
 def test_systemdb_migration(dbos: DBOS) -> None:
@@ -32,16 +29,61 @@ def test_systemdb_migration(dbos: DBOS) -> None:
         result = connection.execute(sql)
         assert result.fetchall() == []
 
-
-
-
-
+        # Check dbos_migrations table exists, has one row, and has the right version
+        migrations_result = connection.execute(
+            sa.text("SELECT version FROM dbos.dbos_migrations")
+        )
+        migrations_rows = migrations_result.fetchall()
+        assert len(migrations_rows) == 1
+        assert migrations_rows[0][0] == len(dbos_migrations)
+
+
+def test_alembic_migrations_compatibility(
+    config: DBOSConfig, postgres_db_engine: sa.Engine
+) -> None:
+    system_database_url = f"{config['database_url']}_dbos_sys"
+    sysdb_name = sa.make_url(system_database_url).database
 
+    # Drop and recreate the system database
+    with postgres_db_engine.connect() as connection:
+        connection.execution_options(isolation_level="AUTOCOMMIT")
+        connection.execute(sa.text(f'DROP DATABASE IF EXISTS "{sysdb_name}"'))
+        connection.execute(sa.text(f'CREATE DATABASE "{sysdb_name}"'))
+
+    sys_db = SystemDatabase(system_database_url=system_database_url, engine_kwargs={})
+    # Run the deprecated Alembic migrations
+    run_alembic_migrations(sys_db.engine)
+    # Then, run the new migrations to verify they work from a system database
+    # that started in Alembic.
+    dbos = DBOS(config=config)
+    DBOS.launch()
+    # Make sure all tables exist
     with dbos._sys_db.engine.connect() as connection:
-
-
-
-
+        sql = SystemSchema.workflow_status.select()
+        result = connection.execute(sql)
+        assert result.fetchall() == []
+
+        sql = SystemSchema.operation_outputs.select()
+        result = connection.execute(sql)
+        assert result.fetchall() == []
+
+        sql = SystemSchema.workflow_events.select()
+        result = connection.execute(sql)
+        assert result.fetchall() == []
+
+        sql = SystemSchema.notifications.select()
+        result = connection.execute(sql)
+        assert result.fetchall() == []
+
+        # Check dbos_migrations table exists, has one row, and has the right version
+        migrations_result = connection.execute(
+            sa.text("SELECT version FROM dbos.dbos_migrations")
+        )
+        migrations_rows = migrations_result.fetchall()
+        assert len(migrations_rows) == 1
+        assert migrations_rows[0][0] == len(dbos_migrations)
+
+        assert DBOS.list_workflows() == []
 
 
 def test_custom_sysdb_name_migration(
@@ -66,36 +108,9 @@ def test_custom_sysdb_name_migration(
         result = connection.execute(sql)
         assert result.fetchall() == []
 
-    # Test migrating down
-    rollback_system_db(
-        sysdb_url=dbos._sys_db.engine.url.render_as_string(hide_password=False)
-    )
-
-    with dbos._sys_db.engine.connect() as connection:
-        with pytest.raises(sa.exc.ProgrammingError) as exc_info:
-            sql = SystemSchema.workflow_status.select()
-            result = connection.execute(sql)
-        assert "does not exist" in str(exc_info.value)
     DBOS.destroy()
 
 
-def rollback_system_db(sysdb_url: str) -> None:
-    migration_dir = os.path.join(
-        os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
-        "dbos",
-        "_migrations",
-    )
-    alembic_cfg = Config()
-    alembic_cfg.set_main_option("script_location", migration_dir)
-    escaped_conn_string = re.sub(
-        r"%(?=[0-9A-Fa-f]{2})",
-        "%%",
-        sysdb_url,
-    )
-    alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
-    command.downgrade(alembic_cfg, "base")  # Rollback all migrations
-
-
 def test_reset(config: DBOSConfig, postgres_db_engine: sa.Engine) -> None:
     DBOS.destroy()
     dbos = DBOS(config=config)
```
tests/test_spans.py:

```diff
@@ -254,3 +254,61 @@ def test_wf_fastapi(dbos_fastapi: Tuple[DBOS, FastAPI]) -> None:
     assert spans[0].context is not None
     assert logs[0].log_record.span_id == spans[0].context.span_id
     assert logs[0].log_record.trace_id == spans[0].context.trace_id
+
+
+def test_disable_otlp_no_spans(config: DBOSConfig) -> None:
+    DBOS.destroy(destroy_registry=True)
+    config["otlp_attributes"] = {"foo": "bar"}
+    config["disable_otlp"] = True
+    DBOS(config=config)
+    DBOS.launch()
+
+    @DBOS.workflow()
+    def test_workflow() -> None:
+        test_step()
+        DBOS.logger.info("This is a test_workflow")
+
+    @DBOS.step()
+    def test_step() -> None:
+        DBOS.logger.info("This is a test_step")
+        return
+
+    exporter = InMemorySpanExporter()
+    span_processor = SimpleSpanProcessor(exporter)
+    provider = tracesdk.TracerProvider()
+    provider.add_span_processor(span_processor)
+    dbos_tracer.set_provider(provider)
+
+    # Set up in-memory log exporter
+    log_exporter = InMemoryLogExporter()  # type: ignore
+    log_processor = BatchLogRecordProcessor(log_exporter)
+    log_provider = LoggerProvider()
+    log_provider.add_log_record_processor(log_processor)
+    set_logger_provider(log_provider)
+    dbos_logger.addHandler(LoggingHandler(logger_provider=log_provider))
+
+    test_workflow()
+
+    log_processor.force_flush(timeout_millis=5000)
+    logs = log_exporter.get_finished_logs()
+    assert len(logs) == 2
+    for log in logs:
+        assert log.log_record.attributes is not None
+        assert (
+            log.log_record.attributes["applicationVersion"] == GlobalParams.app_version
+        )
+        assert log.log_record.attributes["executorID"] == GlobalParams.executor_id
+        assert log.log_record.attributes["foo"] == "bar"
+        # We disable OTLP, so no span_id or trace_id should be present
+        assert log.log_record.span_id is not None and log.log_record.span_id == 0
+        assert log.log_record.trace_id is not None and log.log_record.trace_id == 0
+        assert (
+            log.log_record.body == "This is a test_step"
+            or log.log_record.body == "This is a test_workflow"
+        )
+        assert log.log_record.attributes.get("traceId") is None
+
+    spans = exporter.get_finished_spans()
+
+    # No spans should be created since OTLP is disabled
+    assert len(spans) == 0
```