dbos 1.12.0a3__py3-none-any.whl → 1.13.0a5__py3-none-any.whl

This diff compares the contents of two publicly released versions of the dbos package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release.
Files changed (39)
  1. dbos/_alembic_migrations/versions/471b60d64126_dbos_migrations.py +35 -0
  2. dbos/_app_db.py +217 -80
  3. dbos/_client.py +3 -2
  4. dbos/_context.py +4 -0
  5. dbos/_core.py +7 -8
  6. dbos/_dbos.py +28 -18
  7. dbos/_dbos_config.py +29 -20
  8. dbos/_fastapi.py +1 -1
  9. dbos/_logger.py +3 -1
  10. dbos/_migration.py +322 -0
  11. dbos/_sys_db.py +123 -200
  12. dbos/_sys_db_postgres.py +173 -0
  13. dbos/_sys_db_sqlite.py +185 -0
  14. dbos/_tracer.py +5 -1
  15. dbos/_utils.py +10 -1
  16. dbos/cli/cli.py +5 -15
  17. dbos/cli/migration.py +2 -2
  18. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/METADATA +1 -1
  19. dbos-1.13.0a5.dist-info/RECORD +78 -0
  20. dbos-1.12.0a3.dist-info/RECORD +0 -74
  21. /dbos/{_migrations → _alembic_migrations}/env.py +0 -0
  22. /dbos/{_migrations → _alembic_migrations}/script.py.mako +0 -0
  23. /dbos/{_migrations → _alembic_migrations}/versions/01ce9f07bd10_streaming.py +0 -0
  24. /dbos/{_migrations → _alembic_migrations}/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  25. /dbos/{_migrations → _alembic_migrations}/versions/27ac6900c6ad_add_queue_dedup.py +0 -0
  26. /dbos/{_migrations → _alembic_migrations}/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  27. /dbos/{_migrations → _alembic_migrations}/versions/5c361fc04708_added_system_tables.py +0 -0
  28. /dbos/{_migrations → _alembic_migrations}/versions/66478e1b95e5_consolidate_queues.py +0 -0
  29. /dbos/{_migrations → _alembic_migrations}/versions/83f3732ae8e7_workflow_timeout.py +0 -0
  30. /dbos/{_migrations → _alembic_migrations}/versions/933e86bdac6a_add_queue_priority.py +0 -0
  31. /dbos/{_migrations → _alembic_migrations}/versions/a3b18ad34abe_added_triggers.py +0 -0
  32. /dbos/{_migrations → _alembic_migrations}/versions/d76646551a6b_job_queue_limiter.py +0 -0
  33. /dbos/{_migrations → _alembic_migrations}/versions/d76646551a6c_workflow_queue.py +0 -0
  34. /dbos/{_migrations → _alembic_migrations}/versions/d994145b47b6_consolidate_inputs.py +0 -0
  35. /dbos/{_migrations → _alembic_migrations}/versions/eab0cc1d9a14_job_queue.py +0 -0
  36. /dbos/{_migrations → _alembic_migrations}/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
  37. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/WHEEL +0 -0
  38. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/entry_points.txt +0 -0
  39. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/licenses/LICENSE +0 -0
dbos/_sys_db.py CHANGED
@@ -1,12 +1,10 @@
 import datetime
 import functools
 import json
-import logging
-import os
 import random
-import re
 import threading
 import time
+from abc import ABC, abstractmethod
 from enum import Enum
 from typing import (
     TYPE_CHECKING,
@@ -22,15 +20,15 @@ from typing import (
     cast,
 )

-import psycopg
 import sqlalchemy as sa
-import sqlalchemy.dialects.postgresql as pg
-from alembic import command
-from alembic.config import Config
 from sqlalchemy.exc import DBAPIError
 from sqlalchemy.sql import func

-from dbos._utils import INTERNAL_QUEUE_NAME, retriable_postgres_exception
+from dbos._utils import (
+    INTERNAL_QUEUE_NAME,
+    retriable_postgres_exception,
+    retriable_sqlite_exception,
+)

 from . import _serialization
 from ._context import get_local_dbos_context
@@ -316,10 +314,12 @@ def db_retry(
            while True:
                try:
                    return func(*args, **kwargs)
-                except DBAPIError as e:
+                except Exception as e:

                    # Determine if this is a retriable exception
-                    if not retriable_postgres_exception(e):
+                    if not retriable_postgres_exception(
+                        e
+                    ) and not retriable_sqlite_exception(e):
                        raise

                    retries += 1
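
The retry loop above now catches any exception and retries only those that either backend classifier flags as transient. A minimal standalone sketch of the same pattern, assuming a classifier that treats SQLite's "database is locked" (SQLITE_BUSY) errors as retriable; the names here are illustrative, not the package's API:

    import random
    import time

    def is_retriable(e: Exception) -> bool:
        # SQLITE_BUSY typically surfaces as an OperationalError whose
        # message contains "database is locked"; treat it as transient.
        return "database is locked" in str(e).lower()

    def call_with_retries(func, max_retries: int = 5, base_delay: float = 0.1):
        retries = 0
        while True:
            try:
                return func()
            except Exception as e:
                if not is_retriable(e) or retries >= max_retries:
                    raise
                retries += 1
                # Exponential backoff with a little jitter between attempts.
                time.sleep(base_delay * (2**retries) + random.uniform(0, 0.05))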
@@ -339,7 +339,7 @@ def db_retry(
    return decorator


-class SystemDatabase:
+class SystemDatabase(ABC):

    def __init__(
        self,
@@ -348,16 +348,13 @@ class SystemDatabase:
        engine_kwargs: Dict[str, Any],
        debug_mode: bool = False,
    ):
-        # Set driver
-        url = sa.make_url(system_database_url).set(drivername="postgresql+psycopg")
+        import sqlalchemy.dialects.postgresql as pg
+        import sqlalchemy.dialects.sqlite as sq

-        self.engine = sa.create_engine(
-            url,
-            **engine_kwargs,
-        )
+        self.dialect = sq if system_database_url.startswith("sqlite") else pg
+        self.engine = self._create_engine(system_database_url, engine_kwargs)
        self._engine_kwargs = engine_kwargs

-        self.notification_conn: Optional[psycopg.connection.Connection] = None
        self.notifications_map = ThreadSafeConditionDict()
        self.workflow_events_map = ThreadSafeConditionDict()

@@ -365,70 +362,29 @@ class SystemDatabase:
        self._run_background_processes = True
        self._debug_mode = debug_mode

-        # Run migrations
+    @abstractmethod
+    def _create_engine(
+        self, system_database_url: str, engine_kwargs: Dict[str, Any]
+    ) -> sa.Engine:
+        """Create a database engine specific to the database type."""
+        pass
+
+    @abstractmethod
    def run_migrations(self) -> None:
-        if self._debug_mode:
-            dbos_logger.warning("System database migrations are skipped in debug mode.")
-            return
-        system_db_url = self.engine.url
-        sysdb_name = system_db_url.database
-        # If the system database does not already exist, create it
-        engine = sa.create_engine(
-            system_db_url.set(database="postgres"), **self._engine_kwargs
-        )
-        with engine.connect() as conn:
-            conn.execution_options(isolation_level="AUTOCOMMIT")
-            if not conn.execute(
-                sa.text("SELECT 1 FROM pg_database WHERE datname=:db_name"),
-                parameters={"db_name": sysdb_name},
-            ).scalar():
-                dbos_logger.info(f"Creating system database {sysdb_name}")
-                conn.execute(sa.text(f"CREATE DATABASE {sysdb_name}"))
-        engine.dispose()
-
-        # Run a schema migration for the system database
-        migration_dir = os.path.join(
-            os.path.dirname(os.path.realpath(__file__)), "_migrations"
-        )
-        alembic_cfg = Config()
-        alembic_cfg.set_main_option("script_location", migration_dir)
-        logging.getLogger("alembic").setLevel(logging.WARNING)
-        # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
-        escaped_conn_string = re.sub(
-            r"%(?=[0-9A-Fa-f]{2})",
-            "%%",
-            self.engine.url.render_as_string(hide_password=False),
-        )
-        alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
-        try:
-            command.upgrade(alembic_cfg, "head")
-        except Exception as e:
-            dbos_logger.warning(
-                f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
-            )
-            alembic_cfg = Config()
-            alembic_cfg.set_main_option("script_location", migration_dir)
-            # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
-            escaped_conn_string = re.sub(
-                r"%(?=[0-9A-Fa-f]{2})",
-                "%%",
-                self.engine.url.render_as_string(hide_password=False),
-            )
-            alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
-            try:
-                command.upgrade(alembic_cfg, "head")
-            except Exception as e:
-                dbos_logger.warning(
-                    f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
-                )
+        """Run database migrations specific to the database type."""
+        pass

    # Destroy the pool when finished
    def destroy(self) -> None:
        self._run_background_processes = False
-        if self.notification_conn is not None:
-            self.notification_conn.close()
+        self._cleanup_connections()
        self.engine.dispose()

+    @abstractmethod
+    def _cleanup_connections(self) -> None:
+        """Clean up database-specific connections."""
+        pass
+
    def _insert_workflow_status(
        self,
        status: WorkflowStatusInternal,
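
Engine creation, migrations, and connection cleanup are now abstract hooks rather than inline Postgres logic. A toy sketch of the subclass contract, assuming the real implementations live in the new _sys_db_postgres.py and _sys_db_sqlite.py files listed above:

    import sqlalchemy as sa
    from typing import Any, Dict

    class ToySystemDatabase(SystemDatabase):
        def _create_engine(
            self, system_database_url: str, engine_kwargs: Dict[str, Any]
        ) -> sa.Engine:
            return sa.create_engine(system_database_url, **engine_kwargs)

        def run_migrations(self) -> None:
            pass  # a real backend creates or upgrades its schema here

        def _cleanup_connections(self) -> None:
            pass  # e.g. close a LISTEN/NOTIFY connection on Postgres

        # The remaining abstract methods declared further down
        # (_notification_listener and the constraint-violation checks)
        # must also be implemented before the class can be instantiated.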
@@ -436,6 +392,7 @@ class SystemDatabase:
        *,
        max_recovery_attempts: Optional[int],
    ) -> tuple[WorkflowStatuses, Optional[int]]:
+        """Insert or update workflow status using PostgreSQL upsert operations."""
        if self._debug_mode:
            raise Exception("called insert_workflow_status in debug mode")
        wf_status: WorkflowStatuses = status["status"]
@@ -451,14 +408,14 @@ class SystemDatabase:
                ),
                else_=SystemSchema.workflow_status.c.recovery_attempts,
            ),
-            "updated_at": func.extract("epoch", func.now()) * 1000,
+            "updated_at": sa.func.extract("epoch", sa.func.now()) * 1000,
        }
        # Don't update an existing executor ID when enqueueing a workflow.
        if wf_status != WorkflowStatusString.ENQUEUED.value:
            update_values["executor_id"] = status["executor_id"]

        cmd = (
-            pg.insert(SystemSchema.workflow_status)
+            self.dialect.insert(SystemSchema.workflow_status)
            .values(
                workflow_uuid=status["workflow_uuid"],
                status=status["status"],
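
The switch from pg.insert to self.dialect.insert works because SQLAlchemy's postgresql and sqlite dialect modules both expose an insert() construct with the same ON CONFLICT methods. A small self-contained sketch:

    import sqlalchemy as sa
    from sqlalchemy.dialects import postgresql, sqlite

    metadata = sa.MetaData()
    kv = sa.Table(
        "kv",
        metadata,
        sa.Column("k", sa.Text, primary_key=True),
        sa.Column("v", sa.Text),
    )

    # The same upsert builds under either dialect module.
    for dialect in (postgresql, sqlite):
        stmt = (
            dialect.insert(kv)
            .values(k="a", v="1")
            .on_conflict_do_update(index_elements=["k"], set_={"v": "1"})
        )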
@@ -489,13 +446,21 @@ class SystemDatabase:
            )
        )

-        cmd = cmd.returning(SystemSchema.workflow_status.c.recovery_attempts, SystemSchema.workflow_status.c.status, SystemSchema.workflow_status.c.workflow_deadline_epoch_ms, SystemSchema.workflow_status.c.name, SystemSchema.workflow_status.c.class_name, SystemSchema.workflow_status.c.config_name, SystemSchema.workflow_status.c.queue_name)  # type: ignore
+        cmd = cmd.returning(
+            SystemSchema.workflow_status.c.recovery_attempts,
+            SystemSchema.workflow_status.c.status,
+            SystemSchema.workflow_status.c.workflow_deadline_epoch_ms,
+            SystemSchema.workflow_status.c.name,
+            SystemSchema.workflow_status.c.class_name,
+            SystemSchema.workflow_status.c.config_name,
+            SystemSchema.workflow_status.c.queue_name,
+        )

        try:
            results = conn.execute(cmd)
        except DBAPIError as dbapi_error:
            # Unique constraint violation for the deduplication ID
-            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
+            if self._is_unique_constraint_violation(dbapi_error):
                assert status["deduplication_id"] is not None
                assert status["queue_name"] is not None
                raise DBOSQueueDeduplicatedError(
@@ -621,7 +586,8 @@ class SystemDatabase:
            raise Exception("called resume_workflow in debug mode")
        with self.engine.begin() as c:
            # Execute with snapshot isolation in case of concurrent calls on the same workflow
-            c.execute(sa.text("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"))
+            if self.engine.dialect.name == "postgresql":
+                c.execute(sa.text("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"))
            # Check the status of the workflow. If it is complete, do nothing.
            status_row = c.execute(
                sa.select(
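
SET TRANSACTION ISOLATION LEVEL is PostgreSQL syntax with no SQLite counterpart, so it is now gated on the dialect; skipping it on SQLite should be safe because SQLite already serializes writers through its database-level write lock. The guard pattern in isolation (engine is any sa.Engine):

    with engine.begin() as conn:
        # Emit backend-specific SQL only when the dialect matches.
        if engine.dialect.name == "postgresql":
            conn.execute(sa.text("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"))
        # ...portable statements follow for both backends...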
@@ -667,7 +633,7 @@ class SystemDatabase:
            # Create an entry for the forked workflow with the same
            # initial values as the original.
            c.execute(
-                pg.insert(SystemSchema.workflow_status).values(
+                sa.insert(SystemSchema.workflow_status).values(
                    workflow_uuid=forked_workflow_id,
                    status=WorkflowStatusString.ENQUEUED.value,
                    name=status["name"],
@@ -881,7 +847,7 @@ class SystemDatabase:
            query = query.offset(input.offset)

        with self.engine.begin() as c:
-            rows = c.execute(query)
+            rows = c.execute(query).fetchall()

        infos: List[WorkflowStatus] = []
        for row in rows:
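
The .fetchall() matters because the rows are iterated after the with-block has committed and returned the connection to the pool; materializing them inside the transaction avoids reading from a result tied to a released connection. In isolation:

    with engine.begin() as c:
        rows = c.execute(query).fetchall()  # materialize while the connection is open
    # The connection is released here, but `rows` is now a plain list of
    # Row objects and stays safe to iterate.
    for row in rows:
        print(row)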
@@ -992,7 +958,7 @@ class SystemDatabase:
            query = query.offset(input["offset"])

        with self.engine.begin() as c:
-            rows = c.execute(query)
+            rows = c.execute(query).fetchall()

        infos: List[WorkflowStatus] = []
        for row in rows:
@@ -1096,7 +1062,7 @@ class SystemDatabase:
        error = result["error"]
        output = result["output"]
        assert error is None or output is None, "Only one of error or output can be set"
-        sql = pg.insert(SystemSchema.operation_outputs).values(
+        sql = sa.insert(SystemSchema.operation_outputs).values(
            workflow_uuid=result["workflow_uuid"],
            function_id=result["function_id"],
            function_name=result["function_name"],
@@ -1106,7 +1072,7 @@ class SystemDatabase:
        try:
            conn.execute(sql)
        except DBAPIError as dbapi_error:
-            if dbapi_error.orig.sqlstate == "23505":  # type: ignore
+            if self._is_unique_constraint_violation(dbapi_error):
                raise DBOSWorkflowConflictIDError(result["workflow_uuid"])
            raise

@@ -1127,7 +1093,7 @@ class SystemDatabase:
        # Because there's no corresponding check, we do nothing on conflict
        # and do not raise a DBOSWorkflowConflictIDError
        sql = (
-            pg.insert(SystemSchema.operation_outputs)
+            self.dialect.insert(SystemSchema.operation_outputs)
            .values(
                workflow_uuid=ctx.workflow_id,
                function_id=ctx.function_id,
1152
1118
  if self._debug_mode:
1153
1119
  raise Exception("called record_child_workflow in debug mode")
1154
1120
 
1155
- sql = pg.insert(SystemSchema.operation_outputs).values(
1121
+ sql = sa.insert(SystemSchema.operation_outputs).values(
1156
1122
  workflow_uuid=parentUUID,
1157
1123
  function_id=functionID,
1158
1124
  function_name=functionName,
@@ -1162,10 +1128,20 @@ class SystemDatabase:
1162
1128
  with self.engine.begin() as c:
1163
1129
  c.execute(sql)
1164
1130
  except DBAPIError as dbapi_error:
1165
- if dbapi_error.orig.sqlstate == "23505": # type: ignore
1131
+ if self._is_unique_constraint_violation(dbapi_error):
1166
1132
  raise DBOSWorkflowConflictIDError(parentUUID)
1167
1133
  raise
1168
1134
 
1135
+ @abstractmethod
1136
+ def _is_unique_constraint_violation(self, dbapi_error: DBAPIError) -> bool:
1137
+ """Check if the error is a unique constraint violation."""
1138
+ pass
1139
+
1140
+ @abstractmethod
1141
+ def _is_foreign_key_violation(self, dbapi_error: DBAPIError) -> bool:
1142
+ """Check if the error is a foreign key violation."""
1143
+ pass
1144
+
1169
1145
  def _check_operation_execution_txn(
1170
1146
  self,
1171
1147
  workflow_id: str,
@@ -1291,15 +1267,14 @@ class SystemDatabase:
1291
1267
 
1292
1268
  try:
1293
1269
  c.execute(
1294
- pg.insert(SystemSchema.notifications).values(
1270
+ sa.insert(SystemSchema.notifications).values(
1295
1271
  destination_uuid=destination_uuid,
1296
1272
  topic=topic,
1297
1273
  message=_serialization.serialize(message),
1298
1274
  )
1299
1275
  )
1300
1276
  except DBAPIError as dbapi_error:
1301
- # Foreign key violation
1302
- if dbapi_error.orig.sqlstate == "23503": # type: ignore
1277
+ if self._is_foreign_key_violation(dbapi_error):
1303
1278
  raise DBOSNonExistentWorkflowError(destination_uuid)
1304
1279
  raise
1305
1280
  output: OperationResultInternal = {
@@ -1374,29 +1349,25 @@ class SystemDatabase:
1374
1349
 
1375
1350
  # Transactionally consume and return the message if it's in the database, otherwise return null.
1376
1351
  with self.engine.begin() as c:
1377
- oldest_entry_cte = (
1378
- sa.select(
1379
- SystemSchema.notifications.c.destination_uuid,
1380
- SystemSchema.notifications.c.topic,
1381
- SystemSchema.notifications.c.message,
1382
- SystemSchema.notifications.c.created_at_epoch_ms,
1383
- )
1384
- .where(
1385
- SystemSchema.notifications.c.destination_uuid == workflow_uuid,
1386
- SystemSchema.notifications.c.topic == topic,
1387
- )
1388
- .order_by(SystemSchema.notifications.c.created_at_epoch_ms.asc())
1389
- .limit(1)
1390
- .cte("oldest_entry")
1391
- )
1392
1352
  delete_stmt = (
1393
1353
  sa.delete(SystemSchema.notifications)
1394
1354
  .where(
1395
- SystemSchema.notifications.c.destination_uuid
1396
- == oldest_entry_cte.c.destination_uuid,
1397
- SystemSchema.notifications.c.topic == oldest_entry_cte.c.topic,
1398
- SystemSchema.notifications.c.created_at_epoch_ms
1399
- == oldest_entry_cte.c.created_at_epoch_ms,
1355
+ SystemSchema.notifications.c.destination_uuid == workflow_uuid,
1356
+ SystemSchema.notifications.c.topic == topic,
1357
+ SystemSchema.notifications.c.message_uuid
1358
+ == (
1359
+ sa.select(SystemSchema.notifications.c.message_uuid)
1360
+ .where(
1361
+ SystemSchema.notifications.c.destination_uuid
1362
+ == workflow_uuid,
1363
+ SystemSchema.notifications.c.topic == topic,
1364
+ )
1365
+ .order_by(
1366
+ SystemSchema.notifications.c.created_at_epoch_ms.asc()
1367
+ )
1368
+ .limit(1)
1369
+ .scalar_subquery()
1370
+ ),
1400
1371
  )
1401
1372
  .returning(SystemSchema.notifications.c.message)
1402
1373
  )
@@ -1418,62 +1389,47 @@ class SystemDatabase:
1418
1389
  )
1419
1390
  return message
1420
1391
 
1392
+ @abstractmethod
1421
1393
  def _notification_listener(self) -> None:
1422
- while self._run_background_processes:
1423
- try:
1424
- # since we're using the psycopg connection directly, we need a url without the "+pycopg" suffix
1425
- url = sa.URL.create(
1426
- "postgresql", **self.engine.url.translate_connect_args()
1427
- )
1428
- # Listen to notifications
1429
- self.notification_conn = psycopg.connect(
1430
- url.render_as_string(hide_password=False), autocommit=True
1431
- )
1394
+ """Listen for database notifications using database-specific mechanisms."""
1395
+ pass
1432
1396
 
1433
- self.notification_conn.execute("LISTEN dbos_notifications_channel")
1434
- self.notification_conn.execute("LISTEN dbos_workflow_events_channel")
1397
+ @staticmethod
1398
+ def reset_system_database(database_url: str) -> None:
1399
+ """Reset the system database by calling the appropriate implementation."""
1400
+ if database_url.startswith("sqlite"):
1401
+ from ._sys_db_sqlite import SQLiteSystemDatabase
1435
1402
 
1436
- while self._run_background_processes:
1437
- gen = self.notification_conn.notifies()
1438
- for notify in gen:
1439
- channel = notify.channel
1440
- dbos_logger.debug(
1441
- f"Received notification on channel: {channel}, payload: {notify.payload}"
1442
- )
1443
- if channel == "dbos_notifications_channel":
1444
- if notify.payload:
1445
- condition = self.notifications_map.get(notify.payload)
1446
- if condition is None:
1447
- # No condition found for this payload
1448
- continue
1449
- condition.acquire()
1450
- condition.notify_all()
1451
- condition.release()
1452
- dbos_logger.debug(
1453
- f"Signaled notifications condition for {notify.payload}"
1454
- )
1455
- elif channel == "dbos_workflow_events_channel":
1456
- if notify.payload:
1457
- condition = self.workflow_events_map.get(notify.payload)
1458
- if condition is None:
1459
- # No condition found for this payload
1460
- continue
1461
- condition.acquire()
1462
- condition.notify_all()
1463
- condition.release()
1464
- dbos_logger.debug(
1465
- f"Signaled workflow_events condition for {notify.payload}"
1466
- )
1467
- else:
1468
- dbos_logger.error(f"Unknown channel: {channel}")
1469
- except Exception as e:
1470
- if self._run_background_processes:
1471
- dbos_logger.warning(f"Notification listener error: {e}")
1472
- time.sleep(1)
1473
- # Then the loop will try to reconnect and restart the listener
1474
- finally:
1475
- if self.notification_conn is not None:
1476
- self.notification_conn.close()
1403
+ SQLiteSystemDatabase._reset_system_database(database_url)
1404
+ else:
1405
+ from ._sys_db_postgres import PostgresSystemDatabase
1406
+
1407
+ PostgresSystemDatabase._reset_system_database(database_url)
1408
+
1409
+ @staticmethod
1410
+ def create(
1411
+ system_database_url: str,
1412
+ engine_kwargs: Dict[str, Any],
1413
+ debug_mode: bool = False,
1414
+ ) -> "SystemDatabase":
1415
+ """Factory method to create the appropriate SystemDatabase implementation based on URL."""
1416
+ if system_database_url.startswith("sqlite"):
1417
+ from ._sys_db_sqlite import SQLiteSystemDatabase
1418
+
1419
+ return SQLiteSystemDatabase(
1420
+ system_database_url=system_database_url,
1421
+ engine_kwargs=engine_kwargs,
1422
+ debug_mode=debug_mode,
1423
+ )
1424
+ else:
1425
+ # Default to PostgreSQL for postgresql://, postgres://, or other URLs
1426
+ from ._sys_db_postgres import PostgresSystemDatabase
1427
+
1428
+ return PostgresSystemDatabase(
1429
+ system_database_url=system_database_url,
1430
+ engine_kwargs=engine_kwargs,
1431
+ debug_mode=debug_mode,
1432
+ )
1477
1433
 
1478
1434
  @db_retry()
1479
1435
  def sleep(
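
With the factory in place, callers choose a backend purely through the URL scheme instead of constructing SystemDatabase directly. A usage sketch (URLs illustrative):

    # SQLite-backed system database:
    sys_db = SystemDatabase.create(
        system_database_url="sqlite:///dbos_system.sqlite",
        engine_kwargs={},
    )
    # Postgres-backed (any non-sqlite URL falls through to Postgres):
    # sys_db = SystemDatabase.create("postgresql://localhost:5432/app_dbos_sys", {})
    sys_db.run_migrations()
    sys_db.destroy()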
@@ -1537,9 +1493,8 @@ class SystemDatabase:
                return  # Already sent before
            else:
                dbos_logger.debug(f"Running set_event, id: {function_id}, key: {key}")
-
            c.execute(
-                pg.insert(SystemSchema.workflow_events)
+                self.dialect.insert(SystemSchema.workflow_events)
                .values(
                    workflow_uuid=workflow_uuid,
                    key=key,
1661
1616
  limiter_period_ms = int(queue.limiter["period"] * 1000)
1662
1617
  with self.engine.begin() as c:
1663
1618
  # Execute with snapshot isolation to ensure multiple workers respect limits
1664
- c.execute(sa.text("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"))
1619
+ if self.engine.dialect.name == "postgresql":
1620
+ c.execute(sa.text("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"))
1665
1621
 
1666
1622
  # If there is a limiter, compute how many functions have started in its period.
1667
1623
  if queue.limiter is not None:
@@ -2066,36 +2022,3 @@ class SystemDatabase:
2066
2022
  return cutoff_epoch_timestamp_ms, [
2067
2023
  row[0] for row in pending_enqueued_result
2068
2024
  ]
2069
-
2070
-
2071
- def reset_system_database(postgres_db_url: sa.URL, sysdb_name: str) -> None:
2072
- try:
2073
- # Connect to postgres default database
2074
- engine = sa.create_engine(
2075
- postgres_db_url.set(drivername="postgresql+psycopg"),
2076
- connect_args={"connect_timeout": 10},
2077
- )
2078
-
2079
- with engine.connect() as conn:
2080
- # Set autocommit required for database dropping
2081
- conn.execution_options(isolation_level="AUTOCOMMIT")
2082
-
2083
- # Terminate existing connections
2084
- conn.execute(
2085
- sa.text(
2086
- """
2087
- SELECT pg_terminate_backend(pg_stat_activity.pid)
2088
- FROM pg_stat_activity
2089
- WHERE pg_stat_activity.datname = :db_name
2090
- AND pid <> pg_backend_pid()
2091
- """
2092
- ),
2093
- {"db_name": sysdb_name},
2094
- )
2095
-
2096
- # Drop the database
2097
- conn.execute(sa.text(f"DROP DATABASE IF EXISTS {sysdb_name}"))
2098
-
2099
- except sa.exc.SQLAlchemyError as e:
2100
- dbos_logger.error(f"Error resetting system database: {str(e)}")
2101
- raise e
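
The module-level reset_system_database(postgres_db_url, sysdb_name) helper is removed; resets now go through the static method defined earlier, which dispatches on the URL scheme just like create(). A usage sketch (URLs illustrative):

    # Old: reset_system_database(sa.make_url("postgresql://..."), "app_dbos_sys")
    # New: one string argument, works for either backend.
    SystemDatabase.reset_system_database("postgresql://localhost:5432/app_dbos_sys")
    SystemDatabase.reset_system_database("sqlite:///dbos_system.sqlite")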