dbos 1.12.0a3__py3-none-any.whl → 1.13.0a5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (39)
  1. dbos/_alembic_migrations/versions/471b60d64126_dbos_migrations.py +35 -0
  2. dbos/_app_db.py +217 -80
  3. dbos/_client.py +3 -2
  4. dbos/_context.py +4 -0
  5. dbos/_core.py +7 -8
  6. dbos/_dbos.py +28 -18
  7. dbos/_dbos_config.py +29 -20
  8. dbos/_fastapi.py +1 -1
  9. dbos/_logger.py +3 -1
  10. dbos/_migration.py +322 -0
  11. dbos/_sys_db.py +123 -200
  12. dbos/_sys_db_postgres.py +173 -0
  13. dbos/_sys_db_sqlite.py +185 -0
  14. dbos/_tracer.py +5 -1
  15. dbos/_utils.py +10 -1
  16. dbos/cli/cli.py +5 -15
  17. dbos/cli/migration.py +2 -2
  18. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/METADATA +1 -1
  19. dbos-1.13.0a5.dist-info/RECORD +78 -0
  20. dbos-1.12.0a3.dist-info/RECORD +0 -74
  21. /dbos/{_migrations → _alembic_migrations}/env.py +0 -0
  22. /dbos/{_migrations → _alembic_migrations}/script.py.mako +0 -0
  23. /dbos/{_migrations → _alembic_migrations}/versions/01ce9f07bd10_streaming.py +0 -0
  24. /dbos/{_migrations → _alembic_migrations}/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  25. /dbos/{_migrations → _alembic_migrations}/versions/27ac6900c6ad_add_queue_dedup.py +0 -0
  26. /dbos/{_migrations → _alembic_migrations}/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  27. /dbos/{_migrations → _alembic_migrations}/versions/5c361fc04708_added_system_tables.py +0 -0
  28. /dbos/{_migrations → _alembic_migrations}/versions/66478e1b95e5_consolidate_queues.py +0 -0
  29. /dbos/{_migrations → _alembic_migrations}/versions/83f3732ae8e7_workflow_timeout.py +0 -0
  30. /dbos/{_migrations → _alembic_migrations}/versions/933e86bdac6a_add_queue_priority.py +0 -0
  31. /dbos/{_migrations → _alembic_migrations}/versions/a3b18ad34abe_added_triggers.py +0 -0
  32. /dbos/{_migrations → _alembic_migrations}/versions/d76646551a6b_job_queue_limiter.py +0 -0
  33. /dbos/{_migrations → _alembic_migrations}/versions/d76646551a6c_workflow_queue.py +0 -0
  34. /dbos/{_migrations → _alembic_migrations}/versions/d994145b47b6_consolidate_inputs.py +0 -0
  35. /dbos/{_migrations → _alembic_migrations}/versions/eab0cc1d9a14_job_queue.py +0 -0
  36. /dbos/{_migrations → _alembic_migrations}/versions/f4b9b32ba814_functionname_childid_op_outputs.py +0 -0
  37. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/WHEEL +0 -0
  38. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/entry_points.txt +0 -0
  39. {dbos-1.12.0a3.dist-info → dbos-1.13.0a5.dist-info}/licenses/LICENSE +0 -0
dbos/_dbos_config.py CHANGED
@@ -33,6 +33,9 @@ class DBOSConfig(TypedDict, total=False):
         admin_port (int): Admin port
         run_admin_server (bool): Whether to run the DBOS admin server
         otlp_attributes (dict[str, str]): A set of custom attributes to apply OTLP-exported logs and traces
+        application_version (str): Application version
+        executor_id (str): Executor ID, used to identify the application instance in distributed environments
+        disable_otlp (bool): If True, disables OTLP tracing and logging. Defaults to False.
     """
 
     name: str
@@ -49,6 +52,7 @@ class DBOSConfig(TypedDict, total=False):
     otlp_attributes: Optional[dict[str, str]]
     application_version: Optional[str]
     executor_id: Optional[str]
+    disable_otlp: Optional[bool]
 
 
 class RuntimeConfig(TypedDict, total=False):
@@ -91,6 +95,7 @@ class TelemetryConfig(TypedDict, total=False):
     logs: Optional[LoggerConfig]
     OTLPExporter: Optional[OTLPExporterConfig]
     otlp_attributes: Optional[dict[str, str]]
+    disable_otlp: Optional[bool]
 
 
 class ConfigFile(TypedDict, total=False):
@@ -157,6 +162,7 @@ def translate_dbos_config_to_config_file(config: DBOSConfig) -> ConfigFile:
     telemetry: TelemetryConfig = {
         "OTLPExporter": {"tracesEndpoint": [], "logsEndpoint": []},
         "otlp_attributes": config.get("otlp_attributes", {}),
+        "disable_otlp": config.get("disable_otlp", False),
     }
     # For mypy
     assert telemetry["OTLPExporter"] is not None
@@ -290,19 +296,12 @@ def process_config(
     """
     If a database_url is provided, pass it as is in the config.
 
-    Else, build a database_url from defaults.
+    Else, default to SQLite.
 
     Also build SQL Alchemy "kwargs" base on user input + defaults.
     Specifically, db_engine_kwargs takes precedence over app_db_pool_size
 
     In debug mode, apply overrides from DBOS_DBHOST, DBOS_DBPORT, DBOS_DBUSER, and DBOS_DBPASSWORD.
-
-    Default configuration:
-    - Hostname: localhost
-    - Port: 5432
-    - Username: postgres
-    - Password: $PGPASSWORD
-    - Database name: transformed application name.
     """
 
     if "name" not in data:
@@ -344,9 +343,14 @@ def process_config(
 
         url = make_url(data["database_url"])
 
-        if not data["database"].get("sys_db_name"):
+        if data["database_url"].startswith("sqlite"):
+            data["system_database_url"] = data["database_url"]
+        else:
             assert url.database is not None
-            data["database"]["sys_db_name"] = url.database + SystemSchema.sysdb_suffix
+            if not data["database"].get("sys_db_name"):
+                data["database"]["sys_db_name"] = (
+                    url.database + SystemSchema.sysdb_suffix
+                )
 
         # Gather connect_timeout from the URL if provided. It should be used in engine kwargs if not provided there (instead of our default)
         connect_timeout_str = url.query.get("connect_timeout")
@@ -374,23 +378,24 @@ def process_config(
         ).render_as_string(hide_password=False)
     else:
         _app_db_name = _app_name_to_db_name(data["name"])
-        _password = os.environ.get("PGPASSWORD", "dbos")
-        data["database_url"] = (
-            f"postgres://postgres:{_password}@localhost:5432/{_app_db_name}?connect_timeout=10&sslmode=prefer"
-        )
-        if not data["database"].get("sys_db_name"):
-            data["database"]["sys_db_name"] = _app_db_name + SystemSchema.sysdb_suffix
-    assert data["database_url"] is not None
+        data["database_url"] = f"sqlite:///{_app_db_name}.sqlite"
+        data["system_database_url"] = data["database_url"]
 
     configure_db_engine_parameters(data["database"], connect_timeout=connect_timeout)
 
     # Pretty-print where we've loaded database connection information from, respecting the log level
+    assert data["database_url"] is not None
     if not silent and logs["logLevel"] == "INFO" or logs["logLevel"] == "DEBUG":
         log_url = make_url(data["database_url"]).render_as_string(hide_password=True)
         print(f"[bold blue]Using database connection string: {log_url}[/bold blue]")
-        print(
-            f"[bold blue]Database engine parameters: {data['database']['db_engine_kwargs']}[/bold blue]"
-        )
+        if data["database_url"].startswith("sqlite"):
+            print(
+                f"[bold blue]Using SQLite as a system database. The SQLite system database is for development and testing. PostgreSQL is recommended for production use.[/bold blue]"
+            )
+        else:
+            print(
+                f"[bold blue]Database engine parameters: {data['database']['db_engine_kwargs']}[/bold blue]"
+            )
 
     # Return data as ConfigFile type
     return data
@@ -439,6 +444,8 @@ def configure_db_engine_parameters(
 
 
 def is_valid_database_url(database_url: str) -> bool:
+    if database_url.startswith("sqlite"):
+        return True
     url = make_url(database_url)
     required_fields = [
         ("username", "Username must be specified in the connection URL"),
@@ -553,6 +560,8 @@ def get_system_database_url(config: ConfigFile) -> str:
         return config["system_database_url"]
     else:
         assert config["database_url"] is not None
+        if config["database_url"].startswith("sqlite"):
+            return config["database_url"]
        app_db_url = make_url(config["database_url"])
         if config["database"].get("sys_db_name") is not None:
             sys_db_name = config["database"]["sys_db_name"]
dbos/_fastapi.py CHANGED
@@ -49,7 +49,7 @@ class LifespanMiddleware:
                 if not self.dbos._launched:
                     self.dbos._launch()
             elif message["type"] == "lifespan.shutdown.complete":
-                self.dbos._destroy()
+                self.dbos.destroy()
             await send(message)
 
         # Call the original app with our wrapped functions
dbos/_logger.py CHANGED
@@ -77,7 +77,9 @@ def config_logger(config: "ConfigFile") -> None:
     otlp_logs_endpoints = (
         config.get("telemetry", {}).get("OTLPExporter", {}).get("logsEndpoint") # type: ignore
     )
-    if otlp_logs_endpoints:
+    disable_otlp = config.get("telemetry", {}).get("disable_otlp", False) # type: ignore
+
+    if not disable_otlp and otlp_logs_endpoints:
         log_provider = PatchedOTLPLoggerProvider(
             Resource.create(
                 attributes={
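Note: even when an OTLP logs endpoint is configured, the new disable_otlp flag wins. A minimal sketch of a translated ConfigFile fragment exercising the new gate (endpoint URL is a placeholder):

    config = {
        "name": "my-app",
        "telemetry": {
            "OTLPExporter": {"logsEndpoint": ["http://localhost:4318/v1/logs"]},
            "disable_otlp": True,  # no OTLP log handler is attached despite the endpoint
        },
    }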
dbos/_migration.py ADDED
@@ -0,0 +1,322 @@
+import logging
+import os
+import re
+import sys
+
+import sqlalchemy as sa
+from alembic import command
+from alembic.config import Config
+
+from ._logger import dbos_logger
+
+
+def ensure_dbos_schema(engine: sa.Engine) -> bool:
+    """
+    True if using DBOS migrations (DBOS schema and migrations table already exist or were created)
+    False if using Alembic migrations (DBOS schema exists, but dbos_migrations table doesn't)
+    """
+    with engine.begin() as conn:
+        # Check if dbos schema exists
+        schema_result = conn.execute(
+            sa.text(
+                "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'dbos'"
+            )
+        )
+        schema_existed = schema_result.fetchone() is not None
+
+        # Create schema if it doesn't exist
+        if not schema_existed:
+            conn.execute(sa.text("CREATE SCHEMA dbos"))
+
+        # Check if dbos_migrations table exists
+        table_result = conn.execute(
+            sa.text(
+                "SELECT table_name FROM information_schema.tables WHERE table_schema = 'dbos' AND table_name = 'dbos_migrations'"
+            )
+        )
+        table_exists = table_result.fetchone() is not None
+
+        if table_exists:
+            return True
+        elif schema_existed:
+            return False
+        else:
+            conn.execute(
+                sa.text(
+                    "CREATE TABLE dbos.dbos_migrations (version BIGINT NOT NULL PRIMARY KEY)"
+                )
+            )
+            return True
+
+
+def run_alembic_migrations(engine: sa.Engine) -> None:
+    """Run system database schema migrations with Alembic.
+    This is DEPRECATED in favor of DBOS-managed migrations.
+    It is retained only for backwards compatibility and
+    will be removed in the next major version."""
+    # Run a schema migration for the system database
+    migration_dir = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), "_alembic_migrations"
+    )
+    alembic_cfg = Config()
+    alembic_cfg.set_main_option("script_location", migration_dir)
+    logging.getLogger("alembic").setLevel(logging.WARNING)
+    # Alembic requires the % in URL-escaped parameters to itself be escaped to %%.
+    escaped_conn_string = re.sub(
+        r"%(?=[0-9A-Fa-f]{2})",
+        "%%",
+        engine.url.render_as_string(hide_password=False),
+    )
+    alembic_cfg.set_main_option("sqlalchemy.url", escaped_conn_string)
+    try:
+        command.upgrade(alembic_cfg, "head")
+    except Exception as e:
+        dbos_logger.warning(
+            f"Exception during system database construction. This is most likely because the system database was configured using a later version of DBOS: {e}"
+        )
+
+
+def run_dbos_migrations(engine: sa.Engine) -> None:
+    """Run DBOS-managed migrations by executing each SQL command in dbos_migrations."""
+    with engine.begin() as conn:
+        # Get current migration version
+        result = conn.execute(sa.text("SELECT version FROM dbos.dbos_migrations"))
+        current_version = result.fetchone()
+        last_applied = current_version[0] if current_version else 0
+
+        # Apply migrations starting from the next version
+        for i, migration_sql in enumerate(dbos_migrations, 1):
+            if i <= last_applied:
+                continue
+
+            # Execute the migration
+            dbos_logger.info(f"Applying DBOS system database schema migration {i}")
+            conn.execute(sa.text(migration_sql))
+
+            # Update the single row with the new version
+            if last_applied == 0:
+                conn.execute(
+                    sa.text(
+                        "INSERT INTO dbos.dbos_migrations (version) VALUES (:version)"
+                    ),
+                    {"version": i},
+                )
+            else:
+                conn.execute(
+                    sa.text("UPDATE dbos.dbos_migrations SET version = :version"),
+                    {"version": i},
+                )
+            last_applied = i
+
+
+dbos_migration_one = """
+-- Enable uuid extension for generating UUIDs
+CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+
+CREATE TABLE dbos.workflow_status (
+    workflow_uuid TEXT PRIMARY KEY,
+    status TEXT,
+    name TEXT,
+    authenticated_user TEXT,
+    assumed_role TEXT,
+    authenticated_roles TEXT,
+    request TEXT,
+    output TEXT,
+    error TEXT,
+    executor_id TEXT,
+    created_at BIGINT NOT NULL DEFAULT (EXTRACT(epoch FROM now()) * 1000::numeric)::bigint,
+    updated_at BIGINT NOT NULL DEFAULT (EXTRACT(epoch FROM now()) * 1000::numeric)::bigint,
+    application_version TEXT,
+    application_id TEXT,
+    class_name VARCHAR(255) DEFAULT NULL,
+    config_name VARCHAR(255) DEFAULT NULL,
+    recovery_attempts BIGINT DEFAULT 0,
+    queue_name TEXT,
+    workflow_timeout_ms BIGINT,
+    workflow_deadline_epoch_ms BIGINT,
+    inputs TEXT,
+    started_at_epoch_ms BIGINT,
+    deduplication_id TEXT,
+    priority INTEGER NOT NULL DEFAULT 0
+);
+
+CREATE INDEX workflow_status_created_at_index ON dbos.workflow_status (created_at);
+CREATE INDEX workflow_status_executor_id_index ON dbos.workflow_status (executor_id);
+CREATE INDEX workflow_status_status_index ON dbos.workflow_status (status);
+
+ALTER TABLE dbos.workflow_status
+    ADD CONSTRAINT uq_workflow_status_queue_name_dedup_id
+    UNIQUE (queue_name, deduplication_id);
+
+CREATE TABLE dbos.operation_outputs (
+    workflow_uuid TEXT NOT NULL,
+    function_id INTEGER NOT NULL,
+    function_name TEXT NOT NULL DEFAULT '',
+    output TEXT,
+    error TEXT,
+    child_workflow_id TEXT,
+    PRIMARY KEY (workflow_uuid, function_id),
+    FOREIGN KEY (workflow_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+CREATE TABLE dbos.notifications (
+    destination_uuid TEXT NOT NULL,
+    topic TEXT,
+    message TEXT NOT NULL,
+    created_at_epoch_ms BIGINT NOT NULL DEFAULT (EXTRACT(epoch FROM now()) * 1000::numeric)::bigint,
+    message_uuid TEXT NOT NULL DEFAULT gen_random_uuid(), -- Built-in function
+    FOREIGN KEY (destination_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+CREATE INDEX idx_workflow_topic ON dbos.notifications (destination_uuid, topic);
+
+-- Create notification function
+CREATE OR REPLACE FUNCTION dbos.notifications_function() RETURNS TRIGGER AS $$
+DECLARE
+    payload text := NEW.destination_uuid || '::' || NEW.topic;
+BEGIN
+    PERFORM pg_notify('dbos_notifications_channel', payload);
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Create notification trigger
+CREATE TRIGGER dbos_notifications_trigger
+    AFTER INSERT ON dbos.notifications
+    FOR EACH ROW EXECUTE FUNCTION dbos.notifications_function();
+
+CREATE TABLE dbos.workflow_events (
+    workflow_uuid TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    PRIMARY KEY (workflow_uuid, key),
+    FOREIGN KEY (workflow_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+-- Create events function
+CREATE OR REPLACE FUNCTION dbos.workflow_events_function() RETURNS TRIGGER AS $$
+DECLARE
+    payload text := NEW.workflow_uuid || '::' || NEW.key;
+BEGIN
+    PERFORM pg_notify('dbos_workflow_events_channel', payload);
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Create events trigger
+CREATE TRIGGER dbos_workflow_events_trigger
+    AFTER INSERT ON dbos.workflow_events
+    FOR EACH ROW EXECUTE FUNCTION dbos.workflow_events_function();
+
+CREATE TABLE dbos.streams (
+    workflow_uuid TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    "offset" INTEGER NOT NULL,
+    PRIMARY KEY (workflow_uuid, key, "offset"),
+    FOREIGN KEY (workflow_uuid) REFERENCES dbos.workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+CREATE TABLE dbos.event_dispatch_kv (
+    service_name TEXT NOT NULL,
+    workflow_fn_name TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT,
+    update_seq NUMERIC(38,0),
+    update_time NUMERIC(38,15),
+    PRIMARY KEY (service_name, workflow_fn_name, key)
+);
+"""
+
+
+def get_sqlite_timestamp_expr() -> str:
+    """Get SQLite timestamp expression with millisecond precision for Python >= 3.12."""
+    if sys.version_info >= (3, 12):
+        return "(unixepoch('subsec') * 1000)"
+    else:
+        return "(strftime('%s','now') * 1000)"
+
+
+sqlite_migration_one = f"""
+CREATE TABLE workflow_status (
+    workflow_uuid TEXT PRIMARY KEY,
+    status TEXT,
+    name TEXT,
+    authenticated_user TEXT,
+    assumed_role TEXT,
+    authenticated_roles TEXT,
+    request TEXT,
+    output TEXT,
+    error TEXT,
+    executor_id TEXT,
+    created_at INTEGER NOT NULL DEFAULT {get_sqlite_timestamp_expr()},
+    updated_at INTEGER NOT NULL DEFAULT {get_sqlite_timestamp_expr()},
+    application_version TEXT,
+    application_id TEXT,
+    class_name TEXT DEFAULT NULL,
+    config_name TEXT DEFAULT NULL,
+    recovery_attempts INTEGER DEFAULT 0,
+    queue_name TEXT,
+    workflow_timeout_ms INTEGER,
+    workflow_deadline_epoch_ms INTEGER,
+    inputs TEXT,
+    started_at_epoch_ms INTEGER,
+    deduplication_id TEXT,
+    priority INTEGER NOT NULL DEFAULT 0
+);
+
+CREATE INDEX workflow_status_created_at_index ON workflow_status (created_at);
+CREATE INDEX workflow_status_executor_id_index ON workflow_status (executor_id);
+CREATE INDEX workflow_status_status_index ON workflow_status (status);
+
+CREATE UNIQUE INDEX uq_workflow_status_queue_name_dedup_id
+    ON workflow_status (queue_name, deduplication_id);
+
+CREATE TABLE operation_outputs (
+    workflow_uuid TEXT NOT NULL,
+    function_id INTEGER NOT NULL,
+    function_name TEXT NOT NULL DEFAULT '',
+    output TEXT,
+    error TEXT,
+    child_workflow_id TEXT,
+    PRIMARY KEY (workflow_uuid, function_id),
+    FOREIGN KEY (workflow_uuid) REFERENCES workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+CREATE TABLE notifications (
+    destination_uuid TEXT NOT NULL,
+    topic TEXT,
+    message TEXT NOT NULL,
+    created_at_epoch_ms INTEGER NOT NULL DEFAULT {get_sqlite_timestamp_expr()},
+    message_uuid TEXT NOT NULL DEFAULT (hex(randomblob(16))),
+    FOREIGN KEY (destination_uuid) REFERENCES workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+CREATE INDEX idx_workflow_topic ON notifications (destination_uuid, topic);
+
+CREATE TABLE workflow_events (
+    workflow_uuid TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    PRIMARY KEY (workflow_uuid, key),
+    FOREIGN KEY (workflow_uuid) REFERENCES workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+
+CREATE TABLE streams (
+    workflow_uuid TEXT NOT NULL,
+    key TEXT NOT NULL,
+    value TEXT NOT NULL,
+    "offset" INTEGER NOT NULL,
+    PRIMARY KEY (workflow_uuid, key, "offset"),
+    FOREIGN KEY (workflow_uuid) REFERENCES workflow_status(workflow_uuid)
+        ON UPDATE CASCADE ON DELETE CASCADE
+);
+"""
+
+dbos_migrations = [dbos_migration_one]
+sqlite_migrations = [sqlite_migration_one]
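Note: taken together, ensure_dbos_schema decides which migration path applies, after which exactly one of the two runners is invoked. A minimal sketch of driving these helpers against a Postgres system database (the connection URL is a placeholder, and dbos._migration is a private module imported here only for illustration):

    import sqlalchemy as sa

    from dbos._migration import (
        ensure_dbos_schema,
        run_alembic_migrations,
        run_dbos_migrations,
    )

    engine = sa.create_engine("postgresql://postgres:dbos@localhost:5432/myapp_dbos_sys")
    if ensure_dbos_schema(engine):
        # Fresh database, or one already on DBOS-managed migrations
        run_dbos_migrations(engine)
    else:
        # Pre-existing Alembic-managed schema: use the deprecated path
        run_alembic_migrations(engine)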