dbos 0.19.0a4__tar.gz → 0.20.0a2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbos might be problematic.

Files changed (87)
  1. {dbos-0.19.0a4 → dbos-0.20.0a2}/PKG-INFO +21 -16
  2. {dbos-0.19.0a4 → dbos-0.20.0a2}/README.md +20 -15
  3. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_admin_server.py +45 -2
  4. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_context.py +11 -2
  5. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_core.py +45 -5
  6. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_dbos.py +19 -0
  7. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_error.py +11 -0
  8. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_fastapi.py +6 -2
  9. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_flask.py +6 -2
  10. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_kafka.py +17 -1
  11. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_queue.py +1 -0
  12. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_sys_db.py +69 -37
  13. dbos-0.20.0a2/dbos/_workflow_commands.py +171 -0
  14. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/cli.py +140 -1
  15. {dbos-0.19.0a4 → dbos-0.20.0a2}/pyproject.toml +1 -1
  16. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/queuedworkflow.py +1 -0
  17. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_admin_server.py +119 -1
  18. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_classdecorators.py +1 -0
  19. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_dbos.py +6 -3
  20. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_fastapi.py +20 -1
  21. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_flask.py +20 -1
  22. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_kafka.py +37 -1
  23. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_queue.py +115 -2
  24. dbos-0.20.0a2/tests/test_workflow_cmds.py +216 -0
  25. {dbos-0.19.0a4 → dbos-0.20.0a2}/LICENSE +0 -0
  26. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/__init__.py +0 -0
  27. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_app_db.py +0 -0
  28. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_classproperty.py +0 -0
  29. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_cloudutils/authentication.py +0 -0
  30. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_cloudutils/cloudutils.py +0 -0
  31. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_cloudutils/databases.py +0 -0
  32. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_croniter.py +0 -0
  33. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_db_wizard.py +0 -0
  34. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_dbos_config.py +0 -0
  35. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_kafka_message.py +0 -0
  36. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_logger.py +0 -0
  37. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/env.py +0 -0
  38. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/script.py.mako +0 -0
  39. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/versions/04ca4f231047_workflow_queues_executor_id.py +0 -0
  40. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/versions/50f3227f0b4b_fix_job_queue.py +0 -0
  41. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/versions/5c361fc04708_added_system_tables.py +0 -0
  42. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/versions/a3b18ad34abe_added_triggers.py +0 -0
  43. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/versions/d76646551a6b_job_queue_limiter.py +0 -0
  44. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/versions/d76646551a6c_workflow_queue.py +0 -0
  45. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_migrations/versions/eab0cc1d9a14_job_queue.py +0 -0
  46. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_outcome.py +0 -0
  47. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_recovery.py +0 -0
  48. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_registrations.py +0 -0
  49. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_request.py +0 -0
  50. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_roles.py +0 -0
  51. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_scheduler.py +0 -0
  52. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_schemas/__init__.py +0 -0
  53. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_schemas/application_database.py +0 -0
  54. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_schemas/system_database.py +0 -0
  55. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_serialization.py +0 -0
  56. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/README.md +0 -0
  57. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/__package/__init__.py +0 -0
  58. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/__package/main.py +0 -0
  59. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/__package/schema.py +0 -0
  60. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/alembic.ini +0 -0
  61. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/dbos-config.yaml.dbos +0 -0
  62. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/migrations/env.py.dbos +0 -0
  63. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/migrations/script.py.mako +0 -0
  64. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/migrations/versions/2024_07_31_180642_init.py +0 -0
  65. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_templates/hello/start_postgres_docker.py +0 -0
  66. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_tracer.py +0 -0
  67. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/dbos-config.schema.json +0 -0
  68. {dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/py.typed +0 -0
  69. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/__init__.py +0 -0
  70. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/atexit_no_ctor.py +0 -0
  71. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/atexit_no_launch.py +0 -0
  72. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/classdefs.py +0 -0
  73. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/conftest.py +0 -0
  74. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/more_classdefs.py +0 -0
  75. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_async.py +0 -0
  76. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_concurrency.py +0 -0
  77. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_config.py +0 -0
  78. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_croniter.py +0 -0
  79. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_failures.py +0 -0
  80. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_fastapi_roles.py +0 -0
  81. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_outcome.py +0 -0
  82. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_package.py +0 -0
  83. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_scheduler.py +0 -0
  84. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_schema_migration.py +0 -0
  85. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_singleton.py +0 -0
  86. {dbos-0.19.0a4 → dbos-0.20.0a2}/tests/test_spans.py +0 -0
  87. {dbos-0.19.0a4 → dbos-0.20.0a2}/version/__init__.py +0 -0
{dbos-0.19.0a4 → dbos-0.20.0a2}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: dbos
- Version: 0.19.0a4
+ Version: 0.20.0a2
  Summary: Ultra-lightweight durable execution in Python
  Author-Email: "DBOS, Inc." <contact@dbos.dev>
  License: MIT
@@ -28,14 +28,14 @@ Description-Content-Type: text/markdown

  <div align="center">

- # DBOS Transact: Ultra-Lightweight Durable Execution
+ # DBOS Transact: A Lightweight Durable Execution Library Built on Postgres

  #### [Documentation](https://docs.dbos.dev/) &nbsp;&nbsp;•&nbsp;&nbsp; [Examples](https://docs.dbos.dev/examples) &nbsp;&nbsp;•&nbsp;&nbsp; [Github](https://github.com/dbos-inc) &nbsp;&nbsp;•&nbsp;&nbsp; [Discord](https://discord.com/invite/jsmC6pXGgX)
  </div>

  ---

- DBOS Transact is a Python library providing **ultra-lightweight durable execution**.
+ DBOS Transact is a Python library for **ultra-lightweight durable execution**.
  For example:

  ```python
@@ -55,18 +55,23 @@ def workflow()

  Durable execution means your program is **resilient to any failure**.
  If it is ever interrupted or crashes, all your workflows will automatically resume from the last completed step.
- If you want to see durable execution in action, check out [this demo app](https://demo-widget-store.cloud.dbos.dev/) (source code [here](https://github.com/dbos-inc/dbos-demo-apps/tree/main/python/widget-store)).
- No matter how many times you try to crash it, it always resumes from exactly where it left off!
+ Durable execution helps solve many common problems:

- Under the hood, DBOS Transact works by storing your program's execution state (which workflows are currently executing and which steps they've completed) in a Postgres database.
- So all you need to use it is a Postgres database to connect to&mdash;there's no need for a "workflow server."
- This approach is also incredibly fast, for example [25x faster than AWS Step Functions](https://www.dbos.dev/blog/dbos-vs-aws-step-functions-benchmark).
+ - Orchestrating long-running or business-critical workflows so they seamlessly recover from any failure.
+ - Running reliable background jobs with no timeouts.
+ - Processing incoming events (e.g. from Kafka) exactly once.
+ - Running a fault-tolerant distributed task queue.
+ - Running a reliable cron scheduler.
+ - Operating an AI agent, or anything that connects to an unreliable or non-deterministic API.

- Some more cool features include:
+ What’s unique about DBOS's implementation of durable execution is that it’s implemented in a **lightweight library** that’s **totally backed by Postgres**.
+ To use DBOS, just `pip install` it and annotate your program with DBOS decorators.
+ Under the hood, those decorators store your program's execution state (which workflows are currently executing and which steps they've completed) in a Postgres database.
+ If your program crashes or is interrupted, they automatically recover its workflows from their stored state.
+ So all you need to use DBOS is Postgres&mdash;there are no other dependencies you have to manage, no separate workflow server.

- - Scheduled jobs&mdash;run your workflows exactly-once per time interval.
- - Exactly-once event processing&mdash;use workflows to process incoming events (for example, from a Kafka topic) exactly-once.
- - Observability&mdash;all workflows automatically emit [OpenTelemetry](https://opentelemetry.io/) traces.
+ One big advantage of this approach is that you can add DBOS to **any** Python application&mdash;**it’s just a library**.
+ You can use DBOS to add reliable background jobs or cron scheduling or queues to your app with no external dependencies except Postgres.

  ## Getting Started

@@ -77,7 +82,7 @@ pip install dbos
  dbos init --config
  ```

- Then, try it out with this simple program (requires Postgres):
+ Then, try it out with this simple program:

  ```python
  from fastapi import FastAPI
@@ -107,14 +112,14 @@ def fastapi_endpoint():
  dbos_workflow()
  ```

- Save the program into `main.py`, edit `dbos-config.yaml` to configure your Postgres connection settings, and start it with `fastapi run`.
+ Save the program into `main.py` and start it with `fastapi run`.
  Visit `localhost:8000` in your browser to start the workflow.
  When prompted, press `Control + \` to force quit your application.
  It should crash midway through the workflow, having completed step one but not step two.
  Then, restart your app with `fastapi run`.
  It should resume the workflow from where it left off, completing step two without re-executing step one.

- To learn how to build more complex workflows, see our [programming guide](https://docs.dbos.dev/python/programming-guide) or [examples](https://docs.dbos.dev/examples).
+ To learn how to build more complex workflows, see the [programming guide](https://docs.dbos.dev/python/programming-guide) or [examples](https://docs.dbos.dev/examples).

  ## Documentation

@@ -125,7 +130,7 @@ To learn how to build more complex workflows, see our [programming guide](https:

  - [**AI-Powered Slackbot**](https://docs.dbos.dev/python/examples/rag-slackbot) &mdash; A Slackbot that answers questions about previous Slack conversations, using DBOS to durably orchestrate its RAG pipeline.
  - [**Widget Store**](https://docs.dbos.dev/python/examples/widget-store) &mdash; An online storefront that uses DBOS durable workflows to be resilient to any failure.
- - [**Earthquake Tracker**](https://docs.dbos.dev/python/examples/earthquake-tracker) &mdash; A real-time earthquake dashboard that uses DBOS to stream data from the USGS into Postgres, then visualizes it with Streamlit.
+ - [**Scheduled Reminders**](https://docs.dbos.dev/python/examples/scheduled-reminders) &mdash; In just three lines of code, schedule an email to send days, weeks, or months in the future.

  More examples [here](https://docs.dbos.dev/examples)!

{dbos-0.19.0a4 → dbos-0.20.0a2}/README.md

@@ -1,14 +1,14 @@

  <div align="center">

- # DBOS Transact: Ultra-Lightweight Durable Execution
+ # DBOS Transact: A Lightweight Durable Execution Library Built on Postgres

  #### [Documentation](https://docs.dbos.dev/) &nbsp;&nbsp;•&nbsp;&nbsp; [Examples](https://docs.dbos.dev/examples) &nbsp;&nbsp;•&nbsp;&nbsp; [Github](https://github.com/dbos-inc) &nbsp;&nbsp;•&nbsp;&nbsp; [Discord](https://discord.com/invite/jsmC6pXGgX)
  </div>

  ---

- DBOS Transact is a Python library providing **ultra-lightweight durable execution**.
+ DBOS Transact is a Python library for **ultra-lightweight durable execution**.
  For example:

  ```python
@@ -28,18 +28,23 @@ def workflow()

  Durable execution means your program is **resilient to any failure**.
  If it is ever interrupted or crashes, all your workflows will automatically resume from the last completed step.
- If you want to see durable execution in action, check out [this demo app](https://demo-widget-store.cloud.dbos.dev/) (source code [here](https://github.com/dbos-inc/dbos-demo-apps/tree/main/python/widget-store)).
- No matter how many times you try to crash it, it always resumes from exactly where it left off!
+ Durable execution helps solve many common problems:

- Under the hood, DBOS Transact works by storing your program's execution state (which workflows are currently executing and which steps they've completed) in a Postgres database.
- So all you need to use it is a Postgres database to connect to&mdash;there's no need for a "workflow server."
- This approach is also incredibly fast, for example [25x faster than AWS Step Functions](https://www.dbos.dev/blog/dbos-vs-aws-step-functions-benchmark).
+ - Orchestrating long-running or business-critical workflows so they seamlessly recover from any failure.
+ - Running reliable background jobs with no timeouts.
+ - Processing incoming events (e.g. from Kafka) exactly once.
+ - Running a fault-tolerant distributed task queue.
+ - Running a reliable cron scheduler.
+ - Operating an AI agent, or anything that connects to an unreliable or non-deterministic API.

- Some more cool features include:
+ What’s unique about DBOS's implementation of durable execution is that it’s implemented in a **lightweight library** that’s **totally backed by Postgres**.
+ To use DBOS, just `pip install` it and annotate your program with DBOS decorators.
+ Under the hood, those decorators store your program's execution state (which workflows are currently executing and which steps they've completed) in a Postgres database.
+ If your program crashes or is interrupted, they automatically recover its workflows from their stored state.
+ So all you need to use DBOS is Postgres&mdash;there are no other dependencies you have to manage, no separate workflow server.

- - Scheduled jobs&mdash;run your workflows exactly-once per time interval.
- - Exactly-once event processing&mdash;use workflows to process incoming events (for example, from a Kafka topic) exactly-once.
- - Observability&mdash;all workflows automatically emit [OpenTelemetry](https://opentelemetry.io/) traces.
+ One big advantage of this approach is that you can add DBOS to **any** Python application&mdash;**it’s just a library**.
+ You can use DBOS to add reliable background jobs or cron scheduling or queues to your app with no external dependencies except Postgres.

  ## Getting Started

@@ -50,7 +55,7 @@ pip install dbos
  dbos init --config
  ```

- Then, try it out with this simple program (requires Postgres):
+ Then, try it out with this simple program:

  ```python
  from fastapi import FastAPI
@@ -80,14 +85,14 @@ def fastapi_endpoint():
  dbos_workflow()
  ```

- Save the program into `main.py`, edit `dbos-config.yaml` to configure your Postgres connection settings, and start it with `fastapi run`.
+ Save the program into `main.py` and start it with `fastapi run`.
  Visit `localhost:8000` in your browser to start the workflow.
  When prompted, press `Control + \` to force quit your application.
  It should crash midway through the workflow, having completed step one but not step two.
  Then, restart your app with `fastapi run`.
  It should resume the workflow from where it left off, completing step two without re-executing step one.

- To learn how to build more complex workflows, see our [programming guide](https://docs.dbos.dev/python/programming-guide) or [examples](https://docs.dbos.dev/examples).
+ To learn how to build more complex workflows, see the [programming guide](https://docs.dbos.dev/python/programming-guide) or [examples](https://docs.dbos.dev/examples).

  ## Documentation

@@ -98,7 +103,7 @@ To learn how to build more complex workflows, see our [programming guide](https:

  - [**AI-Powered Slackbot**](https://docs.dbos.dev/python/examples/rag-slackbot) &mdash; A Slackbot that answers questions about previous Slack conversations, using DBOS to durably orchestrate its RAG pipeline.
  - [**Widget Store**](https://docs.dbos.dev/python/examples/widget-store) &mdash; An online storefront that uses DBOS durable workflows to be resilient to any failure.
- - [**Earthquake Tracker**](https://docs.dbos.dev/python/examples/earthquake-tracker) &mdash; A real-time earthquake dashboard that uses DBOS to stream data from the USGS into Postgres, then visualizes it with Streamlit.
+ - [**Scheduled Reminders**](https://docs.dbos.dev/python/examples/scheduled-reminders) &mdash; In just three lines of code, schedule an email to send days, weeks, or months in the future.

  More examples [here](https://docs.dbos.dev/examples)!

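The README rewrite above leans on the idea that durability comes from annotating ordinary Python with DBOS decorators. Below is a minimal sketch of that pattern, following the decorator names in DBOS's documented Python API; the function names and step bodies are placeholders, not code from this package.

```python
from dbos import DBOS

DBOS()  # reads dbos-config.yaml for the Postgres connection settings

@DBOS.step()
def step_one() -> None:
    DBOS.logger.info("Step one completed")

@DBOS.step()
def step_two() -> None:
    DBOS.logger.info("Step two completed")

@DBOS.workflow()
def example_workflow() -> None:
    # If the process crashes between these calls, recovery resumes the
    # workflow at step_two instead of re-running step_one.
    step_one()
    step_two()

if __name__ == "__main__":
    DBOS.launch()
    example_workflow()
```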
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_admin_server.py

@@ -1,6 +1,7 @@
  from __future__ import annotations

  import json
+ import re
  import threading
  from functools import partial
  from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
@@ -15,6 +16,9 @@ if TYPE_CHECKING:
  _health_check_path = "/dbos-healthz"
  _workflow_recovery_path = "/dbos-workflow-recovery"
  _deactivate_path = "/deactivate"
+ # /workflows/:workflow_id/cancel
+ # /workflows/:workflow_id/resume
+ # /workflows/:workflow_id/restart


  class AdminServer:
@@ -79,12 +83,51 @@ class AdminRequestHandler(BaseHTTPRequestHandler):
  self._end_headers()
  self.wfile.write(json.dumps(workflow_ids).encode("utf-8"))
  else:
- self.send_response(404)
- self._end_headers()
+
+ restart_match = re.match(
+ r"^/workflows/(?P<workflow_id>[^/]+)/restart$", self.path
+ )
+ resume_match = re.match(
+ r"^/workflows/(?P<workflow_id>[^/]+)/resume$", self.path
+ )
+ cancel_match = re.match(
+ r"^/workflows/(?P<workflow_id>[^/]+)/cancel$", self.path
+ )
+
+ if restart_match:
+ workflow_id = restart_match.group("workflow_id")
+ self._handle_restart(workflow_id)
+ elif resume_match:
+ workflow_id = resume_match.group("workflow_id")
+ self._handle_resume(workflow_id)
+ elif cancel_match:
+ workflow_id = cancel_match.group("workflow_id")
+ self._handle_cancel(workflow_id)
+ else:
+ self.send_response(404)
+ self._end_headers()

  def log_message(self, format: str, *args: Any) -> None:
  return # Disable admin server request logging

+ def _handle_restart(self, workflow_id: str) -> None:
+ self.dbos.restart_workflow(workflow_id)
+ print("Restarting workflow", workflow_id)
+ self.send_response(204)
+ self._end_headers()
+
+ def _handle_resume(self, workflow_id: str) -> None:
+ print("Resuming workflow", workflow_id)
+ self.dbos.resume_workflow(workflow_id)
+ self.send_response(204)
+ self._end_headers()
+
+ def _handle_cancel(self, workflow_id: str) -> None:
+ print("Cancelling workflow", workflow_id)
+ self.dbos.cancel_workflow(workflow_id)
+ self.send_response(204)
+ self._end_headers()
+

  # Be consistent with DBOS-TS response.
  class PerfUtilization(TypedDict):
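The handler changes above register three new per-workflow management routes on the admin server. A hedged sketch of how they might be exercised is shown below; the port (3001) and the use of POST are assumptions based on DBOS's documented admin-server defaults, not something shown in this diff.

```python
import urllib.request

def admin_post(path: str, port: int = 3001) -> int:
    # Assumed default admin port; the handlers above reply with 204 on success.
    req = urllib.request.Request(
        f"http://localhost:{port}{path}", data=b"", method="POST"
    )
    with urllib.request.urlopen(req) as resp:
        return resp.status

workflow_id = "example-workflow-id"  # placeholder workflow ID
admin_post(f"/workflows/{workflow_id}/cancel")
admin_post(f"/workflows/{workflow_id}/resume")
admin_post(f"/workflows/{workflow_id}/restart")
```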
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_context.py

@@ -57,6 +57,7 @@ class DBOSContext:
  self.request: Optional["Request"] = None

  self.id_assigned_for_next_workflow: str = ""
+ self.is_within_set_workflow_id_block: bool = False

  self.parent_workflow_id: str = ""
  self.parent_workflow_fid: int = -1
@@ -78,6 +79,7 @@
  rv.logger = self.logger
  rv.id_assigned_for_next_workflow = self.id_assigned_for_next_workflow
  self.id_assigned_for_next_workflow = ""
+ rv.is_within_set_workflow_id_block = self.is_within_set_workflow_id_block
  rv.parent_workflow_id = self.workflow_id
  rv.parent_workflow_fid = self.function_id
  rv.in_recovery = self.in_recovery
@@ -95,6 +97,10 @@
  if len(self.id_assigned_for_next_workflow) > 0:
  wfid = self.id_assigned_for_next_workflow
  else:
+ if self.is_within_set_workflow_id_block:
+ self.logger.warning(
+ f"Multiple workflows started in the same SetWorkflowID block. Only the first workflow is assigned the specified workflow ID; subsequent workflows will use a generated workflow ID."
+ )
  wfid = str(uuid.uuid4())
  return wfid

@@ -286,7 +292,7 @@ class DBOSContextSwap:

  class SetWorkflowID:
  """
- Set the workflow ID to be used for the enclosed workflow invocation.
+ Set the workflow ID to be used for the enclosed workflow invocation. Note: Only the first workflow will be started with the specified workflow ID within a `with SetWorkflowID` block.

  Typical Usage
  ```
@@ -311,7 +317,9 @@
  if ctx is None:
  self.created_ctx = True
  _set_local_dbos_context(DBOSContext())
- assert_current_dbos_context().id_assigned_for_next_workflow = self.wfid
+ ctx = assert_current_dbos_context()
+ ctx.id_assigned_for_next_workflow = self.wfid
+ ctx.is_within_set_workflow_id_block = True
  return self

@@ -321,6 +329,7 @@
  traceback: Optional[TracebackType],
  ) -> Literal[False]:
  # Code to clean up the basic context if we created it
+ assert_current_dbos_context().is_within_set_workflow_id_block = False
  if self.created_ctx:
  _clear_local_dbos_context()
  return False # Did not handle
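The context changes above mean a `with SetWorkflowID(...)` block now only assigns the supplied ID to the first workflow it starts; later starts in the same block log the new warning and fall back to a generated UUID. A small sketch of that behavior, assuming a `my_workflow` function decorated with `@DBOS.workflow()` elsewhere:

```python
from dbos import DBOS, SetWorkflowID

with SetWorkflowID("example-id"):
    first = DBOS.start_workflow(my_workflow)   # assigned "example-id"
    second = DBOS.start_workflow(my_workflow)  # warns, receives a generated UUID instead
```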
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_core.py

@@ -188,6 +188,7 @@ def _init_workflow(
  wf_status = dbos._sys_db.update_workflow_status(
  status, False, ctx.in_recovery, max_recovery_attempts=max_recovery_attempts
  )
+ # TODO: Modify the inputs if they were changed by `update_workflow_inputs`
  dbos._sys_db.update_workflow_inputs(wfid, _serialization.serialize_args(inputs))
  else:
  # Buffer the inputs for single-transaction workflows, but don't buffer the status
@@ -265,7 +266,9 @@ def _execute_workflow_wthread(
  raise


- def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[Any]":
+ def execute_workflow_by_id(
+ dbos: "DBOS", workflow_id: str, startNew: bool = False
+ ) -> "WorkflowHandle[Any]":
  status = dbos._sys_db.get_workflow_status(workflow_id)
  if not status:
  raise DBOSRecoveryError(workflow_id, "Workflow status not found")
@@ -292,7 +295,8 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
  workflow_id,
  f"Cannot execute workflow because instance '{iname}' is not registered",
  )
- with SetWorkflowID(workflow_id):
+
+ if startNew:
  return start_workflow(
  dbos,
  wf_func,
@@ -302,6 +306,17 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
  *inputs["args"],
  **inputs["kwargs"],
  )
+ else:
+ with SetWorkflowID(workflow_id):
+ return start_workflow(
+ dbos,
+ wf_func,
+ status["queue_name"],
+ True,
+ dbos._registry.instance_info_map[iname],
+ *inputs["args"],
+ **inputs["kwargs"],
+ )
  elif status["class_name"] is not None:
  class_name = status["class_name"]
  if class_name not in dbos._registry.class_info_map:
@@ -309,7 +324,8 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
  workflow_id,
  f"Cannot execute workflow because class '{class_name}' is not registered",
  )
- with SetWorkflowID(workflow_id):
+
+ if startNew:
  return start_workflow(
  dbos,
  wf_func,
@@ -319,8 +335,19 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
  *inputs["args"],
  **inputs["kwargs"],
  )
+ else:
+ with SetWorkflowID(workflow_id):
+ return start_workflow(
+ dbos,
+ wf_func,
+ status["queue_name"],
+ True,
+ dbos._registry.class_info_map[class_name],
+ *inputs["args"],
+ **inputs["kwargs"],
+ )
  else:
- with SetWorkflowID(workflow_id):
+ if startNew:
  return start_workflow(
  dbos,
  wf_func,
@@ -329,6 +356,16 @@ def execute_workflow_by_id(dbos: "DBOS", workflow_id: str) -> "WorkflowHandle[An
  *inputs["args"],
  **inputs["kwargs"],
  )
+ else:
+ with SetWorkflowID(workflow_id):
+ return start_workflow(
+ dbos,
+ wf_func,
+ status["queue_name"],
+ True,
+ *inputs["args"],
+ **inputs["kwargs"],
+ )


  @overload
@@ -422,6 +459,9 @@
  or wf_status == WorkflowStatusString.ERROR.value
  or wf_status == WorkflowStatusString.SUCCESS.value
  ):
+ dbos.logger.debug(
+ f"Workflow {new_wf_id} already completed with status {wf_status}. Directly returning a workflow handle."
+ )
  return WorkflowHandlePolling(new_wf_id, dbos)

  if fself is not None:
@@ -494,7 +534,7 @@
  temp_wf_type=get_temp_workflow_type(func),
  max_recovery_attempts=max_recovery_attempts,
  )
-
+ # TODO: maybe modify the parameters if they've been changed by `_init_workflow`
  dbos.logger.debug(
  f"Running workflow, id: {ctx.workflow_id}, name: {get_dbos_func_name(func)}"
  )
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_dbos.py

@@ -56,6 +56,7 @@ from ._registrations import (
  )
  from ._roles import default_required_roles, required_roles
  from ._scheduler import ScheduledWorkflow, scheduled
+ from ._sys_db import WorkflowStatusString
  from ._tracer import dbos_tracer

  if TYPE_CHECKING:
@@ -231,6 +232,7 @@ class DBOS:
  f"DBOS configured multiple times with conflicting information"
  )
  config = _dbos_global_registry.config
+
  _dbos_global_instance = super().__new__(cls)
  _dbos_global_instance.__init__(fastapi=fastapi, config=config, flask=flask) # type: ignore
  else:
@@ -767,6 +769,11 @@ class DBOS:
  """Execute a workflow by ID (for recovery)."""
  return execute_workflow_by_id(_get_dbos_instance(), workflow_id)

+ @classmethod
+ def restart_workflow(cls, workflow_id: str) -> None:
+ """Execute a workflow by ID (for recovery)."""
+ execute_workflow_by_id(_get_dbos_instance(), workflow_id, True)
+
  @classmethod
  def recover_pending_workflows(
  cls, executor_ids: List[str] = ["local"]
@@ -774,6 +781,18 @@
  """Find all PENDING workflows and execute them."""
  return recover_pending_workflows(_get_dbos_instance(), executor_ids)

+ @classmethod
+ def cancel_workflow(cls, workflow_id: str) -> None:
+ """Cancel a workflow by ID."""
+ _get_dbos_instance()._sys_db.set_workflow_status(
+ workflow_id, WorkflowStatusString.CANCELLED, False
+ )
+
+ @classmethod
+ def resume_workflow(cls, workflow_id: str) -> None:
+ """Resume a workflow by ID."""
+ execute_workflow_by_id(_get_dbos_instance(), workflow_id, False)
+
  @classproperty
  def logger(cls) -> Logger:
  """Return the DBOS `Logger` for the current context."""
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_error.py

@@ -35,6 +35,7 @@ class DBOSErrorCode(Enum):
  DeadLetterQueueError = 6
  MaxStepRetriesExceeded = 7
  NotAuthorized = 8
+ ConflictingWorkflowError = 9


  class DBOSWorkflowConflictIDError(DBOSException):
@@ -47,6 +48,16 @@ class DBOSWorkflowConflictIDError(DBOSException):
  )


+ class DBOSConflictingWorkflowError(DBOSException):
+ """Exception raised different workflows started with the same workflow ID."""
+
+ def __init__(self, workflow_id: str, message: Optional[str] = None):
+ super().__init__(
+ f"Conflicting workflow invocation with the same ID ({workflow_id}): {message}",
+ dbos_error_code=DBOSErrorCode.ConflictingWorkflowError.value,
+ )
+
+
  class DBOSRecoveryError(DBOSException):
  """Exception raised when a workflow recovery fails."""

{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_fastapi.py

@@ -94,7 +94,11 @@ def setup_fastapi_middleware(app: FastAPI, dbos: DBOS) -> None:
  with EnterDBOSHandler(attributes):
  ctx = assert_current_dbos_context()
  ctx.request = _make_request(request)
- workflow_id = request.headers.get("dbos-idempotency-key", "")
- with SetWorkflowID(workflow_id):
+ workflow_id = request.headers.get("dbos-idempotency-key")
+ if workflow_id is not None:
+ # Set the workflow ID for the handler
+ with SetWorkflowID(workflow_id):
+ response = await call_next(request)
+ else:
  response = await call_next(request)
  return response
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_flask.py

@@ -34,8 +34,12 @@ class FlaskMiddleware:
  with EnterDBOSHandler(attributes):
  ctx = assert_current_dbos_context()
  ctx.request = _make_request(request)
- workflow_id = request.headers.get("dbos-idempotency-key", "")
- with SetWorkflowID(workflow_id):
+ workflow_id = request.headers.get("dbos-idempotency-key")
+ if workflow_id is not None:
+ # Set the workflow ID for the handler
+ with SetWorkflowID(workflow_id):
+ response = self.app(environ, start_response)
+ else:
  response = self.app(environ, start_response)
  return response

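In both the FastAPI and Flask middleware, `SetWorkflowID` is now entered only when a `dbos-idempotency-key` header is actually present; requests without it get a generated workflow ID rather than the previous behavior of passing an empty string. A hedged client-side sketch (placeholder URL and key):

```python
import urllib.request

# Reusing the same idempotency key maps retried requests to the same workflow ID.
req = urllib.request.Request(
    "http://localhost:8000/example-endpoint",
    headers={"dbos-idempotency-key": "order-12345"},
)
urllib.request.urlopen(req)
```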
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_kafka.py

@@ -1,3 +1,4 @@
+ import re
  import threading
  from typing import TYPE_CHECKING, Any, Callable, NoReturn

@@ -19,6 +20,14 @@ _kafka_queue: Queue
  _in_order_kafka_queues: dict[str, Queue] = {}


+ def safe_group_name(method_name: str, topics: list[str]) -> str:
+ safe_group_id = "-".join(
+ re.sub(r"[^a-zA-Z0-9\-]", "", str(r)) for r in [method_name, *topics]
+ )
+
+ return f"dbos-kafka-group-{safe_group_id}"[:255]
+
+
  def _kafka_consumer_loop(
  func: _KafkaConsumerWorkflow,
  config: dict[str, Any],
@@ -34,6 +43,12 @@
  if "auto.offset.reset" not in config:
  config["auto.offset.reset"] = "earliest"

+ if config.get("group.id") is None:
+ config["group.id"] = safe_group_name(func.__qualname__, topics)
+ dbos_logger.warning(
+ f"Consumer group ID not found. Using generated group.id {config['group.id']}"
+ )
+
  consumer = Consumer(config)
  try:
  consumer.subscribe(topics)
@@ -71,8 +86,9 @@
  topic=cmsg.topic(),
  value=cmsg.value(),
  )
+ groupID = config.get("group.id")
  with SetWorkflowID(
- f"kafka-unique-id-{msg.topic}-{msg.partition}-{msg.offset}"
+ f"kafka-unique-id-{msg.topic}-{msg.partition}-{groupID}-{msg.offset}"
  ):
  if in_order:
  assert msg.topic is not None
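When no `group.id` is configured, the consumer loop now derives one with `safe_group_name`. A worked trace of that helper with a hypothetical consumer function and topic; every character other than letters, digits, and `-` is stripped from each part before joining:

```python
safe_group_name("MyConsumer.handle", ["my-topic"])
# -> "dbos-kafka-group-MyConsumerhandle-my-topic" (then truncated to 255 characters)
```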
{dbos-0.19.0a4 → dbos-0.20.0a2}/dbos/_queue.py

@@ -36,6 +36,7 @@ class Queue:
  name: str,
  concurrency: Optional[int] = None,
  limiter: Optional[QueueRateLimit] = None,
+ *, # Disable positional arguments from here on
  worker_concurrency: Optional[int] = None,
  ) -> None:
  if (
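The `*` inserted into the constructor signature makes `worker_concurrency` (and anything added after it) keyword-only. A brief sketch of the effect, assuming `Queue` is imported from the package's public interface as in its docs:

```python
from dbos import Queue

queue = Queue("example-queue", worker_concurrency=5)  # still valid: passed by keyword
# Queue("example-queue", None, None, 5)               # would now raise TypeError: no longer positional
```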