orchestrator-core 4.6.5-py3-none-any.whl → 4.7.0-py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- orchestrator/__init__.py +1 -1
- orchestrator/api/api_v1/api.py +4 -0
- orchestrator/api/api_v1/endpoints/processes.py +25 -9
- orchestrator/api/api_v1/endpoints/schedules.py +44 -0
- orchestrator/app.py +34 -1
- orchestrator/cli/scheduler.py +126 -11
- orchestrator/cli/search/resize_embedding.py +3 -0
- orchestrator/db/models.py +26 -0
- orchestrator/graphql/schemas/process.py +2 -2
- orchestrator/graphql/schemas/workflow.py +1 -1
- orchestrator/llm_settings.py +0 -1
- orchestrator/migrations/versions/schema/2020-10-19_a76b9185b334_add_generic_workflows_to_core.py +1 -0
- orchestrator/migrations/versions/schema/2021-04-06_3c8b9185c221_add_validate_products_task.py +1 -0
- orchestrator/migrations/versions/schema/2025-11-18_961eddbd4c13_create_linker_table_workflow_apscheduler.py +106 -0
- orchestrator/migrations/versions/schema/2025-12-10_9736496e3eba_set_is_task_true_on_certain_tasks.py +40 -0
- orchestrator/schedules/__init__.py +8 -7
- orchestrator/schedules/scheduler.py +27 -1
- orchestrator/schedules/scheduling.py +5 -1
- orchestrator/schedules/service.py +253 -0
- orchestrator/schemas/schedules.py +71 -0
- orchestrator/search/agent/prompts.py +10 -6
- orchestrator/search/agent/tools.py +55 -15
- orchestrator/search/aggregations/base.py +6 -2
- orchestrator/search/query/builder.py +75 -3
- orchestrator/search/query/mixins.py +57 -2
- orchestrator/search/query/queries.py +15 -1
- orchestrator/search/query/validation.py +43 -0
- orchestrator/services/processes.py +0 -7
- orchestrator/services/workflows.py +4 -0
- orchestrator/settings.py +48 -0
- orchestrator/utils/auth.py +2 -2
- orchestrator/websocket/__init__.py +14 -0
- orchestrator/workflow.py +1 -1
- orchestrator/workflows/__init__.py +1 -0
- orchestrator/workflows/modify_note.py +10 -1
- orchestrator/workflows/removed_workflow.py +8 -1
- orchestrator/workflows/tasks/cleanup_tasks_log.py +9 -2
- orchestrator/workflows/tasks/resume_workflows.py +4 -0
- orchestrator/workflows/tasks/validate_product_type.py +7 -1
- orchestrator/workflows/tasks/validate_products.py +9 -1
- orchestrator/{schedules → workflows/tasks}/validate_subscriptions.py +16 -3
- orchestrator/workflows/translations/en-GB.json +2 -1
- {orchestrator_core-4.6.5.dist-info → orchestrator_core-4.7.0.dist-info}/METADATA +11 -11
- {orchestrator_core-4.6.5.dist-info → orchestrator_core-4.7.0.dist-info}/RECORD +46 -43
- orchestrator/schedules/resume_workflows.py +0 -21
- orchestrator/schedules/task_vacuum.py +0 -21
- {orchestrator_core-4.6.5.dist-info → orchestrator_core-4.7.0.dist-info}/WHEEL +0 -0
- {orchestrator_core-4.6.5.dist-info → orchestrator_core-4.7.0.dist-info}/licenses/LICENSE +0 -0
orchestrator/__init__.py
CHANGED
orchestrator/api/api_v1/api.py
CHANGED
@@ -22,6 +22,7 @@ from orchestrator.api.api_v1.endpoints import (
     product_blocks,
     products,
     resource_types,
+    schedules,
     settings,
     subscription_customer_descriptions,
     subscriptions,
@@ -88,6 +89,9 @@ api_router.include_router(
 api_router.include_router(
     ws.router, prefix="/ws", tags=["Core", "Events"]
 )  # Auth on the websocket is handled in the Websocket Manager
+api_router.include_router(
+    schedules.router, prefix="/schedules", tags=["Core", "Schedules"], dependencies=[Depends(authorize)]
+)
 
 if llm_settings.SEARCH_ENABLED:
     from orchestrator.api.api_v1.endpoints import search
orchestrator/api/api_v1/endpoints/processes.py
CHANGED
@@ -13,6 +13,7 @@
 
 """Module that implements process related API endpoints."""
 
+import asyncio
 import struct
 import zlib
 from http import HTTPStatus
@@ -62,10 +63,12 @@ from orchestrator.utils.enrich_process import enrich_process
 from orchestrator.websocket import (
     WS_CHANNELS,
     broadcast_invalidate_status_counts,
+    broadcast_invalidate_status_counts_async,
     broadcast_process_update_to_websocket,
     websocket_manager,
 )
 from orchestrator.workflow import ProcessStat, ProcessStatus, StepList, Workflow
+from orchestrator.workflows import get_workflow
 from pydantic_forms.types import JSON, State
 
 router = APIRouter()
@@ -175,7 +178,7 @@ def delete(process_id: UUID) -> None:
     status_code=HTTPStatus.CREATED,
     dependencies=[Depends(check_global_lock, use_cache=False)],
 )
-def new_process(
+async def new_process(
     workflow_key: str,
     request: Request,
     json_data: list[dict[str, Any]] | None = Body(...),
@@ -183,8 +186,21 @@ def new_process(
     user_model: OIDCUserModel | None = Depends(authenticate),
 ) -> dict[str, UUID]:
     broadcast_func = api_broadcast_process_data(request)
-    process_id = start_process(
-        workflow_key, user_inputs=json_data, user_model=user_model, user=user, broadcast_func=broadcast_func
+
+    workflow = get_workflow(workflow_key)
+    if not workflow:
+        raise_status(HTTPStatus.NOT_FOUND, "Workflow does not exist")
+
+    if not await workflow.authorize_callback(user_model):
+        raise_status(HTTPStatus.FORBIDDEN, f"User is not authorized to execute '{workflow_key}' workflow")
+
+    process_id = await asyncio.to_thread(
+        start_process,
+        workflow_key,
+        user_inputs=json_data,
+        user_model=user_model,
+        user=user,
+        broadcast_func=broadcast_func,
     )
 
     return {"id": process_id}
@@ -196,14 +212,14 @@ def new_process(
     status_code=HTTPStatus.NO_CONTENT,
     dependencies=[Depends(check_global_lock, use_cache=False)],
 )
-def resume_process_endpoint(
+async def resume_process_endpoint(
     process_id: UUID,
     request: Request,
     json_data: JSON = Body(...),
     user: str = Depends(user_name),
     user_model: OIDCUserModel | None = Depends(authenticate),
 ) -> None:
-    process = _get_process(process_id)
+    process = await asyncio.to_thread(_get_process, process_id)
 
     if not can_be_resumed(process.last_status):
         raise_status(HTTPStatus.CONFLICT, f"Resuming a {process.last_status.lower()} workflow is not possible")
@@ -211,16 +227,16 @@ def resume_process_endpoint(
     pstat = load_process(process)
     auth_resume, auth_retry = get_auth_callbacks(get_steps_to_evaluate_for_rbac(pstat), pstat.workflow)
     if process.last_status == ProcessStatus.SUSPENDED:
-        if auth_resume is not None and not auth_resume(user_model):
+        if auth_resume is not None and not (await auth_resume(user_model)):
             raise_status(HTTPStatus.FORBIDDEN, "User is not authorized to resume step")
     elif process.last_status in (ProcessStatus.FAILED, ProcessStatus.WAITING):
-        if auth_retry is not None and not auth_retry(user_model):
+        if auth_retry is not None and not (await auth_retry(user_model)):
             raise_status(HTTPStatus.FORBIDDEN, "User is not authorized to retry step")
 
-    broadcast_invalidate_status_counts()
+    await broadcast_invalidate_status_counts_async()
     broadcast_func = api_broadcast_process_data(request)
 
-    resume_process(process, user=user, user_inputs=json_data, broadcast_func=broadcast_func)
+    await asyncio.to_thread(resume_process, process, user=user, user_inputs=json_data, broadcast_func=broadcast_func)
 
 
 @router.post(
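The handlers above become `async` and push the blocking `start_process`/`resume_process` calls into a worker thread with `asyncio.to_thread`. A minimal, self-contained sketch of that pattern; the endpoint and `blocking_lookup` are hypothetical stand-ins, not orchestrator-core APIs:

```python
import asyncio

from fastapi import FastAPI

app = FastAPI()


def blocking_lookup(item_id: str) -> dict:
    """Stand-in for a blocking call, e.g. a synchronous database query."""
    return {"id": item_id}


@app.get("/items/{item_id}")
async def get_item(item_id: str) -> dict:
    # asyncio.to_thread runs the blocking function in a worker thread,
    # so the event loop keeps serving other requests meanwhile.
    return await asyncio.to_thread(blocking_lookup, item_id)
```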
orchestrator/api/api_v1/endpoints/schedules.py
ADDED
@@ -0,0 +1,44 @@
+# Copyright 2019-2025 SURF.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from http import HTTPStatus
+
+import structlog
+from fastapi.routing import APIRouter
+
+from orchestrator.schedules.service import add_scheduled_task_to_queue
+from orchestrator.schemas.schedules import APSchedulerJobCreate, APSchedulerJobDelete, APSchedulerJobUpdate
+
+logger = structlog.get_logger(__name__)
+
+router: APIRouter = APIRouter()
+
+
+@router.post("/", status_code=HTTPStatus.CREATED)
+def create_scheduled_task(payload: APSchedulerJobCreate) -> dict[str, str]:
+    """Create a scheduled task."""
+    add_scheduled_task_to_queue(payload)
+    return {"message": "Added to Create Queue", "status": "CREATED"}
+
+
+@router.put("/", status_code=HTTPStatus.OK)
+async def update_scheduled_task(payload: APSchedulerJobUpdate) -> dict[str, str]:
+    """Update a scheduled task."""
+    add_scheduled_task_to_queue(payload)
+    return {"message": "Added to Update Queue", "status": "UPDATED"}
+
+
+@router.delete("/", status_code=HTTPStatus.OK)
+async def delete_scheduled_task(payload: APSchedulerJobDelete) -> dict[str, str]:
+    """Delete a scheduled task."""
+    add_scheduled_task_to_queue(payload)
+    return {"message": "Added to Delete Queue", "status": "DELETED"}
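With the router mounted under `/schedules` (see the `api.py` change above), schedule mutations can be issued over HTTP. A hedged sketch, assuming the API is served at `http://localhost:8080/api`; the payload fields mirror those used by the `load-initial-schedule` CLI command further down, and the UUID and auth token are placeholders:

```python
import requests

# Placeholder base URL and token; real deployments sit behind OIDC authorization.
BASE_URL = "http://localhost:8080/api/schedules/"
HEADERS = {"Authorization": "Bearer <token>"}

payload = {
    "name": "Task Resume Workflows",
    "workflow_name": "task_resume_workflows",
    "workflow_id": "00000000-0000-0000-0000-000000000000",  # dummy UUID for illustration
    "trigger": "interval",
    "trigger_kwargs": {"hours": 1},
}

response = requests.post(BASE_URL, json=payload, headers=HEADERS, timeout=10)
print(response.status_code, response.json())
# Expected: 201 {"message": "Added to Create Queue", "status": "CREATED"}
```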
orchestrator/app.py
CHANGED
@@ -57,7 +57,8 @@ from orchestrator.graphql.types import ScalarOverrideType, StrawberryModelType
 from orchestrator.log_config import LOGGER_OVERRIDES
 from orchestrator.metrics import ORCHESTRATOR_METRICS_REGISTRY, initialize_default_metrics
 from orchestrator.services.process_broadcast_thread import ProcessDataBroadcastThread
-from orchestrator.settings import AppSettings, ExecutorType, app_settings
+from orchestrator.settings import AppSettings, ExecutorType, app_settings, get_authorizers
+from orchestrator.utils.auth import Authorizer
 from orchestrator.version import GIT_COMMIT_HASH
 from orchestrator.websocket import init_websocket_manager
 from pydantic_forms.exception_handlers.fastapi import form_error_handler
@@ -311,6 +312,38 @@ class OrchestratorCore(FastAPI):
         """
         self.auth_manager.graphql_authorization = graphql_authorization_instance
 
+    def register_internal_authorize_callback(self, callback: Authorizer) -> None:
+        """Registers the authorize_callback for WFO's internal workflows and tasks.
+
+        Since RBAC policies are applied to workflows via decorator, this enables registration of callbacks
+        for workflows defined in orchestrator-core itself.
+        However, this assignment MUST be made before any workflows are run.
+
+        Args:
+            callback (Authorizer): The async Authorizer to run for the `authorize_callback` argument of internal workflows.
+
+        Returns:
+            None
+        """
+        authorizers = get_authorizers()
+        authorizers.internal_authorize_callback = callback
+
+    def register_internal_retry_auth_callback(self, callback: Authorizer) -> None:
+        """Registers the retry_auth_callback for WFO's internal workflows and tasks.
+
+        Since RBAC policies are applied to workflows via decorator, this enables registration of callbacks
+        for workflows defined in orchestrator-core itself.
+        However, this assignment MUST be made before any workflows are run.
+
+        Args:
+            callback (Authorizer): The async Authorizer to run for the `retry_auth_callback` argument of internal workflows.
+
+        Returns:
+            None
+        """
+        authorizers = get_authorizers()
+        authorizers.internal_retry_auth_callback = callback
+
 
 main_typer_app = typer.Typer()
 main_typer_app.add_typer(cli_app, name="orchestrator", help="The orchestrator CLI commands")
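The two new `register_internal_*` hooks let an application attach RBAC callbacks to the workflows that ship with orchestrator-core itself. A sketch of how a deployment might use them; the group-based policy is illustrative (`OIDCUserModel` behaves like a dict in oauth2-lib, but verify against your version):

```python
from oauth2_lib.fastapi import OIDCUserModel

from orchestrator import OrchestratorCore
from orchestrator.settings import AppSettings

app = OrchestratorCore(base_settings=AppSettings())


async def allow_admins_only(user: OIDCUserModel | None) -> bool:
    # Illustrative policy: only members of an "admin" group may run or retry
    # orchestrator-core's internal tasks.
    return bool(user and "admin" in user.get("groups", []))


# Per the docstrings above, both callbacks MUST be registered before any workflows run.
app.register_internal_authorize_callback(allow_admins_only)
app.register_internal_retry_auth_callback(allow_admins_only)
```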
orchestrator/cli/scheduler.py
CHANGED
@@ -10,42 +10,104 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-
 import time
+from typing import cast
 
 import typer
+from redis import Redis
 
 from orchestrator.schedules.scheduler import (
     get_all_scheduler_tasks,
     get_scheduler,
     get_scheduler_task,
 )
+from orchestrator.schedules.service import (
+    SCHEDULER_QUEUE,
+    add_scheduled_task_to_queue,
+    workflow_scheduler_queue,
+)
+from orchestrator.schemas.schedules import APSchedulerJobCreate
+from orchestrator.services.workflows import get_workflow_by_name
+from orchestrator.settings import app_settings
+from orchestrator.utils.redis_client import create_redis_client
 
 app: typer.Typer = typer.Typer()
 
 
 @app.command()
 def run() -> None:
-    """
-
-
+    """Starts the scheduler in the foreground.
+
+    While running, this process will:
+
+    * Periodically wake up when the next schedule is due for execution, and run it
+    * Process schedule changes made through the schedule API
+    """
+
+    def _get_scheduled_task_item_from_queue(redis_conn: Redis) -> tuple[str, bytes] | None:
+        """Get an item from the Redis Queue for scheduler tasks."""
+        try:
+            return redis_conn.brpop(SCHEDULER_QUEUE, timeout=1)
+        except ConnectionError as e:
+            typer.echo(f"There was a connection error with Redis. Retrying in 3 seconds... {e}")
+            time.sleep(3)
+        except Exception as e:
+            typer.echo(f"There was an unexpected error with Redis. Retrying in 1 second... {e}")
             time.sleep(1)
 
+        return None
+
+    with get_scheduler() as scheduler_connection:
+        redis_connection = create_redis_client(app_settings.CACHE_URI)
+        while True:
+            item = _get_scheduled_task_item_from_queue(redis_connection)
+            if not item:
+                continue
+
+            workflow_scheduler_queue(item, scheduler_connection)
+
 
 @app.command()
 def show_schedule() -> None:
-    """
+    """The `show-schedule` command shows an overview of the scheduled jobs."""
+    from rich.console import Console
+    from rich.table import Table
 
-
-
-
+    from orchestrator.schedules.service import get_linker_entries_by_schedule_ids
+
+    console = Console()
+
+    table = Table(title="Scheduled Tasks")
+    table.add_column("id", no_wrap=True)
+    table.add_column("name")
+    table.add_column("source")
+    table.add_column("next run time")
+    table.add_column("trigger")
+
+    scheduled_tasks = get_all_scheduler_tasks()
+    _schedule_ids = [task.id for task in scheduled_tasks]
+    api_managed = {str(i.schedule_id) for i in get_linker_entries_by_schedule_ids(_schedule_ids)}
+
+    for task in scheduled_tasks:
+        source = "API" if task.id in api_managed else "decorator"
+        run_time = str(task.next_run_time.replace(microsecond=0))
+        table.add_row(task.id, task.name, source, str(run_time), str(task.trigger))
+
+    console.print(table)
 
 
 @app.command()
 def force(task_id: str) -> None:
-    """Force the execution of (a) scheduler(s) based on a
+    """Force the execution of (a) scheduler(s) based on a schedule ID.
+
+    Use the `show-schedule` command to determine the ID of the schedule to execute.
+
+    CLI Arguments:
+    ```sh
+    Arguments:
+      SCHEDULE_ID  ID of the schedule to execute
+    ```
+    """
     task = get_scheduler_task(task_id)
 
     if not task:
@@ -59,3 +121,56 @@ def force(task_id: str) -> None:
     except Exception as e:
         typer.echo(f"Task execution failed: {e}")
         raise typer.Exit(code=1)
+
+
+@app.command()
+def load_initial_schedule() -> None:
+    """The `load-initial-schedule` command loads the initial schedule using the scheduler API.
+
+    The initial schedules are:
+    - Task Resume Workflows
+    - Task Clean Up Tasks
+    - Task Validate Subscriptions
+
+    !!! Warning
+        This command is not idempotent.
+
+        Please run `show-schedule` first to determine if the schedules already exist.
+    """
+    initial_schedules = [
+        {
+            "name": "Task Resume Workflows",
+            "workflow_name": "task_resume_workflows",
+            "workflow_id": "",
+            "trigger": "interval",
+            "trigger_kwargs": {"hours": 1},
+        },
+        {
+            "name": "Task Clean Up Tasks",
+            "workflow_name": "task_clean_up_tasks",
+            "workflow_id": "",
+            "trigger": "interval",
+            "trigger_kwargs": {"hours": 6},
+        },
+        {
+            "name": "Task Validate Subscriptions",
+            "workflow_name": "task_validate_subscriptions",
+            "workflow_id": "",
+            "trigger": "cron",
+            "trigger_kwargs": {"hour": 0, "minute": 10},
+        },
+    ]
+
+    for schedule in initial_schedules:
+        # enrich with workflow id
+        workflow_name = cast(str, schedule.get("workflow_name"))
+        workflow = get_workflow_by_name(workflow_name)
+
+        if not workflow:
+            typer.echo(f"Workflow '{schedule['workflow_name']}' not found. Skipping schedule.")
+            continue
+
+        schedule["workflow_id"] = workflow.workflow_id
+
+        typer.echo(f"Initial Schedule: {schedule}")
+        add_scheduled_task_to_queue(APSchedulerJobCreate(**schedule))  # type: ignore
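The `run` loop consumes schedule mutations from a Redis list with a blocking `BRPOP`; `add_scheduled_task_to_queue` is the producing side (its implementation lives in `orchestrator/schedules/service.py`, which this diff does not show). A rough sketch of that handshake with plain redis-py, assuming JSON-serialized payloads and a stand-in queue name:

```python
import json

from redis import Redis

QUEUE = "scheduler-queue"  # stand-in; the real key is SCHEDULER_QUEUE from service.py

redis_conn = Redis.from_url("redis://localhost:6379/0")

# Producer side: roughly what add_scheduled_task_to_queue might do.
redis_conn.lpush(QUEUE, json.dumps({"action": "create", "name": "Task Resume Workflows"}))

# Consumer side, mirroring the run() loop above: BRPOP blocks for up to
# `timeout` seconds and returns (queue_name, payload) or None.
item = redis_conn.brpop(QUEUE, timeout=1)
if item:
    _queue_name, raw_payload = item
    print(json.loads(raw_payload))
```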
orchestrator/cli/search/resize_embedding.py
CHANGED
@@ -75,6 +75,9 @@ def alter_embedding_column_dimension(new_dimension: int) -> None:
         db.session.execute(text(f"ALTER TABLE search_queries ADD COLUMN query_embedding vector({new_dimension})"))
 
         db.session.commit()
+
+        db.session.close()
+
         logger.info(f"Altered embedding columns to dimension {new_dimension} in ai_search_index and search_queries")
 
     except SQLAlchemyError as e:
orchestrator/db/models.py
CHANGED
@@ -29,9 +29,11 @@ from sqlalchemy import (
     CheckConstraint,
     Column,
     Enum,
+    Float,
     ForeignKey,
     Index,
     Integer,
+    LargeBinary,
     PrimaryKeyConstraint,
     Select,
     String,
@@ -796,3 +798,27 @@ class AiSearchIndex(BaseModel):
     content_hash = mapped_column(String(64), nullable=False, index=True)
 
     __table_args__ = (PrimaryKeyConstraint("entity_id", "path", name="pk_ai_search_index"),)
+
+
+class APSchedulerJobStoreModel(BaseModel):
+    __tablename__ = "apscheduler_jobs"
+
+    id = mapped_column(String(191), primary_key=True)
+    next_run_time = mapped_column(Float, nullable=True)
+    job_state = mapped_column(LargeBinary, nullable=False)
+
+
+class WorkflowApschedulerJob(BaseModel):
+    __tablename__ = "workflows_apscheduler_jobs"
+
+    workflow_id = mapped_column(
+        UUIDType, ForeignKey("workflows.workflow_id", ondelete="CASCADE"), primary_key=True, nullable=False
+    )
+
+    # Notice the VARCHAR(512) for schedule_id to accommodate longer IDs so
+    # that if APScheduler changes its ID format in the future, we are covered.
+    schedule_id = mapped_column(
+        String(512), ForeignKey("apscheduler_jobs.id", ondelete="CASCADE"), primary_key=True, nullable=False
+    )
+
+    __table_args__ = (UniqueConstraint("workflow_id", "schedule_id", name="uq_workflow_schedule"),)
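The new `WorkflowApschedulerJob` linker model records which APScheduler jobs were created through the API; `show-schedule` uses it to label each job's source. A minimal query sketch, assuming the usual `db.session` from `orchestrator.db`:

```python
from sqlalchemy import select

from orchestrator.db import db
from orchestrator.db.models import WorkflowApschedulerJob

# Look up the linker rows for a set of APScheduler job ids, similar to what
# get_linker_entries_by_schedule_ids does for the `show-schedule` command.
schedule_ids = ["example-apscheduler-job-id"]
linker_rows = db.session.scalars(
    select(WorkflowApschedulerJob).where(WorkflowApschedulerJob.schedule_id.in_(schedule_ids))
).all()
for row in linker_rows:
    print(row.workflow_id, row.schedule_id)
```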
orchestrator/graphql/schemas/process.py
CHANGED
@@ -89,8 +89,8 @@ class ProcessType:
         auth_resume, auth_retry = get_auth_callbacks(get_steps_to_evaluate_for_rbac(process), workflow)
 
         return FormUserPermissionsType(
-            retryAllowed=bool(auth_retry and auth_retry(oidc_user)),
-            resumeAllowed=bool(auth_resume and auth_resume(oidc_user)),
+            retryAllowed=bool(auth_retry and await auth_retry(oidc_user)),
+            resumeAllowed=bool(auth_resume and await auth_resume(oidc_user)),
         )
 
     @authenticated_field(description="Returns list of subscriptions of the process")  # type: ignore
orchestrator/llm_settings.py
CHANGED
orchestrator/migrations/versions/schema/2020-10-19_a76b9185b334_add_generic_workflows_to_core.py
CHANGED
@@ -17,6 +17,7 @@ down_revision = "c112305b07d3"
 branch_labels = None
 depends_on = None
 
+# NOTE: this migration forgot to insert these workflows with is_task=true. Make sure to correct that if you copy this.
 workflows = [
     {"name": "modify_note", "description": "Modify Note", "workflow_id": uuid4(), "target": "MODIFY"},
     {"name": "task_clean_up_tasks", "description": "Clean up old tasks", "workflow_id": uuid4(), "target": "SYSTEM"},
orchestrator/migrations/versions/schema/2021-04-06_3c8b9185c221_add_validate_products_task.py
CHANGED
@@ -17,6 +17,7 @@ down_revision = "3323bcb934e7"
 branch_labels = None
 depends_on = None
 
+# NOTE: this migration forgot to insert these workflows with is_task=true. Make sure to correct that if you copy this.
 workflows = [
     {"name": "task_validate_products", "description": "Validate products", "workflow_id": uuid4(), "target": "SYSTEM"},
 ]
orchestrator/migrations/versions/schema/2025-11-18_961eddbd4c13_create_linker_table_workflow_apscheduler.py
ADDED
@@ -0,0 +1,106 @@
+"""Create linker table workflow_apscheduler.
+
+Revision ID: 961eddbd4c13
+Revises: 850dccac3b02
+Create Date: 2025-11-18 10:38:57.211087
+
+"""
+
+from uuid import uuid4
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "961eddbd4c13"
+down_revision = "850dccac3b02"
+branch_labels = None
+depends_on = None
+
+# NOTE: this migration forgot to insert these workflows with is_task=true. Make sure to correct that if you copy this.
+workflows = [
+    {
+        "name": "task_validate_subscriptions",
+        "description": "Validate subscriptions",
+        "workflow_id": uuid4(),
+        "target": "SYSTEM",
+    },
+]
+
+
+def _create_workflows() -> None:
+    conn = op.get_bind()
+    for workflow in workflows:
+        conn.execute(
+            sa.text(
+                "INSERT INTO workflows VALUES (:workflow_id, :name, :target, :description, now()) ON CONFLICT DO NOTHING"
+            ),
+            workflow,
+        )
+
+
+def _downgrade_create_workflows() -> None:
+    conn = op.get_bind()
+    for workflow in workflows:
+        conn.execute(sa.text("DELETE FROM workflows WHERE name = :name"), {"name": workflow["name"]})
+
+
+def _create_apscheduler_jobs_table_if_not_exists() -> None:
+    # Check if the apscheduler_jobs table exists and create it if it does not exist.
+    conn = op.get_bind()
+    inspector = sa.inspect(conn)
+    if "apscheduler_jobs" not in inspector.get_table_names():
+        op.execute(
+            sa.text(
+                """
+                CREATE TABLE apscheduler_jobs
+                (
+                    id            VARCHAR(191) NOT NULL PRIMARY KEY,
+                    next_run_time DOUBLE PRECISION,
+                    job_state     bytea NOT NULL
+                );
+                """
+            )
+        )
+
+
+def _create_workflows_table_if_not_exists() -> None:
+    # Notice the VARCHAR(512) for schedule_id to accommodate longer IDs
+    # This so that if APScheduler changes its ID format in the future, we are covered.
+    op.execute(
+        sa.text(
+            """
+            CREATE TABLE workflows_apscheduler_jobs (
+                workflow_id UUID NOT NULL,
+                schedule_id VARCHAR(512) NOT NULL,
+                PRIMARY KEY (workflow_id, schedule_id),
+                CONSTRAINT fk_workflow
+                    FOREIGN KEY (workflow_id) REFERENCES public.workflows (workflow_id)
+                    ON DELETE CASCADE,
+                CONSTRAINT fk_schedule
+                    FOREIGN KEY (schedule_id) REFERENCES public.apscheduler_jobs (id)
+                    ON DELETE CASCADE,
+                CONSTRAINT uq_workflow_schedule UNIQUE (workflow_id, schedule_id)
+            );
+            """
+        )
+    )
+
+    op.create_index("ix_workflows_apscheduler_jobs_schedule_id", "workflows_apscheduler_jobs", ["schedule_id"])
+
+
+def upgrade() -> None:
+    _create_apscheduler_jobs_table_if_not_exists()
+    _create_workflows_table_if_not_exists()
+    _create_workflows()
+
+
+def downgrade() -> None:
+    op.execute(
+        sa.text(
+            """
+            DROP TABLE IF EXISTS workflows_apscheduler_jobs;
+            """
+        )
+    )
+    _downgrade_create_workflows()
orchestrator/migrations/versions/schema/2025-12-10_9736496e3eba_set_is_task_true_on_certain_tasks.py
ADDED
@@ -0,0 +1,40 @@
+"""Set is_task=true on certain tasks.
+
+This is required to make them appear in the completed tasks in the UI, and for the cleanup task to be able to
+remove them.
+
+Revision ID: 9736496e3eba
+Revises: 961eddbd4c13
+Create Date: 2025-12-10 16:42:29.060382
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = "9736496e3eba"
+down_revision = "961eddbd4c13"
+branch_labels = None
+depends_on = None
+
+task_names = [
+    # Added in a76b9185b334
+    "task_clean_up_tasks",
+    "task_resume_workflows",
+    # Added in 3c8b9185c221
+    "task_validate_products",
+    # Added in 961eddbd4c13
+    "task_validate_subscriptions",
+]
+
+
+def upgrade() -> None:
+    conn = op.get_bind()
+    query = sa.text("UPDATE workflows SET is_task=true WHERE name = :task_name and is_task=false")
+    for task_name in task_names:
+        conn.execute(query, parameters={"task_name": task_name})
+
+
+def downgrade() -> None:
+    pass  # Does not make sense to downgrade back to a 'bad' state.
orchestrator/schedules/__init__.py
CHANGED
@@ -10,16 +10,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import warnings
 
-
-from orchestrator.schedules.resume_workflows import run_resume_workflows
-from orchestrator.schedules.task_vacuum import vacuum_tasks
 from orchestrator.schedules.validate_products import validate_products
-from orchestrator.schedules.validate_subscriptions import validate_subscriptions
 
+warnings.warn(
+    "ALL_SCHEDULERS is deprecated and will be removed in 5.0.0. "
+    "Scheduling tasks can now be handled entirely through the API. "
+    "For more details, please consult https://workfloworchestrator.org/orchestrator-core/guides/upgrading/4.7/",
+    DeprecationWarning,
+    stacklevel=2,
+)
 ALL_SCHEDULERS: list = [
-    run_resume_workflows,
-    vacuum_tasks,
-    validate_subscriptions,
     validate_products,
 ]