orchestrator-core 4.4.1__py3-none-any.whl → 4.5.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. orchestrator/__init__.py +26 -2
  2. orchestrator/agentic_app.py +84 -0
  3. orchestrator/api/api_v1/api.py +10 -0
  4. orchestrator/api/api_v1/endpoints/search.py +277 -0
  5. orchestrator/app.py +32 -0
  6. orchestrator/cli/index_llm.py +73 -0
  7. orchestrator/cli/main.py +22 -1
  8. orchestrator/cli/resize_embedding.py +135 -0
  9. orchestrator/cli/search_explore.py +208 -0
  10. orchestrator/cli/speedtest.py +151 -0
  11. orchestrator/db/models.py +37 -1
  12. orchestrator/llm_settings.py +51 -0
  13. orchestrator/migrations/versions/schema/2025-08-12_52b37b5b2714_search_index_model_for_llm_integration.py +95 -0
  14. orchestrator/schemas/search.py +117 -0
  15. orchestrator/search/__init__.py +12 -0
  16. orchestrator/search/agent/__init__.py +8 -0
  17. orchestrator/search/agent/agent.py +47 -0
  18. orchestrator/search/agent/prompts.py +87 -0
  19. orchestrator/search/agent/state.py +8 -0
  20. orchestrator/search/agent/tools.py +236 -0
  21. orchestrator/search/core/__init__.py +0 -0
  22. orchestrator/search/core/embedding.py +64 -0
  23. orchestrator/search/core/exceptions.py +22 -0
  24. orchestrator/search/core/types.py +281 -0
  25. orchestrator/search/core/validators.py +27 -0
  26. orchestrator/search/docs/index.md +37 -0
  27. orchestrator/search/docs/running_local_text_embedding_inference.md +45 -0
  28. orchestrator/search/filters/__init__.py +27 -0
  29. orchestrator/search/filters/base.py +275 -0
  30. orchestrator/search/filters/date_filters.py +75 -0
  31. orchestrator/search/filters/definitions.py +93 -0
  32. orchestrator/search/filters/ltree_filters.py +43 -0
  33. orchestrator/search/filters/numeric_filter.py +60 -0
  34. orchestrator/search/indexing/__init__.py +3 -0
  35. orchestrator/search/indexing/indexer.py +323 -0
  36. orchestrator/search/indexing/registry.py +88 -0
  37. orchestrator/search/indexing/tasks.py +53 -0
  38. orchestrator/search/indexing/traverse.py +322 -0
  39. orchestrator/search/retrieval/__init__.py +3 -0
  40. orchestrator/search/retrieval/builder.py +113 -0
  41. orchestrator/search/retrieval/engine.py +152 -0
  42. orchestrator/search/retrieval/pagination.py +83 -0
  43. orchestrator/search/retrieval/retriever.py +447 -0
  44. orchestrator/search/retrieval/utils.py +106 -0
  45. orchestrator/search/retrieval/validation.py +174 -0
  46. orchestrator/search/schemas/__init__.py +0 -0
  47. orchestrator/search/schemas/parameters.py +116 -0
  48. orchestrator/search/schemas/results.py +64 -0
  49. orchestrator/services/settings_env_variables.py +2 -2
  50. orchestrator/settings.py +1 -1
  51. {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.0a2.dist-info}/METADATA +8 -3
  52. {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.0a2.dist-info}/RECORD +54 -11
  53. {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.0a2.dist-info}/WHEEL +0 -0
  54. {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.0a2.dist-info}/licenses/LICENSE +0 -0
orchestrator/__init__.py CHANGED
@@ -13,15 +13,39 @@
13
13
 
14
14
  """This is the orchestrator workflow engine."""
15
15
 
16
- __version__ = "4.4.1"
16
+ __version__ = "4.5.0a2"
17
17
 
18
- from orchestrator.app import OrchestratorCore
18
+
19
+ from structlog import get_logger
20
+
21
+ logger = get_logger(__name__)
22
+
23
+ logger.info("Starting the orchestrator", version=__version__)
24
+
25
+ from orchestrator.llm_settings import llm_settings
19
26
  from orchestrator.settings import app_settings
27
+
28
+ if llm_settings.LLM_ENABLED:
29
+ try:
30
+ from importlib import import_module
31
+
32
+ import_module("pydantic_ai")
33
+ from orchestrator.agentic_app import AgenticOrchestratorCore as OrchestratorCore
34
+
35
+ except ImportError:
36
+ logger.error(
37
+ "Unable to import 'pydantic_ai' module, please install the orchestrator with llm dependencies. `pip install orchestrator-core[llm]",
38
+ )
39
+ exit(1)
40
+ else:
41
+ from orchestrator.app import OrchestratorCore # type: ignore[assignment]
42
+
20
43
  from orchestrator.workflow import begin, conditional, done, focussteps, inputstep, retrystep, step, steplens, workflow
21
44
 
22
45
  __all__ = [
23
46
  "OrchestratorCore",
24
47
  "app_settings",
48
+ "llm_settings",
25
49
  "step",
26
50
  "inputstep",
27
51
  "workflow",
@@ -0,0 +1,84 @@
1
+ #!/usr/bin/env python3
2
+ """The main application module.
3
+
4
+ This module contains the main `AgenticOrchestratorCore` class for the `FastAPI` backend and
5
+ provides the ability to run the CLI.
6
+ """
7
+ # Copyright 2019-2025 SURF
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+ from typing import Any
20
+
21
+ import typer
22
+ from pydantic_ai.models.openai import OpenAIModel
23
+ from pydantic_ai.toolsets import FunctionToolset
24
+ from structlog import get_logger
25
+
26
+ from orchestrator.app import OrchestratorCore
27
+ from orchestrator.cli.main import app as cli_app
28
+ from orchestrator.llm_settings import LLMSettings, llm_settings
29
+
30
+ logger = get_logger(__name__)
31
+
32
+
33
+ class AgenticOrchestratorCore(OrchestratorCore):
34
+ def __init__(
35
+ self,
36
+ *args: Any,
37
+ llm_model: OpenAIModel | str = "gpt-4o-mini",
38
+ llm_settings: LLMSettings = llm_settings,
39
+ agent_tools: list[FunctionToolset] | None = None,
40
+ **kwargs: Any,
41
+ ) -> None:
42
+ """Initialize the `AgenticOrchestratorCore` class.
43
+
44
+ This class takes the same arguments as the `OrchestratorCore` class.
45
+
46
+ Args:
47
+ *args: All the normal arguments passed to the `OrchestratorCore` class.
48
+ llm_model: An OpenAI model class or string, not limited to OpenAI models (gpt-4o-mini etc)
49
+ llm_settings: A class of settings for the LLM
50
+ agent_tools: A list of tools that can be used by the agent
51
+ **kwargs: Additional arguments passed to the `OrchestratorCore` class.
52
+
53
+ Returns:
54
+ None
55
+ """
56
+ self.llm_model = llm_model
57
+ self.agent_tools = agent_tools
58
+ self.llm_settings = llm_settings
59
+
60
+ super().__init__(*args, **kwargs)
61
+
62
+ logger.info("Mounting the agent")
63
+ self.register_llm_integration()
64
+
65
+ def register_llm_integration(self) -> None:
66
+ """Mount the Agent endpoint.
67
+
68
+ This helper mounts the agent endpoint on the application.
69
+
70
+ Returns:
71
+ None
72
+
73
+ """
74
+ from orchestrator.search.agent import build_agent_app
75
+
76
+ agent_app = build_agent_app(self.llm_model, self.agent_tools)
77
+ self.mount("/agent", agent_app)
78
+
79
+
80
+ main_typer_app = typer.Typer()
81
+ main_typer_app.add_typer(cli_app, name="orchestrator", help="The orchestrator CLI commands")
82
+
83
+ if __name__ == "__main__":
84
+ main_typer_app()
@@ -30,6 +30,7 @@ from orchestrator.api.api_v1.endpoints import (
30
30
  workflows,
31
31
  ws,
32
32
  )
33
+ from orchestrator.llm_settings import llm_settings
33
34
  from orchestrator.security import authorize
34
35
 
35
36
  api_router = APIRouter()
@@ -83,3 +84,12 @@ api_router.include_router(
83
84
  tags=["Core", "Translations"],
84
85
  )
85
86
  api_router.include_router(ws.router, prefix="/ws", tags=["Core", "Events"])
87
+
88
+ if llm_settings.LLM_ENABLED:
89
+ from orchestrator.api.api_v1.endpoints import search
90
+
91
+ api_router.include_router(
92
+ search.router,
93
+ prefix="/search",
94
+ tags=["Core", "Search"],
95
+ )
@@ -0,0 +1,277 @@
1
+ from typing import Any, Literal, overload
2
+
3
+ from fastapi import APIRouter, HTTPException, Query, status
4
+ from sqlalchemy import case, select
5
+ from sqlalchemy.orm import selectinload
6
+
7
+ from orchestrator.db import (
8
+ ProcessTable,
9
+ ProductTable,
10
+ WorkflowTable,
11
+ db,
12
+ )
13
+ from orchestrator.domain.base import SubscriptionModel
14
+ from orchestrator.schemas.search import (
15
+ PageInfoSchema,
16
+ PathsResponse,
17
+ ProcessSearchResult,
18
+ ProcessSearchSchema,
19
+ ProductSearchResult,
20
+ ProductSearchSchema,
21
+ SearchResultsSchema,
22
+ SubscriptionSearchResult,
23
+ WorkflowSearchResult,
24
+ WorkflowSearchSchema,
25
+ )
26
+ from orchestrator.search.core.exceptions import InvalidCursorError
27
+ from orchestrator.search.core.types import EntityType, UIType
28
+ from orchestrator.search.filters.definitions import generate_definitions
29
+ from orchestrator.search.indexing.registry import ENTITY_CONFIG_REGISTRY
30
+ from orchestrator.search.retrieval import execute_search
31
+ from orchestrator.search.retrieval.builder import build_paths_query, create_path_autocomplete_lquery, process_path_rows
32
+ from orchestrator.search.retrieval.pagination import (
33
+ create_next_page_cursor,
34
+ process_pagination_cursor,
35
+ )
36
+ from orchestrator.search.retrieval.validation import is_lquery_syntactically_valid
37
+ from orchestrator.search.schemas.parameters import (
38
+ BaseSearchParameters,
39
+ ProcessSearchParameters,
40
+ ProductSearchParameters,
41
+ SubscriptionSearchParameters,
42
+ WorkflowSearchParameters,
43
+ )
44
+ from orchestrator.search.schemas.results import SearchResult, TypeDefinition
45
+ from orchestrator.services.subscriptions import format_special_types
46
+
47
+ router = APIRouter()
48
+
49
+
50
+ def _create_search_result_item(
51
+ entity: WorkflowTable | ProductTable | ProcessTable, entity_type: EntityType, search_info: SearchResult
52
+ ) -> WorkflowSearchResult | ProductSearchResult | ProcessSearchResult | None:
53
+ match entity_type:
54
+ case EntityType.WORKFLOW:
55
+ workflow_data = WorkflowSearchSchema.model_validate(entity)
56
+ return WorkflowSearchResult(
57
+ workflow=workflow_data,
58
+ score=search_info.score,
59
+ perfect_match=search_info.perfect_match,
60
+ matching_field=search_info.matching_field,
61
+ )
62
+ case EntityType.PRODUCT:
63
+ product_data = ProductSearchSchema.model_validate(entity)
64
+ return ProductSearchResult(
65
+ product=product_data,
66
+ score=search_info.score,
67
+ perfect_match=search_info.perfect_match,
68
+ matching_field=search_info.matching_field,
69
+ )
70
+ case EntityType.PROCESS:
71
+ process_data = ProcessSearchSchema.model_validate(entity)
72
+ return ProcessSearchResult(
73
+ process=process_data,
74
+ score=search_info.score,
75
+ perfect_match=search_info.perfect_match,
76
+ matching_field=search_info.matching_field,
77
+ )
78
+ case _:
79
+ return None
80
+
81
+
82
+ @overload
83
+ async def _perform_search_and_fetch(
84
+ search_params: BaseSearchParameters,
85
+ entity_type: Literal[EntityType.WORKFLOW],
86
+ eager_loads: list[Any],
87
+ cursor: str | None = None,
88
+ ) -> SearchResultsSchema[WorkflowSearchResult]: ...
89
+
90
+
91
+ @overload
92
+ async def _perform_search_and_fetch(
93
+ search_params: BaseSearchParameters,
94
+ entity_type: Literal[EntityType.PRODUCT],
95
+ eager_loads: list[Any],
96
+ cursor: str | None = None,
97
+ ) -> SearchResultsSchema[ProductSearchResult]: ...
98
+
99
+
100
+ @overload
101
+ async def _perform_search_and_fetch(
102
+ search_params: BaseSearchParameters,
103
+ entity_type: Literal[EntityType.PROCESS],
104
+ eager_loads: list[Any],
105
+ cursor: str | None = None,
106
+ ) -> SearchResultsSchema[ProcessSearchResult]: ...
107
+
108
+
109
+ async def _perform_search_and_fetch(
110
+ search_params: BaseSearchParameters,
111
+ entity_type: EntityType,
112
+ eager_loads: list[Any],
113
+ cursor: str | None = None,
114
+ ) -> SearchResultsSchema[Any]:
115
+ try:
116
+ pagination_params = await process_pagination_cursor(cursor, search_params)
117
+ except InvalidCursorError:
118
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid pagination cursor")
119
+
120
+ search_response = await execute_search(
121
+ search_params=search_params,
122
+ db_session=db.session,
123
+ pagination_params=pagination_params,
124
+ )
125
+ if not search_response.results:
126
+ return SearchResultsSchema(search_metadata=search_response.metadata)
127
+
128
+ next_page_cursor = create_next_page_cursor(search_response.results, pagination_params, search_params.limit)
129
+ has_next_page = next_page_cursor is not None
130
+ page_info = PageInfoSchema(has_next_page=has_next_page, next_page_cursor=next_page_cursor)
131
+
132
+ config = ENTITY_CONFIG_REGISTRY[entity_type]
133
+ entity_ids = [res.entity_id for res in search_response.results]
134
+ pk_column = getattr(config.table, config.pk_name)
135
+ ordering_case = case({entity_id: i for i, entity_id in enumerate(entity_ids)}, value=pk_column)
136
+
137
+ stmt = select(config.table).options(*eager_loads).filter(pk_column.in_(entity_ids)).order_by(ordering_case)
138
+ entities = db.session.scalars(stmt).all()
139
+
140
+ search_info_map = {res.entity_id: res for res in search_response.results}
141
+ data = []
142
+ for entity in entities:
143
+ entity_id = getattr(entity, config.pk_name)
144
+ search_info = search_info_map.get(str(entity_id))
145
+ if not search_info:
146
+ continue
147
+
148
+ search_result_item = _create_search_result_item(entity, entity_type, search_info)
149
+ if search_result_item:
150
+ data.append(search_result_item)
151
+
152
+ return SearchResultsSchema(data=data, page_info=page_info, search_metadata=search_response.metadata)
153
+
154
+
155
+ @router.post(
156
+ "/subscriptions",
157
+ response_model=SearchResultsSchema[SubscriptionSearchResult],
158
+ )
159
+ async def search_subscriptions(
160
+ search_params: SubscriptionSearchParameters,
161
+ cursor: str | None = None,
162
+ ) -> SearchResultsSchema[SubscriptionSearchResult]:
163
+ try:
164
+ pagination_params = await process_pagination_cursor(cursor, search_params)
165
+ except InvalidCursorError:
166
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid pagination cursor")
167
+
168
+ search_response = await execute_search(
169
+ search_params=search_params,
170
+ db_session=db.session,
171
+ pagination_params=pagination_params,
172
+ )
173
+
174
+ if not search_response.results:
175
+ return SearchResultsSchema(search_metadata=search_response.metadata)
176
+
177
+ next_page_cursor = create_next_page_cursor(search_response.results, pagination_params, search_params.limit)
178
+ has_next_page = next_page_cursor is not None
179
+ page_info = PageInfoSchema(has_next_page=has_next_page, next_page_cursor=next_page_cursor)
180
+
181
+ search_info_map = {res.entity_id: res for res in search_response.results}
182
+ results_data = []
183
+ for sub_id, search_info in search_info_map.items():
184
+ subscription_model = SubscriptionModel.from_subscription(sub_id)
185
+ sub_data = subscription_model.model_dump(exclude_unset=False)
186
+ search_result_item = SubscriptionSearchResult(
187
+ subscription=format_special_types(sub_data),
188
+ score=search_info.score,
189
+ perfect_match=search_info.perfect_match,
190
+ matching_field=search_info.matching_field,
191
+ )
192
+ results_data.append(search_result_item)
193
+
194
+ return SearchResultsSchema(data=results_data, page_info=page_info, search_metadata=search_response.metadata)
195
+
196
+
197
+ @router.post("/workflows", response_model=SearchResultsSchema[WorkflowSearchResult])
198
+ async def search_workflows(
199
+ search_params: WorkflowSearchParameters,
200
+ cursor: str | None = None,
201
+ ) -> SearchResultsSchema[WorkflowSearchResult]:
202
+ return await _perform_search_and_fetch(
203
+ search_params=search_params,
204
+ entity_type=EntityType.WORKFLOW,
205
+ eager_loads=[selectinload(WorkflowTable.products)],
206
+ cursor=cursor,
207
+ )
208
+
209
+
210
+ @router.post("/products", response_model=SearchResultsSchema[ProductSearchResult])
211
+ async def search_products(
212
+ search_params: ProductSearchParameters,
213
+ cursor: str | None = None,
214
+ ) -> SearchResultsSchema[ProductSearchResult]:
215
+ return await _perform_search_and_fetch(
216
+ search_params=search_params,
217
+ entity_type=EntityType.PRODUCT,
218
+ eager_loads=[
219
+ selectinload(ProductTable.workflows),
220
+ selectinload(ProductTable.fixed_inputs),
221
+ selectinload(ProductTable.product_blocks),
222
+ ],
223
+ cursor=cursor,
224
+ )
225
+
226
+
227
+ @router.post("/processes", response_model=SearchResultsSchema[ProcessSearchResult])
228
+ async def search_processes(
229
+ search_params: ProcessSearchParameters,
230
+ cursor: str | None = None,
231
+ ) -> SearchResultsSchema[ProcessSearchResult]:
232
+ return await _perform_search_and_fetch(
233
+ search_params=search_params,
234
+ entity_type=EntityType.PROCESS,
235
+ eager_loads=[
236
+ selectinload(ProcessTable.workflow),
237
+ ],
238
+ cursor=cursor,
239
+ )
240
+
241
+
242
+ @router.get(
243
+ "/paths",
244
+ response_model=PathsResponse,
245
+ response_model_exclude_none=True,
246
+ )
247
+ async def list_paths(
248
+ prefix: str = Query("", min_length=0),
249
+ q: str | None = Query(None, description="Query for path suggestions"),
250
+ entity_type: EntityType = Query(EntityType.SUBSCRIPTION),
251
+ limit: int = Query(10, ge=1, le=10),
252
+ ) -> PathsResponse:
253
+
254
+ if prefix:
255
+ lquery_pattern = create_path_autocomplete_lquery(prefix)
256
+
257
+ if not is_lquery_syntactically_valid(lquery_pattern, db.session):
258
+ raise HTTPException(
259
+ status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
260
+ detail=f"Prefix '{prefix}' creates an invalid search pattern.",
261
+ )
262
+ stmt = build_paths_query(entity_type=entity_type, prefix=prefix, q=q)
263
+ stmt = stmt.limit(limit)
264
+ rows = db.session.execute(stmt).all()
265
+
266
+ leaves, components = process_path_rows(rows)
267
+ return PathsResponse(leaves=leaves, components=components)
268
+
269
+
270
+ @router.get(
271
+ "/definitions",
272
+ response_model=dict[UIType, TypeDefinition],
273
+ response_model_exclude_none=True,
274
+ )
275
+ async def get_definitions() -> dict[UIType, TypeDefinition]:
276
+ """Provide a static definition of operators and schemas for each UI type."""
277
+ return generate_definitions()
orchestrator/app.py CHANGED
@@ -90,6 +90,22 @@ class OrchestratorCore(FastAPI):
90
90
  base_settings: AppSettings = app_settings,
91
91
  **kwargs: Any,
92
92
  ) -> None:
93
+ """Initialize the Orchestrator.
94
+
95
+ Args:
96
+ title: Name of the application.
97
+ description: Description of the application.
98
+ openapi_url: Location of the OpenAPI endpoint.
99
+ docs_url: Location of the docs endpoint.
100
+ redoc_url: Location of the redoc endpoint.
101
+ version: Version of the application.
102
+ default_response_class: Override the default response class.
103
+ base_settings: Settings for the application.
104
+ **kwargs: Any additional keyword arguments are sent to the
105
+
106
+ Returns:
107
+ None
108
+ """
93
109
  initialise_logging(LOGGER_OVERRIDES)
94
110
  init_model_loaders()
95
111
  if base_settings.ENABLE_GRAPHQL_STATS_EXTENSION:
@@ -163,6 +179,22 @@ class OrchestratorCore(FastAPI):
163
179
  release: str | None = GIT_COMMIT_HASH,
164
180
  **sentry_kwargs: Any,
165
181
  ) -> None:
182
+ """Register sentry to your application.
183
+
184
+ Sentry is an application monitoring toolkit.
185
+
186
+ Args:
187
+ sentry_dsn: The location where sentry traces are posted to.
188
+ trace_sample_rate: The sample rate
189
+ server_name: The name of the application
190
+ environment: Production or development
191
+ release: Version of the application
192
+ **sentry_kwargs: Any sentry keyword arguments
193
+
194
+ Returns:
195
+ None
196
+
197
+ """
166
198
  logger.info("Adding Sentry middleware to app", app=self.title)
167
199
  if self.base_settings.EXECUTOR == ExecutorType.WORKER:
168
200
  from sentry_sdk.integrations.celery import CeleryIntegration
@@ -0,0 +1,73 @@
1
+ import typer
2
+
3
+ from orchestrator.search.core.types import EntityType
4
+ from orchestrator.search.indexing import run_indexing_for_entity
5
+
6
+ app = typer.Typer(
7
+ name="index",
8
+ help="Index search indexes",
9
+ )
10
+
11
+
12
+ @app.command("subscriptions")
13
+ def subscriptions_command(
14
+ subscription_id: str | None = typer.Option(None, help="UUID (default = all)"),
15
+ dry_run: bool = typer.Option(False, help="No DB writes"),
16
+ force_index: bool = typer.Option(False, help="Force re-index (ignore hash cache)"),
17
+ ) -> None:
18
+ """Index subscription_search_index."""
19
+ run_indexing_for_entity(
20
+ entity_kind=EntityType.SUBSCRIPTION,
21
+ entity_id=subscription_id,
22
+ dry_run=dry_run,
23
+ force_index=force_index,
24
+ )
25
+
26
+
27
+ @app.command("products")
28
+ def products_command(
29
+ product_id: str | None = typer.Option(None, help="UUID (default = all)"),
30
+ dry_run: bool = typer.Option(False, help="No DB writes"),
31
+ force_index: bool = typer.Option(False, help="Force re-index (ignore hash cache)"),
32
+ ) -> None:
33
+ """Index product_search_index."""
34
+ run_indexing_for_entity(
35
+ entity_kind=EntityType.PRODUCT,
36
+ entity_id=product_id,
37
+ dry_run=dry_run,
38
+ force_index=force_index,
39
+ )
40
+
41
+
42
+ @app.command("processes")
43
+ def processes_command(
44
+ process_id: str | None = typer.Option(None, help="UUID (default = all)"),
45
+ dry_run: bool = typer.Option(False, help="No DB writes"),
46
+ force_index: bool = typer.Option(False, help="Force re-index (ignore hash cache)"),
47
+ ) -> None:
48
+ """Index process_search_index."""
49
+ run_indexing_for_entity(
50
+ entity_kind=EntityType.PROCESS,
51
+ entity_id=process_id,
52
+ dry_run=dry_run,
53
+ force_index=force_index,
54
+ )
55
+
56
+
57
+ @app.command("workflows")
58
+ def workflows_command(
59
+ workflow_id: str | None = typer.Option(None, help="UUID (default = all)"),
60
+ dry_run: bool = typer.Option(False, help="No DB writes"),
61
+ force_index: bool = typer.Option(False, help="Force re-index (ignore hash cache)"),
62
+ ) -> None:
63
+ """Index workflow_search_index."""
64
+ run_indexing_for_entity(
65
+ entity_kind=EntityType.WORKFLOW,
66
+ entity_id=workflow_id,
67
+ dry_run=dry_run,
68
+ force_index=force_index,
69
+ )
70
+
71
+
72
+ if __name__ == "__main__":
73
+ app()
orchestrator/cli/main.py CHANGED
@@ -13,13 +13,34 @@
13
13
 
14
14
  import typer
15
15
 
16
- from orchestrator.cli import database, generate, scheduler
16
+ from orchestrator.cli import (
17
+ database,
18
+ generate,
19
+ scheduler,
20
+ )
21
+ from orchestrator.llm_settings import llm_settings
17
22
 
18
23
  app = typer.Typer()
19
24
  app.add_typer(scheduler.app, name="scheduler", help="Access all the scheduler functions")
20
25
  app.add_typer(database.app, name="db", help="Interact with the application database")
21
26
  app.add_typer(generate.app, name="generate", help="Generate products, workflows and other artifacts")
22
27
 
28
+ if llm_settings.LLM_ENABLED:
29
+ from orchestrator.cli import index_llm, resize_embedding, search_explore, speedtest
30
+
31
+ app.add_typer(index_llm.app, name="index", help="(Re-)Index the search table.")
32
+ app.add_typer(search_explore.app, name="search", help="Try out different search types.")
33
+ app.add_typer(
34
+ resize_embedding.app,
35
+ name="embedding",
36
+ help="Resize the vector dimension of the embedding column in the search table.",
37
+ )
38
+ app.add_typer(
39
+ speedtest.app,
40
+ name="speedtest",
41
+ help="Search performance testing and analysis.",
42
+ )
43
+
23
44
 
24
45
  if __name__ == "__main__":
25
46
  app()