orchestrator-core 4.4.1__py3-none-any.whl → 4.5.1a1__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- orchestrator/__init__.py +26 -2
- orchestrator/agentic_app.py +84 -0
- orchestrator/api/api_v1/api.py +10 -0
- orchestrator/api/api_v1/endpoints/search.py +277 -0
- orchestrator/app.py +32 -0
- orchestrator/cli/index_llm.py +73 -0
- orchestrator/cli/main.py +22 -1
- orchestrator/cli/resize_embedding.py +135 -0
- orchestrator/cli/search_explore.py +208 -0
- orchestrator/cli/speedtest.py +151 -0
- orchestrator/db/models.py +37 -1
- orchestrator/llm_settings.py +51 -0
- orchestrator/migrations/versions/schema/2025-08-12_52b37b5b2714_search_index_model_for_llm_integration.py +95 -0
- orchestrator/schemas/search.py +117 -0
- orchestrator/search/__init__.py +12 -0
- orchestrator/search/agent/__init__.py +8 -0
- orchestrator/search/agent/agent.py +47 -0
- orchestrator/search/agent/prompts.py +62 -0
- orchestrator/search/agent/state.py +8 -0
- orchestrator/search/agent/tools.py +121 -0
- orchestrator/search/core/__init__.py +0 -0
- orchestrator/search/core/embedding.py +64 -0
- orchestrator/search/core/exceptions.py +22 -0
- orchestrator/search/core/types.py +281 -0
- orchestrator/search/core/validators.py +27 -0
- orchestrator/search/docs/index.md +37 -0
- orchestrator/search/docs/running_local_text_embedding_inference.md +45 -0
- orchestrator/search/filters/__init__.py +27 -0
- orchestrator/search/filters/base.py +272 -0
- orchestrator/search/filters/date_filters.py +75 -0
- orchestrator/search/filters/definitions.py +93 -0
- orchestrator/search/filters/ltree_filters.py +43 -0
- orchestrator/search/filters/numeric_filter.py +60 -0
- orchestrator/search/indexing/__init__.py +3 -0
- orchestrator/search/indexing/indexer.py +323 -0
- orchestrator/search/indexing/registry.py +88 -0
- orchestrator/search/indexing/tasks.py +53 -0
- orchestrator/search/indexing/traverse.py +322 -0
- orchestrator/search/retrieval/__init__.py +3 -0
- orchestrator/search/retrieval/builder.py +108 -0
- orchestrator/search/retrieval/engine.py +152 -0
- orchestrator/search/retrieval/pagination.py +83 -0
- orchestrator/search/retrieval/retriever.py +447 -0
- orchestrator/search/retrieval/utils.py +106 -0
- orchestrator/search/retrieval/validation.py +174 -0
- orchestrator/search/schemas/__init__.py +0 -0
- orchestrator/search/schemas/parameters.py +116 -0
- orchestrator/search/schemas/results.py +63 -0
- orchestrator/services/settings_env_variables.py +2 -2
- orchestrator/settings.py +1 -1
- {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.1a1.dist-info}/METADATA +8 -3
- {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.1a1.dist-info}/RECORD +54 -11
- {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.1a1.dist-info}/WHEEL +0 -0
- {orchestrator_core-4.4.1.dist-info → orchestrator_core-4.5.1a1.dist-info}/licenses/LICENSE +0 -0

orchestrator/migrations/versions/schema/2025-08-12_52b37b5b2714_search_index_model_for_llm_integration.py
@@ -0,0 +1,95 @@
+"""Search index model for llm integration.
+
+Revision ID: 52b37b5b2714
+Revises: 850dccac3b02
+Create Date: 2025-08-12 22:34:26.694750
+
+"""
+
+import sqlalchemy as sa
+from alembic import op
+from pgvector.sqlalchemy import Vector
+from sqlalchemy.dialects import postgresql
+from sqlalchemy_utils import LtreeType
+
+from orchestrator.search.core.types import FieldType
+
+# revision identifiers, used by Alembic.
+revision = "52b37b5b2714"
+down_revision = "850dccac3b02"
+branch_labels = None
+depends_on = None
+
+TABLE = "ai_search_index"
+IDX_EMBED_HNSW = "ix_flat_embed_hnsw"
+IDX_PATH_GIST = "ix_flat_path_gist"
+IDX_PATH_BTREE = "ix_flat_path_btree"
+IDX_VALUE_TRGM = "ix_flat_value_trgm"
+IDX_CONTENT_HASH = "idx_ai_search_index_content_hash"
+
+TARGET_DIM = 1536
+
+
+def upgrade() -> None:
+    # Create PostgreSQL extensions
+    op.execute("CREATE EXTENSION IF NOT EXISTS ltree;")
+    op.execute("CREATE EXTENSION IF NOT EXISTS unaccent;")
+    op.execute("CREATE EXTENSION IF NOT EXISTS pg_trgm;")
+    op.execute("CREATE EXTENSION IF NOT EXISTS vector;")
+
+    # Create the ai_search_index table
+    op.create_table(
+        TABLE,
+        sa.Column("entity_type", sa.Text, nullable=False),
+        sa.Column("entity_id", postgresql.UUID, nullable=False),
+        sa.Column("path", LtreeType, nullable=False),
+        sa.Column("value", sa.Text, nullable=False),
+        sa.Column("embedding", Vector(TARGET_DIM), nullable=True),
+        sa.Column("content_hash", sa.String(64), nullable=False),
+        sa.PrimaryKeyConstraint("entity_id", "path", name="pk_ai_search_index"),
+    )
+
+    field_type_enum = sa.Enum(*[ft.value for ft in FieldType], name="field_type")
+    field_type_enum.create(op.get_bind(), checkfirst=True)
+    op.add_column(
+        TABLE,
+        sa.Column("value_type", field_type_enum, nullable=False, server_default=FieldType.STRING.value),
+    )
+    op.alter_column(TABLE, "value_type", server_default=None)
+
+    op.create_index(op.f("ix_ai_search_index_entity_id"), TABLE, ["entity_id"], unique=False)
+    op.create_index(IDX_CONTENT_HASH, TABLE, ["content_hash"])
+
+    op.create_index(
+        IDX_PATH_GIST,
+        TABLE,
+        ["path"],
+        postgresql_using="GIST",
+        postgresql_ops={"path": "gist_ltree_ops"},
+    )
+    op.create_index(IDX_PATH_BTREE, TABLE, ["path"])
+    op.create_index(IDX_VALUE_TRGM, TABLE, ["value"], postgresql_using="GIN", postgresql_ops={"value": "gin_trgm_ops"})
+
+    op.create_index(
+        IDX_EMBED_HNSW,
+        TABLE,
+        ["embedding"],
+        postgresql_using="HNSW",
+        postgresql_with={"m": 16, "ef_construction": 64},
+        postgresql_ops={"embedding": "vector_l2_ops"},
+    )
+
+
+def downgrade() -> None:
+    # Drop all indexes
+    op.drop_index(IDX_EMBED_HNSW, table_name=TABLE, if_exists=True)
+    op.drop_index(IDX_VALUE_TRGM, table_name=TABLE, if_exists=True)
+    op.drop_index(IDX_PATH_BTREE, table_name=TABLE, if_exists=True)
+    op.drop_index(IDX_PATH_GIST, table_name=TABLE, if_exists=True)
+    op.drop_index(IDX_CONTENT_HASH, table_name=TABLE, if_exists=True)
+    op.drop_index(op.f("ix_ai_search_index_entity_id"), table_name=TABLE, if_exists=True)
+
+    # Drop table and enum
+    op.drop_table(TABLE, if_exists=True)
+    field_type_enum = sa.Enum(name="field_type")
+    field_type_enum.drop(op.get_bind(), checkfirst=True)
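
The HNSW index is built with `vector_l2_ops`, so reads only use it when they order by pgvector's L2 distance operator `<->`. A minimal query sketch against this schema, assuming a SQLAlchemy session; the helper below is illustrative and not part of the release:

    from sqlalchemy import text
    from sqlalchemy.orm import Session


    def nearest_index_rows(session: Session, query_vec: list[float], k: int = 10):
        """Return the k index rows closest to query_vec by L2 distance."""
        # pgvector parses a bracketed list literal; "<->" matches vector_l2_ops,
        # so the HNSW index ix_flat_embed_hnsw can serve the ORDER BY.
        q = "[" + ",".join(str(x) for x in query_vec) + "]"
        stmt = text(
            "SELECT entity_id, path, value, embedding <-> (:q)::vector AS distance "
            "FROM ai_search_index ORDER BY embedding <-> (:q)::vector LIMIT :k"
        )
        return session.execute(stmt, {"q": q, "k": k}).all()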

orchestrator/schemas/search.py
@@ -0,0 +1,117 @@
+from datetime import datetime
+from typing import Any, Generic, TypeVar
+from uuid import UUID
+
+from pydantic import BaseModel, ConfigDict, Field
+
+from orchestrator.search.core.types import SearchMetadata
+from orchestrator.search.schemas.results import ComponentInfo, LeafInfo, MatchingField
+
+T = TypeVar("T")
+
+
+class PageInfoSchema(BaseModel):
+    has_next_page: bool = False
+    next_page_cursor: str | None = None
+
+
+class ProductSchema(BaseModel):
+    model_config = ConfigDict(from_attributes=True)
+
+    name: str
+    tag: str
+    product_type: str
+
+
+class SubscriptionSearchResult(BaseModel):
+    score: float
+    perfect_match: int
+    matching_field: MatchingField | None = None
+    subscription: dict[str, Any]
+
+
+class SearchResultsSchema(BaseModel, Generic[T]):
+    data: list[T] = Field(default_factory=list)
+    page_info: PageInfoSchema = Field(default_factory=PageInfoSchema)
+    search_metadata: SearchMetadata | None = None
+
+
+class WorkflowProductSchema(BaseModel):
+    """Product associated with a workflow."""
+
+    model_config = ConfigDict(from_attributes=True)
+
+    product_type: str
+    product_id: UUID
+    name: str
+
+
+class WorkflowSearchSchema(BaseModel):
+    """Schema for workflow search results."""
+
+    model_config = ConfigDict(from_attributes=True)
+
+    name: str
+    products: list[WorkflowProductSchema]
+    description: str | None = None
+    created_at: datetime | None = None
+
+
+class ProductSearchSchema(BaseModel):
+    """Schema for product search results."""
+
+    model_config = ConfigDict(from_attributes=True)
+
+    product_id: UUID
+    name: str
+    product_type: str
+    tag: str | None = None
+    description: str | None = None
+    status: str | None = None
+    created_at: datetime | None = None
+
+
+class ProcessSearchSchema(BaseModel):
+    """Schema for process search results."""
+
+    model_config = ConfigDict(from_attributes=True)
+
+    process_id: UUID
+    workflow_name: str
+    workflow_id: UUID
+    last_status: str
+    is_task: bool
+    created_by: str | None = None
+    started_at: datetime
+    last_modified_at: datetime
+    last_step: str | None = None
+    failed_reason: str | None = None
+    subscription_ids: list[UUID] | None = None
+
+
+class WorkflowSearchResult(BaseModel):
+    score: float
+    perfect_match: int
+    matching_field: MatchingField | None = None
+    workflow: WorkflowSearchSchema
+
+
+class ProductSearchResult(BaseModel):
+    score: float
+    perfect_match: int
+    matching_field: MatchingField | None = None
+    product: ProductSearchSchema
+
+
+class ProcessSearchResult(BaseModel):
+    score: float
+    perfect_match: int
+    matching_field: MatchingField | None = None
+    process: ProcessSearchSchema
+
+
+class PathsResponse(BaseModel):
+    leaves: list[LeafInfo]
+    components: list[ComponentInfo]
+
+    model_config = ConfigDict(extra="forbid", use_enum_values=True)
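
A short illustration of how the generic wrapper composes with one of the concrete result types; the values below are made up, not taken from the diff:

    from uuid import UUID

    page = SearchResultsSchema[ProductSearchResult](
        data=[
            ProductSearchResult(
                score=0.87,
                perfect_match=0,
                product=ProductSearchSchema(
                    product_id=UUID("00000000-0000-0000-0000-000000000000"),
                    name="example-product",      # made-up value
                    product_type="ExampleType",  # made-up value
                ),
            )
        ],
        page_info=PageInfoSchema(has_next_page=False),
    )
    print(page.model_dump_json(indent=2))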

orchestrator/search/__init__.py
@@ -0,0 +1,12 @@
+# Copyright 2019-2025 SURF.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

orchestrator/search/agent/agent.py
@@ -0,0 +1,47 @@
+from typing import Any
+
+import structlog
+from fastapi import FastAPI, HTTPException
+from pydantic_ai.ag_ui import StateDeps
+from pydantic_ai.agent import Agent
+from pydantic_ai.models.openai import OpenAIModel
+from pydantic_ai.settings import ModelSettings
+from pydantic_ai.toolsets import FunctionToolset
+from starlette.types import ASGIApp
+
+from orchestrator.search.agent.prompts import get_base_instructions, get_dynamic_instructions
+from orchestrator.search.agent.state import SearchState
+from orchestrator.search.agent.tools import search_toolset
+
+logger = structlog.get_logger(__name__)
+
+
+def _disabled_agent_app(reason: str) -> FastAPI:
+    app = FastAPI(title="Agent disabled")
+
+    @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS", "HEAD"])
+    async def _disabled(path: str) -> None:
+        raise HTTPException(status_code=503, detail=f"Agent disabled: {reason}")
+
+    return app
+
+
+def build_agent_app(model: str | OpenAIModel, toolsets: list[FunctionToolset[Any]] | None = None) -> ASGIApp:
+    try:
+        toolsets = toolsets + [search_toolset] if toolsets else [search_toolset]
+
+        agent = Agent(
+            model=model,
+            deps_type=StateDeps[SearchState],
+            model_settings=ModelSettings(
+                parallel_tool_calls=False,
+            ),  # https://github.com/pydantic/pydantic-ai/issues/562
+            toolsets=toolsets,
+        )
+        agent.instructions(get_base_instructions)
+        agent.instructions(get_dynamic_instructions)
+
+        return agent.to_ag_ui(deps=StateDeps(SearchState()))
+    except Exception as e:
+        logger.error("Agent init failed; serving disabled stub.", error=str(e))
+        return _disabled_agent_app(str(e))
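
`build_agent_app` returns a plain ASGI app (or the 503 stub if initialization fails), so it can be mounted under an existing FastAPI application. A hedged sketch; the mount path and model name here are assumptions, not taken from this diff:

    from fastapi import FastAPI

    from orchestrator.search.agent.agent import build_agent_app

    app = FastAPI()
    # Any model identifier accepted by pydantic-ai works; "openai:gpt-4o" is an example.
    app.mount("/agent", build_agent_app("openai:gpt-4o"))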

orchestrator/search/agent/prompts.py
@@ -0,0 +1,62 @@
+import json
+from textwrap import dedent
+
+import structlog
+from pydantic_ai import RunContext
+from pydantic_ai.ag_ui import StateDeps
+
+from orchestrator.search.agent.state import SearchState
+from orchestrator.search.retrieval.validation import get_structured_filter_schema
+
+logger = structlog.get_logger(__name__)
+
+
+async def get_base_instructions() -> str:
+
+    try:
+        schema_dict = get_structured_filter_schema()
+        if schema_dict:
+            schema_info = "\n".join([f" {path}: {field_type}" for path, field_type in schema_dict.items()])
+        else:
+            schema_info = " No filterable fields available"
+    except Exception as e:
+        logger.warning(f"Failed to load schema for prompt: {e}")
+        schema_info = " Schema temporarily unavailable"
+    logger.error(f"Generated schema for agent prompt:\n{schema_info}")
+
+    return dedent(
+        f"""
+        You are a helpful assistant for building and running database queries.
+
+        **Available Data Schema:**
+        Use the following schema to understand the available fields.
+        When you build filters, each `path` MUST be a valid path from this schema,
+        and the operator/value MUST match that path's type.
+        ```
+        {schema_info}
+        ```
+        **Workflow (do in order):**
+        1) `set_search_parameters` to define the main entity being searched.
+        2) Build a complete `FilterTree` (AND at root unless the user asks for OR).
+        3) `set_filter_tree(filters=<FilterTree or null>)`.
+        4) `execute_search()`.
+        5) Summarize the results for the user.
+
+        """
+    )
+
+
+async def get_dynamic_instructions(ctx: RunContext[StateDeps[SearchState]]) -> str:
+    """Dynamically generate the system prompt for the agent."""
+    param_state = json.dumps(ctx.deps.state.parameters, indent=2, default=str) if ctx.deps.state.parameters else "{}"
+
+    return dedent(
+        f"""
+        Current search parameters state:
+        {param_state}
+
+        Remember:
+        - If filters are missing or incomplete, construct a full FilterTree and call `set_filter_tree`.
+        - Then call `execute_search`.
+        """
+    )
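
Both builders are plain async functions, so the rendered prompt can be previewed outside the agent; `get_base_instructions` degrades to a placeholder schema section when the schema lookup fails. Illustrative only:

    import asyncio

    from orchestrator.search.agent.prompts import get_base_instructions

    # Prints the schema-aware system prompt as the agent would receive it
    # (or the "Schema temporarily unavailable" fallback outside an app context).
    print(asyncio.run(get_base_instructions()))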

orchestrator/search/agent/tools.py
@@ -0,0 +1,121 @@
+from collections.abc import Awaitable, Callable
+from typing import Any, TypeVar
+
+import structlog
+from ag_ui.core import EventType, StateSnapshotEvent
+from pydantic_ai import RunContext
+from pydantic_ai.ag_ui import StateDeps
+from pydantic_ai.exceptions import ModelRetry
+from pydantic_ai.messages import ModelRequest, UserPromptPart
+from pydantic_ai.toolsets import FunctionToolset
+
+from orchestrator.api.api_v1.endpoints.search import (
+    search_processes,
+    search_products,
+    search_subscriptions,
+    search_workflows,
+)
+from orchestrator.schemas.search import SearchResultsSchema
+from orchestrator.search.core.types import ActionType, EntityType
+from orchestrator.search.filters import FilterTree
+from orchestrator.search.retrieval.validation import validate_filter_tree
+from orchestrator.search.schemas.parameters import PARAMETER_REGISTRY, BaseSearchParameters
+
+from .state import SearchState
+
+logger = structlog.get_logger(__name__)
+P = TypeVar("P", bound=BaseSearchParameters)
+
+SearchFn = Callable[[P], Awaitable[SearchResultsSchema[Any]]]
+
+SEARCH_FN_MAP: dict[EntityType, SearchFn] = {
+    EntityType.SUBSCRIPTION: search_subscriptions,
+    EntityType.WORKFLOW: search_workflows,
+    EntityType.PRODUCT: search_products,
+    EntityType.PROCESS: search_processes,
+}
+
+search_toolset: FunctionToolset[StateDeps[SearchState]] = FunctionToolset(max_retries=1)
+
+
+def last_user_message(ctx: RunContext[StateDeps[SearchState]]) -> str | None:
+    for msg in reversed(ctx.messages):
+        if isinstance(msg, ModelRequest):
+            for part in msg.parts:
+                if isinstance(part, UserPromptPart) and isinstance(part.content, str):
+                    return part.content
+    return None
+
+
+@search_toolset.tool
+async def set_search_parameters(
+    ctx: RunContext[StateDeps[SearchState]],
+    entity_type: EntityType,
+    action: str | ActionType = ActionType.SELECT,
+) -> StateSnapshotEvent:
+    params = ctx.deps.state.parameters or {}
+    is_new_search = params.get("entity_type") != entity_type.value
+    final_query = (last_user_message(ctx) or "") if is_new_search else params.get("query", "")
+
+    ctx.deps.state.parameters = {"action": action, "entity_type": entity_type, "filters": None, "query": final_query}
+    ctx.deps.state.results = []
+    logger.info(f"Set search parameters: entity_type={entity_type}, action={action}")
+
+    return StateSnapshotEvent(
+        type=EventType.STATE_SNAPSHOT,
+        snapshot=ctx.deps.state.model_dump(),
+    )
+
+
+@search_toolset.tool(retries=2)
+async def set_filter_tree(
+    ctx: RunContext[StateDeps[SearchState]],
+    filters: FilterTree | None,
+) -> StateSnapshotEvent:
+    """Replace current filters atomically with a full FilterTree, or clear with None.
+
+    Requirements:
+    - Root/group operators must be 'AND' or 'OR' (uppercase).
+    - Provide either PathFilters or nested groups under `children`.
+    - See the FilterTree schema examples for the exact shape.
+    """
+    if ctx.deps.state.parameters is None:
+        raise ModelRetry("Search parameters are not initialized. Call set_search_parameters first.")
+
+    entity_type = EntityType(ctx.deps.state.parameters["entity_type"])
+
+    try:
+        await validate_filter_tree(filters, entity_type)
+    except Exception as e:
+        raise ModelRetry(str(e))
+
+    ctx.deps.state.parameters["filters"] = None if filters is None else filters.model_dump(mode="json", by_alias=True)
+    logger.info(
+        "Set filter tree",
+        filters=None if filters is None else filters.model_dump(mode="json", by_alias=True),
+    )
+    return StateSnapshotEvent(type=EventType.STATE_SNAPSHOT, snapshot=ctx.deps.state.model_dump())
+
+
+@search_toolset.tool
+async def execute_search(
+    ctx: RunContext[StateDeps[SearchState]],
+    limit: int = 5,
+) -> StateSnapshotEvent:
+    """Execute the search with the current parameters."""
+    if not ctx.deps.state.parameters:
+        raise ValueError("No search parameters set")
+
+    entity_type = EntityType(ctx.deps.state.parameters["entity_type"])
+    param_class = PARAMETER_REGISTRY.get(entity_type)
+    if not param_class:
+        raise ValueError(f"Unknown entity type: {entity_type}")
+
+    params = param_class(**ctx.deps.state.parameters)
+    logger.info("Executing database search", **params.model_dump(mode="json"))
+
+    fn = SEARCH_FN_MAP[entity_type]
+    search_results = await fn(params)
+    ctx.deps.state.results = search_results.data[:limit]
+
+    return StateSnapshotEvent(type=EventType.STATE_SNAPSHOT, snapshot=ctx.deps.state.model_dump())
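
`execute_search` dispatches through `PARAMETER_REGISTRY` and `SEARCH_FN_MAP` using the four keys that `set_search_parameters` stores in state. A sketch of the same dispatch outside the agent, assuming a running application context with a database; the query string is made up:

    import asyncio

    from orchestrator.search.agent.tools import SEARCH_FN_MAP
    from orchestrator.search.core.types import ActionType, EntityType
    from orchestrator.search.schemas.parameters import PARAMETER_REGISTRY

    param_class = PARAMETER_REGISTRY[EntityType.SUBSCRIPTION]
    params = param_class(
        action=ActionType.SELECT,
        entity_type=EntityType.SUBSCRIPTION,
        filters=None,
        query="active fiber subscriptions",  # made-up query
    )
    results = asyncio.run(SEARCH_FN_MAP[EntityType.SUBSCRIPTION](params))
    print(len(results.data))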

orchestrator/search/core/__init__.py
File without changes

orchestrator/search/core/embedding.py
@@ -0,0 +1,64 @@
+import logging
+
+import structlog
+from litellm import aembedding as llm_aembedding
+from litellm import embedding as llm_embedding
+from litellm import exceptions as llm_exc
+
+from orchestrator.llm_settings import llm_settings
+
+logger = structlog.get_logger(__name__)
+
+# It logs a lot of noise, such as embedding vectors.
+logging.getLogger("LiteLLM").setLevel(logging.WARNING)
+
+
+class EmbeddingIndexer:
+
+    @classmethod
+    def get_embeddings_from_api_batch(cls, texts: list[str], dry_run: bool) -> list[list[float]]:
+        if not texts:
+            return []
+        if dry_run:
+            logger.debug("Dry Run: returning empty embeddings")
+            return [[] for _ in texts]
+
+        try:
+            resp = llm_embedding(
+                model=llm_settings.EMBEDDING_MODEL,
+                input=[t.lower() for t in texts],
+                api_key=llm_settings.OPENAI_API_KEY,
+                api_base=llm_settings.OPENAI_BASE_URL,
+                timeout=llm_settings.LLM_TIMEOUT,
+                max_retries=llm_settings.LLM_MAX_RETRIES,
+            )
+            data = sorted(resp.data, key=lambda e: e["index"])
+            return [row["embedding"] for row in data]
+        except (llm_exc.APIError, llm_exc.APIConnectionError, llm_exc.RateLimitError, llm_exc.Timeout) as e:
+            logger.error("Embedding request failed", error=str(e))
+            return [[] for _ in texts]
+        except Exception as e:
+            logger.error("Unexpected embedding error", error=str(e))
+            return [[] for _ in texts]
+
+
+class QueryEmbedder:
+    """A stateless, async utility for embedding real-time user queries."""
+
+    @classmethod
+    async def generate_for_text_async(cls, text: str) -> list[float]:
+        if not text:
+            return []
+        try:
+            resp = await llm_aembedding(
+                model=llm_settings.EMBEDDING_MODEL,
+                input=[text.lower()],
+                api_key=llm_settings.OPENAI_API_KEY,
+                api_base=llm_settings.OPENAI_BASE_URL,
+                timeout=5.0,
+                max_retries=0,  # No retries, prioritize speed.
+            )
+            return resp.data[0]["embedding"]
+        except Exception as e:
+            logger.error("Async embedding generation failed", error=str(e))
+            return []
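
The split mirrors the two call sites: `EmbeddingIndexer` batches synchronously with retries during indexing, while `QueryEmbedder` embeds one query per request with a tight timeout and no retries. A usage sketch, illustrative only:

    import asyncio

    from orchestrator.search.core.embedding import QueryEmbedder

    vector = asyncio.run(QueryEmbedder.generate_for_text_async("fiber subscriptions"))
    # Empty on failure; otherwise one float per embedding dimension
    # (1536 for the schema created by the migration above).
    print(len(vector))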

orchestrator/search/core/exceptions.py
@@ -0,0 +1,22 @@
+class SearchUtilsError(Exception):
+    """Base exception for this module."""
+
+    pass
+
+
+class ProductNotInRegistryError(SearchUtilsError):
+    """Raised when a product is not found in the model registry."""
+
+    pass
+
+
+class ModelLoadError(SearchUtilsError):
+    """Raised when a Pydantic model fails to load from a subscription."""
+
+    pass
+
+
+class InvalidCursorError(SearchUtilsError):
+    """Raised when cursor cannot be decoded."""
+
+    pass