dao-ai 0.1.17__py3-none-any.whl → 0.1.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,366 @@
+ """
+ Instructed retriever for query decomposition and result fusion.
+
+ This module provides functions for decomposing user queries into multiple
+ subqueries with metadata filters and merging results using Reciprocal Rank Fusion.
+ """
+
+ import json
+ from datetime import datetime
+ from pathlib import Path
+ from typing import Any, Optional, Union
+
+ import mlflow
+ import yaml
+ from langchain_core.documents import Document
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.runnables import Runnable
+ from loguru import logger
+ from mlflow.entities import SpanType
+ from pydantic import BaseModel, ConfigDict, Field
+
+ from dao_ai.config import (
+     ColumnInfo,
+     DecomposedQueries,
+     FilterItem,
+     LLMModel,
+     SearchQuery,
+ )
+
+ # Module-level cache for LLM clients
+ _llm_cache: dict[str, BaseChatModel] = {}
+
+ # Load prompt template
+ _PROMPT_PATH = (
+     Path(__file__).parent.parent / "prompts" / "instructed_retriever_decomposition.yaml"
+ )
+
+
+ def _load_prompt_template() -> dict[str, Any]:
+     """Load the decomposition prompt template from YAML."""
+     with open(_PROMPT_PATH) as f:
+         return yaml.safe_load(f)
+
+
+ def _get_cached_llm(model_config: LLMModel) -> BaseChatModel:
+     """
+     Get or create a cached LLM client for decomposition.
+
+     Uses the full config as the cache key to avoid collisions when the same
+     model name is used with different parameters (temperature, API keys, etc.).
+     """
+     cache_key = model_config.model_dump_json()
+     if cache_key not in _llm_cache:
+         _llm_cache[cache_key] = model_config.as_chat_model()
+         logger.debug(
+             "Created new LLM client for decomposition", model=model_config.name
+         )
+     return _llm_cache[cache_key]
+
+
+ def _format_constraints(constraints: list[str] | None) -> str:
+     """Format constraints list for prompt injection."""
+     if not constraints:
+         return "No additional constraints."
+     return "\n".join(f"- {c}" for c in constraints)
+
+
+ def _format_examples(examples: list[dict[str, Any]] | None) -> str:
+     """Format few-shot examples for prompt injection.
+
+     Converts dict-style filters from config to FilterItem array format
+     to match the expected JSON schema output.
+     """
+     if not examples:
+         return "No examples provided."
+
+     formatted = []
+     for i, ex in enumerate(examples, 1):
+         query = ex.get("query", "")
+         filters = ex.get("filters", {})
+         # Convert dict to FilterItem array format
+         filter_items = [{"key": k, "value": v} for k, v in filters.items()]
+         formatted.append(
+             f'Example {i}:\n Query: "{query}"\n Filters: {json.dumps(filter_items)}'
+         )
+     return "\n".join(formatted)
+
+
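+ # A quick sketch of what _format_examples produces, assuming config-style
+ # examples with dict filters (the input values here are hypothetical):
+ #
+ #     _format_examples([{"query": "shoes under $50",
+ #                        "filters": {"category": "shoes", "price <": 50}}])
+ #     # -> 'Example 1:\n Query: "shoes under $50"\n Filters:
+ #     #     [{"key": "category", "value": "shoes"}, {"key": "price <", "value": 50}]'
+
+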
+ def create_decomposition_schema(
+     columns: list[ColumnInfo] | None = None,
+ ) -> type[BaseModel]:
+     """Create a schema-aware DecomposedQueries model with dynamic descriptions.
+
+     When columns are provided, the column names and valid operators are embedded
+     directly into the JSON schema that with_structured_output sends to the LLM.
+     This improves accuracy by making valid filter keys explicit in the schema.
+
+     Args:
+         columns: List of column metadata for dynamic schema generation
+
+     Returns:
+         A DecomposedQueries-compatible Pydantic model class
+     """
+     if not columns:
+         # Fall back to the generic model
+         return DecomposedQueries
+
+     # Build column info with types for the schema description
+     column_info = ", ".join(f"{c.name} ({c.type})" for c in columns)
+
+     # Build operator list from column definitions (union of all column operators)
+     all_operators: set[str] = set()
+     for col in columns:
+         all_operators.update(col.operators)
+     # Remove the empty string (equality) and sort for consistent output
+     named_operators = sorted(all_operators - {""})
+     operator_list = ", ".join(named_operators) if named_operators else "equality only"
+
+     # Build valid key examples with operators
+     key_examples: list[str] = []
+     for col in columns[:3]:  # Show examples for the first 3 columns
+         key_examples.append(f"'{col.name}'")
+         if "<" in col.operators:
+             key_examples.append(f"'{col.name} <'")
+         if "NOT" in col.operators:
+             key_examples.append(f"'{col.name} NOT'")
+
+     # Create a dynamic FilterItem with a schema-aware description
+     class SchemaFilterItem(BaseModel):
+         """A metadata filter for vector search with schema-specific columns."""
+
+         model_config = ConfigDict(extra="forbid")
+         key: str = Field(
+             description=(
+                 f"Column name with optional operator suffix. "
+                 f"Valid columns: {column_info}. "
+                 f"Operators: (none) for equality, {operator_list}. "
+                 f"Examples: {', '.join(key_examples[:5])}"
+             )
+         )
+         value: Union[str, int, float, bool, list[Union[str, int, float, bool]]] = Field(
+             description="The filter value matching the column type."
+         )
+
+     # Create a dynamic SearchQuery using SchemaFilterItem
+     class SchemaSearchQuery(BaseModel):
+         """A search query with schema-aware filters."""
+
+         model_config = ConfigDict(extra="forbid")
+         text: str = Field(
+             description=(
+                 "Natural language search query text optimized for semantic similarity. "
+                 "Should be focused on a single search intent. "
+                 "Do NOT include filter criteria in the text; use the filters field instead."
+             )
+         )
+         filters: Optional[list[SchemaFilterItem]] = Field(
+             default=None,
+             description=(
+                 f"Metadata filters to constrain search results. "
+                 f"Valid filter columns: {column_info}. "
+                 f"Set to null if no filters apply."
+             ),
+         )
+
+     # Create a dynamic DecomposedQueries using SchemaSearchQuery
+     class SchemaDecomposedQueries(BaseModel):
+         """Decomposed search queries with schema-aware filters."""
+
+         model_config = ConfigDict(extra="forbid")
+         queries: list[SchemaSearchQuery] = Field(
+             description=(
+                 "List of search queries extracted from the user request. "
+                 "Each query should target a distinct search intent. "
+                 "Order queries by importance, with the most relevant first."
+             )
+         )
+
+     return SchemaDecomposedQueries
+
+
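+ # Illustrative only, assuming ColumnInfo is constructible with the
+ # name/type/operators fields used above:
+ #
+ #     create_decomposition_schema(
+ #         [ColumnInfo(name="price", type="double", operators=["", "<"])]
+ #     )
+ #
+ # yields a model whose `key` field description reads roughly:
+ #     "Column name with optional operator suffix. Valid columns: price (double).
+ #      Operators: (none) for equality, <. Examples: 'price', 'price <'"
+
+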
+ @mlflow.trace(name="decompose_query", span_type=SpanType.LLM)
+ def decompose_query(
+     llm: BaseChatModel,
+     query: str,
+     schema_description: str,
+     constraints: list[str] | None = None,
+     max_subqueries: int = 3,
+     examples: list[dict[str, Any]] | None = None,
+     previous_feedback: str | None = None,
+     columns: list[ColumnInfo] | None = None,
+ ) -> list[SearchQuery]:
+     """
+     Decompose a user query into multiple search queries with filters.
+
+     Uses structured output for reliable parsing and injects the current time
+     for resolving relative date references. When columns are provided,
+     schema-aware Pydantic models are used for improved filter accuracy.
+
+     Args:
+         llm: Language model for decomposition
+         query: User's search query
+         schema_description: Column names, types, and valid filter syntax
+         constraints: Default constraints to apply
+         max_subqueries: Maximum number of subqueries to generate
+         examples: Few-shot examples for domain-specific filter translation
+         previous_feedback: Feedback from failed verification (for retry)
+         columns: Structured column info for dynamic schema generation
+
+     Returns:
+         List of SearchQuery objects with text and optional filters
+     """
+     current_time = datetime.now().isoformat()
+
+     # Load and format prompt
+     prompt_config = _load_prompt_template()
+     prompt_template = prompt_config["template"]
+
+     # Add a previous-feedback section if provided (for retry)
+     feedback_section = ""
+     if previous_feedback:
+         feedback_section = (
+             "\n\n## Previous Attempt Feedback\n"
+             f"The previous search attempt failed verification: {previous_feedback}\n"
+             "Adjust your filters to address this feedback."
+         )
+
+     prompt = (
+         prompt_template.format(
+             current_time=current_time,
+             schema_description=schema_description,
+             constraints=_format_constraints(constraints),
+             examples=_format_examples(examples),
+             max_subqueries=max_subqueries,
+             query=query,
+         )
+         + feedback_section
+     )
+
+     logger.trace(
+         "Decomposing query",
+         query=query[:100],
+         max_subqueries=max_subqueries,
+         dynamic_schema=columns is not None,
+     )
+
+     # Create a schema-aware model when columns are provided
+     DecompositionSchema: type[BaseModel] = create_decomposition_schema(columns)
+
+     # Use LangChain's with_structured_output for automatic strategy selection
+     # (JSON schema vs tool calling based on model capabilities)
+     try:
+         structured_llm: Runnable[str, BaseModel] = llm.with_structured_output(
+             DecompositionSchema
+         )
+         result: BaseModel = structured_llm.invoke(prompt)
+     except Exception as e:
+         logger.warning("Query decomposition failed", error=str(e))
+         raise
+
+     # Extract queries from the result (works with both static and dynamic schemas)
+     subqueries: list[SearchQuery] = []
+     for query_obj in result.queries[:max_subqueries]:
+         # Convert dynamic schema objects to SearchQuery for a consistent return type
+         filters: list[FilterItem] | None = None
+         if query_obj.filters:
+             filters = [FilterItem(key=f.key, value=f.value) for f in query_obj.filters]
+         subqueries.append(SearchQuery(text=query_obj.text, filters=filters))
+
+     # Log for observability
+     mlflow.set_tag("num_subqueries", len(subqueries))
+     mlflow.log_text(
+         json.dumps([sq.model_dump() for sq in subqueries], indent=2),
+         "decomposition.json",
+     )
+
+     logger.debug(
+         "Query decomposed",
+         num_subqueries=len(subqueries),
+         queries=[sq.text[:50] for sq in subqueries],
+     )
+
+     return subqueries
+
+
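+ # Minimal usage sketch; `chat_model` and the schema text are placeholders,
+ # not values shipped with the package:
+ #
+ #     subqueries = decompose_query(
+ #         llm=chat_model,
+ #         query="wireless keyboards under $50 released after 2023",
+ #         schema_description="category (string), price (double), release_year (int)",
+ #         max_subqueries=3,
+ #     )
+ #     # Each SearchQuery carries semantic text plus optional metadata filters.
+
+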
+ def rrf_merge(
+     results_lists: list[list[Document]],
+     k: int = 60,
+     primary_key: str | None = None,
+ ) -> list[Document]:
+     """
+     Merge results from multiple queries using Reciprocal Rank Fusion.
+
+     RRF is safer than raw score sorting because Databricks Vector Search
+     scores aren't normalized across query types (HYBRID vs ANN).
+
+     RRF score(doc) = Σ 1 / (k + rank_i), summed over each result list i that
+     contains the document, where rank_i is the document's 1-based rank.
+
+     Args:
+         results_lists: List of document lists from different subqueries
+         k: RRF constant (lower values weight top ranks more heavily)
+         primary_key: Metadata key for document identity (for deduplication)
+
+     Returns:
+         Merged and deduplicated documents sorted by RRF score
+     """
+     if not results_lists:
+         return []
+
+     # Filter empty lists first
+     non_empty = [r for r in results_lists if r]
+     if not non_empty:
+         return []
+
+     # Single-list optimization (still add RRF scores for consistency)
+     if len(non_empty) == 1:
+         docs_with_scores: list[Document] = []
+         for rank, doc in enumerate(non_empty[0]):
+             rrf_score = 1.0 / (k + rank + 1)
+             docs_with_scores.append(
+                 Document(
+                     page_content=doc.page_content,
+                     metadata={**doc.metadata, "rrf_score": rrf_score},
+                 )
+             )
+         return docs_with_scores
+
+     # Calculate RRF scores
+     # Key: document identifier, Value: (total_rrf_score, Document)
+     doc_scores: dict[str, tuple[float, Document]] = {}
+
+     def get_doc_id(doc: Document) -> str:
+         """Get a unique identifier for a document."""
+         if primary_key and primary_key in doc.metadata:
+             return str(doc.metadata[primary_key])
+         # Fall back to a content hash
+         return str(hash(doc.page_content))
+
+     for result_list in non_empty:
+         for rank, doc in enumerate(result_list):
+             doc_id = get_doc_id(doc)
+             rrf_score = 1.0 / (k + rank + 1)  # rank is 0-indexed
+
+             if doc_id in doc_scores:
+                 # Accumulate RRF score for duplicates
+                 existing_score, existing_doc = doc_scores[doc_id]
+                 doc_scores[doc_id] = (existing_score + rrf_score, existing_doc)
+             else:
+                 doc_scores[doc_id] = (rrf_score, doc)
+
+     # Sort by RRF score descending
+     sorted_docs = sorted(doc_scores.values(), key=lambda x: x[0], reverse=True)
+
+     # Add the RRF score to metadata
+     merged_docs: list[Document] = []
+     for rrf_score, doc in sorted_docs:
+         merged_doc = Document(
+             page_content=doc.page_content,
+             metadata={**doc.metadata, "rrf_score": rrf_score},
+         )
+         merged_docs.append(merged_doc)
+
+     logger.debug(
+         "RRF merge complete",
+         input_lists=len(results_lists),
+         total_docs=sum(len(r) for r in results_lists),
+         unique_docs=len(merged_docs),
+     )
+
+     return merged_docs
@@ -0,0 +1,202 @@
+ """
+ Instruction-aware reranker for constraint-based document reordering.
+
+ Runs after FlashRank to apply user instructions and constraints to the ranking.
+ A general-purpose component usable with any retrieval strategy.
+ """
+
+ from pathlib import Path
+ from typing import Any
+
+ import mlflow
+ import yaml
+ from langchain_core.documents import Document
+ from langchain_core.language_models import BaseChatModel
+ from loguru import logger
+ from mlflow.entities import SpanType
+
+ from dao_ai.config import ColumnInfo, RankingResult
+
+ # Load prompt template
+ _PROMPT_PATH = Path(__file__).parent.parent / "prompts" / "instruction_reranker.yaml"
+
+
+ def _load_prompt_template() -> dict[str, Any]:
+     """Load the instruction reranker prompt template from YAML."""
+     with open(_PROMPT_PATH) as f:
+         return yaml.safe_load(f)
+
+
+ def _format_documents(documents: list[Document]) -> str:
+     """Format documents for the reranking prompt."""
+     if not documents:
+         return "No documents to rerank."
+
+     formatted = []
+     for i, doc in enumerate(documents):
+         metadata_str = ", ".join(
+             f"{k}: {v}"
+             for k, v in doc.metadata.items()
+             if not k.startswith("_") and k not in ("rrf_score", "reranker_score")
+         )
+         content_preview = (
+             doc.page_content[:300] + "..."
+             if len(doc.page_content) > 300
+             else doc.page_content
+         )
+         formatted.append(
+             f"[{i}] Content: {content_preview}\n Metadata: {metadata_str}"
+         )
+
+     return "\n\n".join(formatted)
+
+
+ def _format_column_info(columns: list[ColumnInfo] | None) -> str:
+     """Format column info for the reranking prompt."""
+     if not columns:
+         return ""
+     return ", ".join(f"{c.name} ({c.type})" for c in columns)
+
+
+ @mlflow.trace(name="instruction_aware_rerank", span_type=SpanType.LLM)
+ def instruction_aware_rerank(
+     llm: BaseChatModel,
+     query: str,
+     documents: list[Document],
+     instructions: str | None = None,
+     schema_description: str | None = None,
+     columns: list[ColumnInfo] | None = None,
+     top_n: int | None = None,
+ ) -> list[Document]:
+     """
+     Rerank documents based on user instructions and constraints.
+
+     Args:
+         llm: Language model for reranking
+         query: User's search query
+         documents: Documents to rerank (typically FlashRank output)
+         instructions: Custom reranking instructions
+         schema_description: Column names and types for context
+         columns: Structured column info for dynamic instruction generation
+         top_n: Number of documents to return (None = all scored documents)
+
+     Returns:
+         Reranked documents with instruction_rerank_score in metadata
+     """
+     if not documents:
+         return []
+
+     prompt_config = _load_prompt_template()
+     prompt_template = prompt_config["template"]
+
+     # Build dynamic default instructions based on the columns
+     if columns:
+         column_names = ", ".join(c.name for c in columns)
+         default_instructions = (
+             f"Prioritize results that best match the user's explicit constraints "
+             f"on these columns: {column_names}. Prefer more specific matches over general results."
+         )
+     else:
+         default_instructions = (
+             "Prioritize results that best match the user's explicit constraints. "
+             "Prefer more specific matches over general results."
+         )
+
+     # Build effective instructions; use columns for context (the verbose
+     # schema_description is intentionally ignored here)
+     effective_instructions = instructions or default_instructions
+
+     # Add column context if available (simpler than the full schema_description)
+     if columns:
+         effective_instructions += (
+             f"\n\nAvailable metadata fields: {_format_column_info(columns)}"
+         )
+
+     prompt = prompt_template.format(
+         query=query,
+         instructions=effective_instructions,
+         documents=_format_documents(documents),
+     )
+
+     logger.trace("Instruction reranking", query=query[:100], num_docs=len(documents))
+
+     logger.debug(
+         "Invoking structured output for reranking",
+         query=query[:50],
+         num_docs=len(documents),
+         prompt_length=len(prompt),
+     )
+
+     try:
+         structured_llm = llm.with_structured_output(RankingResult)
+         result: RankingResult = structured_llm.invoke(prompt)
+         logger.debug(
+             "Structured output succeeded",
+             num_rankings=len(result.rankings),
+         )
+     except Exception as e:
+         logger.warning(
+             "Structured output invocation failed",
+             error=str(e),
+             query=query[:50],
+         )
+         result = None
+
+     if result is None or not result.rankings:
+         logger.warning(
+             "Failed to get structured output from reranker, returning original order",
+             query=query[:50],
+         )
+         # Fall back to decreasing scores based on the original order
+         return [
+             Document(
+                 page_content=doc.page_content,
+                 metadata={
+                     **doc.metadata,
+                     "instruction_rerank_score": 1.0 - (i / len(documents)),
+                     "instruction_rerank_reason": "fallback: extraction failed",
+                 },
+             )
+             for i, doc in enumerate(documents[:top_n] if top_n else documents)
+         ]
+
+     # Build the reranked document list
+     reranked: list[Document] = []
+     for ranking in result.rankings:
+         if ranking.index < 0 or ranking.index >= len(documents):
+             logger.warning("Invalid document index from reranker", index=ranking.index)
+             continue
+
+         original_doc = documents[ranking.index]
+         reranked_doc = Document(
+             page_content=original_doc.page_content,
+             metadata={
+                 **original_doc.metadata,
+                 "instruction_rerank_score": ranking.score,
+                 "instruction_rerank_reason": ranking.reason,
+             },
+         )
+         reranked.append(reranked_doc)
+
+     # Sort by score (highest first); don't rely on the LLM to sort
+     reranked.sort(
+         key=lambda d: d.metadata.get("instruction_rerank_score", 0),
+         reverse=True,
+     )
+
+     # Apply the top_n limit after sorting
+     if top_n is not None and len(reranked) > top_n:
+         reranked = reranked[:top_n]
+
+     # Calculate and log the average score
+     if reranked:
+         avg_score = sum(
+             d.metadata.get("instruction_rerank_score", 0) for d in reranked
+         ) / len(reranked)
+         mlflow.set_tag("reranker.instruction_avg_score", f"{avg_score:.3f}")
+
+     logger.debug(
+         "Instruction reranking complete",
+         input_count=len(documents),
+         output_count=len(reranked),
+     )
+
+     return reranked
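+
+ # Minimal usage sketch; `chat_model` and `candidate_docs` are placeholders:
+ #
+ #     reranked = instruction_aware_rerank(
+ #         llm=chat_model,
+ #         query="waterproof hiking boots",
+ #         documents=candidate_docs,  # e.g. FlashRank output
+ #         instructions="Prefer in-stock items.",
+ #         top_n=5,
+ #     )
+ #     # Each result carries instruction_rerank_score / instruction_rerank_reason
+ #     # in its metadata, sorted highest score first.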
dao_ai/tools/router.py ADDED
@@ -0,0 +1,89 @@
+ """
+ Query router for selecting an execution mode based on query characteristics.
+
+ Routes to internal execution modes within the same retriever instance:
+ - standard: a single similarity_search for simple queries
+ - instructed: decompose -> parallel search -> RRF for constrained queries
+ """
+
+ from pathlib import Path
+ from typing import Any, Literal
+
+ import mlflow
+ import yaml
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.runnables import Runnable
+ from loguru import logger
+ from mlflow.entities import SpanType
+ from pydantic import BaseModel, ConfigDict, Field
+
+ # Load prompt template
+ _PROMPT_PATH = Path(__file__).parent.parent / "prompts" / "router.yaml"
+
+
+ def _load_prompt_template() -> dict[str, Any]:
+     """Load the router prompt template from YAML."""
+     with open(_PROMPT_PATH) as f:
+         return yaml.safe_load(f)
+
+
+ class RouterDecision(BaseModel):
+     """Classification of a search query into an execution mode.
+
+     Analyze whether the query contains explicit constraints that map to
+     filterable metadata columns, or is a simple semantic search.
+     """
+
+     model_config = ConfigDict(extra="forbid")
+     mode: Literal["standard", "instructed"] = Field(
+         description=(
+             "The execution mode. "
+             "Use 'standard' for simple semantic searches without constraints. "
+             "Use 'instructed' when the query contains explicit constraints "
+             "that can be translated to metadata filters."
+         )
+     )
+
+
+ @mlflow.trace(name="route_query", span_type=SpanType.LLM)
+ def route_query(
+     llm: BaseChatModel,
+     query: str,
+     schema_description: str,
+ ) -> Literal["standard", "instructed"]:
+     """
+     Determine the execution mode for a search query.
+
+     Args:
+         llm: Language model for routing decision
+         query: User's search query
+         schema_description: Column names, types, and filter syntax
+
+     Returns:
+         "standard" for simple queries, "instructed" for constrained queries
+     """
+     prompt_config = _load_prompt_template()
+     prompt_template = prompt_config["template"]
+
+     prompt = prompt_template.format(
+         schema_description=schema_description,
+         query=query,
+     )
+
+     logger.trace("Routing query", query=query[:100])
+
+     # Use LangChain's with_structured_output for automatic strategy selection
+     # (JSON schema vs tool calling based on model capabilities)
+     try:
+         structured_llm: Runnable[str, RouterDecision] = llm.with_structured_output(
+             RouterDecision
+         )
+         decision: RouterDecision = structured_llm.invoke(prompt)
+     except Exception as e:
+         logger.warning("Router failed, defaulting to standard mode", error=str(e))
+         return "standard"
+
+     logger.debug("Router decision", mode=decision.mode, query=query[:50])
+     mlflow.set_tag("router.mode", decision.mode)
+
+     return decision.mode
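+
+ # Minimal usage sketch; `chat_model` and `schema_text` are placeholders:
+ #
+ #     mode = route_query(chat_model, "stores open after 9pm in Seattle", schema_text)
+ #     if mode == "instructed":
+ #         ...  # decompose -> parallel search -> rrf_merge
+ #     else:
+ #         ...  # single similarity_search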