gnosisllm-knowledge 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gnosisllm_knowledge/__init__.py +152 -0
- gnosisllm_knowledge/api/__init__.py +5 -0
- gnosisllm_knowledge/api/knowledge.py +548 -0
- gnosisllm_knowledge/backends/__init__.py +26 -0
- gnosisllm_knowledge/backends/memory/__init__.py +9 -0
- gnosisllm_knowledge/backends/memory/indexer.py +384 -0
- gnosisllm_knowledge/backends/memory/searcher.py +516 -0
- gnosisllm_knowledge/backends/opensearch/__init__.py +19 -0
- gnosisllm_knowledge/backends/opensearch/agentic.py +738 -0
- gnosisllm_knowledge/backends/opensearch/config.py +195 -0
- gnosisllm_knowledge/backends/opensearch/indexer.py +499 -0
- gnosisllm_knowledge/backends/opensearch/mappings.py +255 -0
- gnosisllm_knowledge/backends/opensearch/queries.py +445 -0
- gnosisllm_knowledge/backends/opensearch/searcher.py +383 -0
- gnosisllm_knowledge/backends/opensearch/setup.py +1390 -0
- gnosisllm_knowledge/chunking/__init__.py +9 -0
- gnosisllm_knowledge/chunking/fixed.py +138 -0
- gnosisllm_knowledge/chunking/sentence.py +239 -0
- gnosisllm_knowledge/cli/__init__.py +18 -0
- gnosisllm_knowledge/cli/app.py +509 -0
- gnosisllm_knowledge/cli/commands/__init__.py +7 -0
- gnosisllm_knowledge/cli/commands/agentic.py +529 -0
- gnosisllm_knowledge/cli/commands/load.py +369 -0
- gnosisllm_knowledge/cli/commands/search.py +440 -0
- gnosisllm_knowledge/cli/commands/setup.py +228 -0
- gnosisllm_knowledge/cli/display/__init__.py +5 -0
- gnosisllm_knowledge/cli/display/service.py +555 -0
- gnosisllm_knowledge/cli/utils/__init__.py +5 -0
- gnosisllm_knowledge/cli/utils/config.py +207 -0
- gnosisllm_knowledge/core/__init__.py +87 -0
- gnosisllm_knowledge/core/domain/__init__.py +43 -0
- gnosisllm_knowledge/core/domain/document.py +240 -0
- gnosisllm_knowledge/core/domain/result.py +176 -0
- gnosisllm_knowledge/core/domain/search.py +327 -0
- gnosisllm_knowledge/core/domain/source.py +139 -0
- gnosisllm_knowledge/core/events/__init__.py +23 -0
- gnosisllm_knowledge/core/events/emitter.py +216 -0
- gnosisllm_knowledge/core/events/types.py +226 -0
- gnosisllm_knowledge/core/exceptions.py +407 -0
- gnosisllm_knowledge/core/interfaces/__init__.py +20 -0
- gnosisllm_knowledge/core/interfaces/agentic.py +136 -0
- gnosisllm_knowledge/core/interfaces/chunker.py +64 -0
- gnosisllm_knowledge/core/interfaces/fetcher.py +112 -0
- gnosisllm_knowledge/core/interfaces/indexer.py +244 -0
- gnosisllm_knowledge/core/interfaces/loader.py +102 -0
- gnosisllm_knowledge/core/interfaces/searcher.py +178 -0
- gnosisllm_knowledge/core/interfaces/setup.py +164 -0
- gnosisllm_knowledge/fetchers/__init__.py +12 -0
- gnosisllm_knowledge/fetchers/config.py +77 -0
- gnosisllm_knowledge/fetchers/http.py +167 -0
- gnosisllm_knowledge/fetchers/neoreader.py +204 -0
- gnosisllm_knowledge/loaders/__init__.py +13 -0
- gnosisllm_knowledge/loaders/base.py +399 -0
- gnosisllm_knowledge/loaders/factory.py +202 -0
- gnosisllm_knowledge/loaders/sitemap.py +285 -0
- gnosisllm_knowledge/loaders/website.py +57 -0
- gnosisllm_knowledge/py.typed +0 -0
- gnosisllm_knowledge/services/__init__.py +9 -0
- gnosisllm_knowledge/services/indexing.py +387 -0
- gnosisllm_knowledge/services/search.py +349 -0
- gnosisllm_knowledge-0.2.0.dist-info/METADATA +382 -0
- gnosisllm_knowledge-0.2.0.dist-info/RECORD +64 -0
- gnosisllm_knowledge-0.2.0.dist-info/WHEEL +4 -0
- gnosisllm_knowledge-0.2.0.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,529 @@
|
|
|
1
|
+
"""Agentic search commands for AI-powered knowledge retrieval.
|
|
2
|
+
|
|
3
|
+
Commands:
|
|
4
|
+
- setup: Configure agents in OpenSearch
|
|
5
|
+
- chat: Interactive agentic chat session
|
|
6
|
+
- status: Show agent configuration status
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import sys
|
|
13
|
+
from typing import TYPE_CHECKING, Any
|
|
14
|
+
|
|
15
|
+
from opensearchpy import AsyncOpenSearch
|
|
16
|
+
from rich.prompt import Prompt
|
|
17
|
+
|
|
18
|
+
from gnosisllm_knowledge.backends.opensearch.agentic import OpenSearchAgenticSearcher
|
|
19
|
+
from gnosisllm_knowledge.backends.opensearch.config import OpenSearchConfig
|
|
20
|
+
from gnosisllm_knowledge.backends.opensearch.setup import OpenSearchSetupAdapter
|
|
21
|
+
from gnosisllm_knowledge.cli.display.service import RichDisplayService, StepProgress
|
|
22
|
+
from gnosisllm_knowledge.cli.utils.config import CliConfig
|
|
23
|
+
from gnosisllm_knowledge.core.domain.search import AgentType, AgenticSearchQuery
|
|
24
|
+
|
|
25
|
+
if TYPE_CHECKING:
|
|
26
|
+
pass
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
async def _create_client(cli_config: CliConfig) -> AsyncOpenSearch:
    """Build an async OpenSearch client from the CLI connection settings."""
    # Only attach basic-auth credentials when both halves are configured;
    # a lone username or password is treated as "no auth".
    credentials = (
        (cli_config.opensearch_username, cli_config.opensearch_password)
        if cli_config.opensearch_username and cli_config.opensearch_password
        else None
    )
    endpoint = {"host": cli_config.opensearch_host, "port": cli_config.opensearch_port}
    return AsyncOpenSearch(
        hosts=[endpoint],
        http_auth=credentials,
        use_ssl=cli_config.opensearch_use_ssl,
        verify_certs=cli_config.opensearch_verify_certs,
        ssl_show_warn=False,
    )
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _create_opensearch_config(cli_config: CliConfig) -> OpenSearchConfig:
    """Map the flat CLI settings onto a backend OpenSearchConfig."""
    # Spell out every backend-relevant field explicitly so the mapping
    # between the two config objects is easy to audit in one place.
    settings = {
        "host": cli_config.opensearch_host,
        "port": cli_config.opensearch_port,
        "username": cli_config.opensearch_username,
        "password": cli_config.opensearch_password,
        "use_ssl": cli_config.opensearch_use_ssl,
        "verify_certs": cli_config.opensearch_verify_certs,
        "model_id": cli_config.opensearch_model_id,
        "openai_api_key": cli_config.openai_api_key,
        "flow_agent_id": cli_config.opensearch_flow_agent_id,
        "conversational_agent_id": cli_config.opensearch_conversational_agent_id,
        "agentic_llm_model": cli_config.agentic_llm_model,
        "agentic_max_iterations": cli_config.agentic_max_iterations,
        "agentic_timeout_seconds": cli_config.agentic_timeout_seconds,
    }
    return OpenSearchConfig(**settings)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
async def agentic_setup_command(
    display: RichDisplayService,
    agent_type: str = "all",
    force: bool = False,
) -> None:
    """Setup agentic search agents.

    Args:
        display: Display service for output.
        agent_type: Agent type to setup ('flow', 'conversational', 'all').
        force: Force recreate existing agents.

    Note:
        ``force`` is accepted for interface stability but is not yet consulted
        anywhere in this command — TODO(review): wire it through to the adapter.
    """
    cli_config = CliConfig.from_env()

    display.header(
        "GnosisLLM Agentic Search Setup",
        "Configuring AI agents for intelligent search",
    )

    # Validate configuration before touching OpenSearch at all.
    errors = cli_config.validate_for_agentic_setup()
    if errors:
        for error in errors:
            display.error(error)
        display.newline()
        display.format_error_with_suggestion(
            error="Configuration validation failed.",
            suggestion="Ensure all required environment variables are set.",
            command="gnosisllm-knowledge setup",
        )
        sys.exit(1)

    # Create client and adapter
    client = await _create_client(cli_config)
    config = _create_opensearch_config(cli_config)
    adapter = OpenSearchSetupAdapter(client, config)

    try:
        # Determine which agents to setup
        setup_flow = agent_type in ("flow", "all")
        setup_conversational = agent_type in ("conversational", "all")

        # Build the step list. The connector/model steps are informational:
        # the adapter creates those resources internally inside
        # setup_*_agent(); they are shown only so the user sees the pipeline.
        # They are shared by both agent kinds, so add them exactly once.
        steps: list[StepProgress] = []
        if setup_flow or setup_conversational:
            steps.append(StepProgress("llm_connector", "Create LLM connector for reasoning"))
            steps.append(StepProgress("llm_model", "Deploy LLM model"))
        if setup_flow:
            steps.append(StepProgress("flow_agent", "Create flow agent for fast RAG"))
        if setup_conversational:
            steps.append(StepProgress("conversational_agent", "Create conversational agent with memory"))

        progress = display.progress(steps)
        results: dict[str, str] = {}
        step_idx = 0

        def _tick_placeholder_steps(idx: int, count: int = 2) -> int:
            """Mark *count* informational steps done; return the next step index."""
            for _ in range(count):
                progress.update(idx, "running")
                progress.complete(idx)
                idx += 1
            return idx

        try:
            # Connector + model placeholders (actual work happens in the adapter).
            if setup_flow or setup_conversational:
                step_idx = _tick_placeholder_steps(step_idx)

            if setup_flow:
                # Flow agent (fast single-pass RAG)
                progress.update(step_idx, "running")
                results["flow_agent_id"] = await adapter.setup_flow_agent()
                progress.complete(step_idx)
                step_idx += 1

            if setup_conversational:
                # Conversational agent (multi-turn, with memory)
                progress.update(step_idx, "running")
                results["conversational_agent_id"] = await adapter.setup_conversational_agent()
                progress.complete(step_idx)
                step_idx += 1

        except Exception as e:
            progress.fail(step_idx, str(e))
            progress.stop()
            display.newline()
            display.error(f"Setup failed: {e}")
            sys.exit(1)

        progress.stop()
        display.newline()

        # Show environment variable instructions
        content = "[bold]Add to your .env file:[/bold]\n\n"
        if "flow_agent_id" in results:
            content += f" [green]OPENSEARCH_FLOW_AGENT_ID={results['flow_agent_id']}[/green]\n"
        if "conversational_agent_id" in results:
            content += f" [green]OPENSEARCH_CONVERSATIONAL_AGENT_ID={results['conversational_agent_id']}[/green]\n"

        content += "\n[bold]Test with:[/bold]\n"
        content += ' [dim]gnosisllm-knowledge search --mode agentic "your question"[/dim]\n'
        content += " [dim]gnosisllm-knowledge agentic chat[/dim]"

        display.panel(content, title="Agentic Setup Complete", style="success")

    finally:
        # Always release the client, even when setup aborts mid-way.
        await client.close()
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
async def agentic_chat_command(
    display: RichDisplayService,
    index_name: str = "knowledge",
    agent_type: str = "conversational",
    account_id: str | None = None,
    collection_ids: str | None = None,
    verbose: bool = False,
) -> None:
    """Interactive agentic chat session.

    Runs a blocking REPL: reads user input with Rich's ``Prompt``, routes
    slash-commands (/new, /quit, /help), and sends everything else to the
    agentic searcher. Exits on /quit or Ctrl+C.

    Args:
        display: Display service for output.
        index_name: Index to search.
        agent_type: Agent type ('flow' or 'conversational').
        account_id: Filter by account ID.
        collection_ids: Filter by collection IDs (comma-separated).
        verbose: Show reasoning steps.
    """
    cli_config = CliConfig.from_env()

    # Validate configuration
    errors = cli_config.validate_for_agentic_search(agent_type)
    if errors:
        for error in errors:
            display.error(error)
        sys.exit(1)

    display.header(
        "GnosisLLM Agentic Chat",
        f"Agent: {agent_type} | Index: {index_name} | Press Ctrl+C to exit",
    )

    client = await _create_client(cli_config)
    config = _create_opensearch_config(cli_config)
    searcher = OpenSearchAgenticSearcher(client, config)

    conversation_id: str | None = None
    # NOTE(review): entries are not whitespace-stripped — "a, b" yields " b".
    collection_list = collection_ids.split(",") if collection_ids else None

    async def start_new_conversation() -> str | None:
        """Create a new conversation memory for multi-turn chat."""
        # Flow agents are stateless, so only conversational agents get memory.
        if agent_type == "conversational":
            return await searcher.create_conversation(
                name="CLI Chat Session",
                account_id=account_id,
            )
        return None

    try:
        # Show help
        display.info("[dim]Commands: /new (new conversation), /quit (exit), /help (show help)[/dim]")

        # Create initial conversation memory for conversational agent
        if agent_type == "conversational":
            conversation_id = await start_new_conversation()
            if conversation_id:
                display.info(f"[dim]Conversation started (memory_id: {conversation_id[:8]}...)[/dim]")
            else:
                display.info("[dim]Conversation mode (memory will be created automatically)[/dim]")

        while True:
            try:
                display.newline()
                user_input = Prompt.ask("[bold cyan]You[/bold cyan]")

                # Ignore empty input rather than sending a blank query.
                if not user_input:
                    continue

                # Handle special commands
                if user_input.lower() in ("/new", "/clear"):
                    # Drop the old memory server-side before replacing it.
                    if conversation_id:
                        await searcher.clear_conversation(conversation_id)
                    # Create a fresh conversation memory
                    conversation_id = await start_new_conversation()
                    if conversation_id:
                        display.info(f"Started new conversation (memory_id: {conversation_id[:8]}...)")
                    else:
                        display.info("Started new conversation.")
                    continue

                if user_input.lower() in ("/quit", "/exit", "/q"):
                    break

                if user_input.lower() == "/help":
                    _show_chat_help(display)
                    continue

                # Build agentic query
                query = AgenticSearchQuery(
                    text=user_input,
                    agent_type=AgentType.CONVERSATIONAL if agent_type == "conversational" else AgentType.FLOW,
                    conversation_id=conversation_id,
                    collection_ids=collection_list,
                    account_id=account_id,
                    include_reasoning=verbose,
                )

                # Execute search with loading indicator
                with display.loading_spinner("Thinking..."):
                    result = await searcher.agentic_search(query, index_name)

                # Update conversation ID if agent returns one (prefer agent's memory_id)
                if result.conversation_id:
                    conversation_id = result.conversation_id

                # Display answer
                display.newline()
                if result.answer:
                    display.console.print(f"[bold green]Assistant[/bold green]: {result.answer}")
                else:
                    display.warning("No answer generated.")

                # Show reasoning steps if verbose
                if verbose and result.reasoning_steps:
                    display.newline()
                    display.console.print("[dim]Reasoning:[/dim]")
                    for step in result.reasoning_steps:
                        display.console.print(f" [dim]→ {step.tool}: {step.action}[/dim]")

                # Show sources if available (top 3 only, URLs truncated to 60 chars)
                if result.items:
                    display.newline()
                    display.info(f"[dim]Sources: {len(result.items)} documents ({result.duration_ms:.0f}ms)[/dim]")
                    for i, item in enumerate(result.items[:3], 1):
                        display.console.print(f" [dim]{i}. {item.title or 'Untitled'}[/dim]")
                        if item.url:
                            display.console.print(f" [blue]{item.url[:60]}[/blue]")

                # Show conversation ID in verbose mode
                if verbose and conversation_id:
                    display.console.print(f" [dim]Memory ID: {conversation_id}[/dim]")

            except KeyboardInterrupt:
                # Ctrl+C inside the loop exits the chat cleanly.
                display.newline()
                display.info("Goodbye!")
                break
            except Exception as e:
                # Per-turn errors are reported but do not end the session.
                display.error(f"Error: {e}")

    finally:
        await client.close()
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
async def agentic_status_command(
    display: RichDisplayService,
) -> None:
    """Show agentic search configuration status."""
    cli_config = CliConfig.from_env()

    display.header(
        "GnosisLLM Agentic Status",
        "Agent configuration and health",
    )

    # Summarize locally-configured IDs (env vars) before probing the cluster.
    display.agentic_status(
        flow_agent_id=cli_config.opensearch_flow_agent_id,
        conversational_agent_id=cli_config.opensearch_conversational_agent_id,
        embedding_model_id=cli_config.opensearch_model_id,
        llm_model=cli_config.agentic_llm_model,
    )

    # Nothing to verify remotely unless at least one agent and the embedding
    # model are configured — stop after the local summary in that case.
    if not (cli_config.has_agentic_agents and cli_config.opensearch_model_id):
        return

    display.newline()

    client = await _create_client(cli_config)
    config = _create_opensearch_config(cli_config)
    searcher = OpenSearchAgenticSearcher(client, config)

    # Probe each configured agent in a fixed order; the label feeds the
    # user-facing success/warning message.
    probes = (
        ("Flow", cli_config.opensearch_flow_agent_id),
        ("Conversational", cli_config.opensearch_conversational_agent_id),
    )
    try:
        for label, agent_id in probes:
            if not agent_id:
                continue
            status = await searcher.get_agent_status(agent_id)
            if status:
                display.success(f"{label} agent '{status.get('name')}' is active")
            else:
                display.warning(f"{label} agent not found in OpenSearch")

    except Exception as e:
        # Status is best-effort: report the failure but do not crash the CLI.
        display.warning(f"Could not verify agent status: {e}")
    finally:
        await client.close()
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
async def agentic_search_command(
    display: RichDisplayService,
    query: str,
    index_name: str = "knowledge",
    agent_type: str = "flow",
    account_id: str | None = None,
    collection_ids: str | None = None,
    source_ids: str | None = None,
    limit: int = 5,
    json_output: bool = False,
    verbose: bool = False,
) -> dict[str, Any] | None:
    """Execute agentic search.

    One-shot (non-interactive) agentic query. When ``json_output`` is set,
    all human-oriented display output is suppressed and a single JSON document
    is printed to stdout (including errors, as ``{"error": ...}``).

    Args:
        display: Display service for output.
        query: Search query text.
        index_name: Index to search.
        agent_type: Agent type ('flow' or 'conversational').
        account_id: Filter by account ID.
        collection_ids: Filter by collection IDs (comma-separated).
        source_ids: Filter by source IDs (comma-separated).
        limit: Maximum source documents to retrieve.
        json_output: Output as JSON for scripting.
        verbose: Show reasoning steps.

    Returns:
        Search result dict or None if failed.
    """
    cli_config = CliConfig.from_env()

    # Validate configuration
    errors = cli_config.validate_for_agentic_search(agent_type)
    if errors:
        if json_output:
            # JSON mode reports only the first error, for machine consumption.
            print(json.dumps({"error": errors[0]}))
        else:
            for error in errors:
                display.error(error)
        return None

    # Parse filter lists
    # NOTE(review): entries are not whitespace-stripped — "a, b" yields " b".
    collection_list = collection_ids.split(",") if collection_ids else None
    source_list = source_ids.split(",") if source_ids else None

    client = await _create_client(cli_config)
    config = _create_opensearch_config(cli_config)
    searcher = OpenSearchAgenticSearcher(client, config)

    try:
        if not json_output:
            display.header(
                "GnosisLLM Agentic Search",
                f"Query: {query[:50]}{'...' if len(query) > 50 else ''}",
            )

        # Build query
        agentic_query = AgenticSearchQuery(
            text=query,
            agent_type=AgentType.CONVERSATIONAL if agent_type == "conversational" else AgentType.FLOW,
            collection_ids=collection_list,
            source_ids=source_list,
            account_id=account_id,
            limit=limit,
            include_reasoning=verbose,
        )

        # Execute with loading spinner (spinner only in human mode)
        if not json_output:
            with display.loading_spinner("Agent thinking..."):
                result = await searcher.agentic_search(agentic_query, index_name)
        else:
            result = await searcher.agentic_search(agentic_query, index_name)

        # JSON output
        if json_output:
            # Stable output schema for scripting; step outputs are truncated
            # to 200 chars, source content to 300 unless --verbose.
            output = {
                "query": result.query,
                "mode": "agentic",
                "agent_type": result.agent_type.value,
                "answer": result.answer,
                "total_hits": result.total_hits,
                "duration_ms": result.duration_ms,
                "conversation_id": result.conversation_id,
                "reasoning_steps": [
                    {
                        "tool": step.tool,
                        "action": step.action,
                        "input": step.input,
                        "output": step.output[:200] if step.output else None,
                    }
                    for step in result.reasoning_steps
                ] if verbose else [],
                "sources": [
                    {
                        "title": item.title,
                        "url": item.url,
                        "score": item.score,
                        "content": item.content[:300] if not verbose else item.content,
                    }
                    for item in result.items
                ],
            }
            # default=str stringifies anything json can't encode (e.g. enums).
            print(json.dumps(output, indent=2, default=str))
            return output

        # Human-readable output
        display.agentic_result(
            answer=result.answer,
            sources=result.items,
            reasoning_steps=result.reasoning_steps if verbose else None,
            duration_ms=result.duration_ms,
            query=result.query,
            conversation_id=result.conversation_id,
            verbose=verbose,
        )

        # Compact summary dict for programmatic callers of this command.
        return {"answer": result.answer, "sources": len(result.items)}

    except Exception as e:
        if json_output:
            print(json.dumps({"error": str(e)}))
        else:
            display.format_error_with_suggestion(
                error=f"Agentic search failed: {e}",
                suggestion="Check that agents are configured and OpenSearch ML plugin is enabled.",
                command="gnosisllm-knowledge agentic status",
            )
        return None

    finally:
        await client.close()
|
|
514
|
+
|
|
515
|
+
|
|
516
|
+
def _show_chat_help(display: RichDisplayService) -> None:
|
|
517
|
+
"""Show chat help message."""
|
|
518
|
+
help_content = """[bold]Available Commands:[/bold]
|
|
519
|
+
|
|
520
|
+
[cyan]/new[/cyan], [cyan]/clear[/cyan] - Start a new conversation
|
|
521
|
+
[cyan]/quit[/cyan], [cyan]/exit[/cyan] - Exit chat
|
|
522
|
+
[cyan]/help[/cyan] - Show this help message
|
|
523
|
+
|
|
524
|
+
[bold]Tips:[/bold]
|
|
525
|
+
- Ask follow-up questions to continue the conversation
|
|
526
|
+
- The agent remembers context from previous messages
|
|
527
|
+
- Use specific questions for better answers"""
|
|
528
|
+
|
|
529
|
+
display.panel(help_content, title="Chat Help", style="info")
|