haiku.rag-slim 0.16.0__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff shows the changes between two publicly available versions of the package, as released to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.

Potentially problematic release: this version of haiku.rag-slim might be problematic.

Files changed (94)
  1. haiku/rag/app.py +430 -72
  2. haiku/rag/chunkers/__init__.py +31 -0
  3. haiku/rag/chunkers/base.py +31 -0
  4. haiku/rag/chunkers/docling_local.py +164 -0
  5. haiku/rag/chunkers/docling_serve.py +179 -0
  6. haiku/rag/cli.py +207 -24
  7. haiku/rag/cli_chat.py +489 -0
  8. haiku/rag/client.py +1251 -266
  9. haiku/rag/config/__init__.py +16 -10
  10. haiku/rag/config/loader.py +5 -44
  11. haiku/rag/config/models.py +126 -17
  12. haiku/rag/converters/__init__.py +31 -0
  13. haiku/rag/converters/base.py +63 -0
  14. haiku/rag/converters/docling_local.py +193 -0
  15. haiku/rag/converters/docling_serve.py +229 -0
  16. haiku/rag/converters/text_utils.py +237 -0
  17. haiku/rag/embeddings/__init__.py +123 -24
  18. haiku/rag/embeddings/voyageai.py +175 -20
  19. haiku/rag/graph/__init__.py +0 -11
  20. haiku/rag/graph/agui/__init__.py +8 -2
  21. haiku/rag/graph/agui/cli_renderer.py +1 -1
  22. haiku/rag/graph/agui/emitter.py +219 -31
  23. haiku/rag/graph/agui/server.py +20 -62
  24. haiku/rag/graph/agui/stream.py +1 -2
  25. haiku/rag/graph/research/__init__.py +5 -2
  26. haiku/rag/graph/research/dependencies.py +12 -126
  27. haiku/rag/graph/research/graph.py +390 -135
  28. haiku/rag/graph/research/models.py +91 -112
  29. haiku/rag/graph/research/prompts.py +99 -91
  30. haiku/rag/graph/research/state.py +35 -27
  31. haiku/rag/inspector/__init__.py +8 -0
  32. haiku/rag/inspector/app.py +259 -0
  33. haiku/rag/inspector/widgets/__init__.py +6 -0
  34. haiku/rag/inspector/widgets/chunk_list.py +100 -0
  35. haiku/rag/inspector/widgets/context_modal.py +89 -0
  36. haiku/rag/inspector/widgets/detail_view.py +130 -0
  37. haiku/rag/inspector/widgets/document_list.py +75 -0
  38. haiku/rag/inspector/widgets/info_modal.py +209 -0
  39. haiku/rag/inspector/widgets/search_modal.py +183 -0
  40. haiku/rag/inspector/widgets/visual_modal.py +126 -0
  41. haiku/rag/mcp.py +106 -102
  42. haiku/rag/monitor.py +33 -9
  43. haiku/rag/providers/__init__.py +5 -0
  44. haiku/rag/providers/docling_serve.py +108 -0
  45. haiku/rag/qa/__init__.py +12 -10
  46. haiku/rag/qa/agent.py +43 -61
  47. haiku/rag/qa/prompts.py +35 -57
  48. haiku/rag/reranking/__init__.py +9 -6
  49. haiku/rag/reranking/base.py +1 -1
  50. haiku/rag/reranking/cohere.py +5 -4
  51. haiku/rag/reranking/mxbai.py +5 -2
  52. haiku/rag/reranking/vllm.py +3 -4
  53. haiku/rag/reranking/zeroentropy.py +6 -5
  54. haiku/rag/store/__init__.py +2 -1
  55. haiku/rag/store/engine.py +242 -42
  56. haiku/rag/store/exceptions.py +4 -0
  57. haiku/rag/store/models/__init__.py +8 -2
  58. haiku/rag/store/models/chunk.py +190 -0
  59. haiku/rag/store/models/document.py +46 -0
  60. haiku/rag/store/repositories/chunk.py +141 -121
  61. haiku/rag/store/repositories/document.py +25 -84
  62. haiku/rag/store/repositories/settings.py +11 -14
  63. haiku/rag/store/upgrades/__init__.py +19 -3
  64. haiku/rag/store/upgrades/v0_10_1.py +1 -1
  65. haiku/rag/store/upgrades/v0_19_6.py +65 -0
  66. haiku/rag/store/upgrades/v0_20_0.py +68 -0
  67. haiku/rag/store/upgrades/v0_23_1.py +100 -0
  68. haiku/rag/store/upgrades/v0_9_3.py +3 -3
  69. haiku/rag/utils.py +371 -146
  70. {haiku_rag_slim-0.16.0.dist-info → haiku_rag_slim-0.24.0.dist-info}/METADATA +15 -12
  71. haiku_rag_slim-0.24.0.dist-info/RECORD +78 -0
  72. {haiku_rag_slim-0.16.0.dist-info → haiku_rag_slim-0.24.0.dist-info}/WHEEL +1 -1
  73. haiku/rag/chunker.py +0 -65
  74. haiku/rag/embeddings/base.py +0 -25
  75. haiku/rag/embeddings/ollama.py +0 -28
  76. haiku/rag/embeddings/openai.py +0 -26
  77. haiku/rag/embeddings/vllm.py +0 -29
  78. haiku/rag/graph/agui/events.py +0 -254
  79. haiku/rag/graph/common/__init__.py +0 -5
  80. haiku/rag/graph/common/models.py +0 -42
  81. haiku/rag/graph/common/nodes.py +0 -265
  82. haiku/rag/graph/common/prompts.py +0 -46
  83. haiku/rag/graph/common/utils.py +0 -44
  84. haiku/rag/graph/deep_qa/__init__.py +0 -1
  85. haiku/rag/graph/deep_qa/dependencies.py +0 -27
  86. haiku/rag/graph/deep_qa/graph.py +0 -243
  87. haiku/rag/graph/deep_qa/models.py +0 -20
  88. haiku/rag/graph/deep_qa/prompts.py +0 -59
  89. haiku/rag/graph/deep_qa/state.py +0 -56
  90. haiku/rag/graph/research/common.py +0 -87
  91. haiku/rag/reader.py +0 -135
  92. haiku_rag_slim-0.16.0.dist-info/RECORD +0 -71
  93. {haiku_rag_slim-0.16.0.dist-info → haiku_rag_slim-0.24.0.dist-info}/entry_points.txt +0 -0
  94. {haiku_rag_slim-0.16.0.dist-info → haiku_rag_slim-0.24.0.dist-info}/licenses/LICENSE +0 -0
haiku/rag/cli_chat.py ADDED
@@ -0,0 +1,489 @@
+ """Interactive CLI chat loop for research graph with human-in-the-loop."""
+
+ import asyncio
+ import json
+
+ from pydantic_ai import Agent
+ from rich.console import Console
+ from rich.markdown import Markdown
+ from rich.panel import Panel
+ from rich.prompt import Prompt
+
+ from haiku.rag.client import HaikuRAG
+ from haiku.rag.config import get_config
+ from haiku.rag.config.models import AppConfig
+ from haiku.rag.graph.agui.emitter import AGUIEmitter
+ from haiku.rag.graph.research.dependencies import ResearchContext
+ from haiku.rag.graph.research.graph import build_research_graph
+ from haiku.rag.graph.research.models import ResearchReport
+ from haiku.rag.graph.research.state import HumanDecision, ResearchDeps, ResearchState
+ from haiku.rag.utils import get_model
+
+ INITIAL_CHAT_PROMPT = """You are a research assistant. The user hasn't started a research task yet.
+
+ You can:
+ 1. Chat with the user - greet them, answer questions about what you can do
+ 2. Detect when they want to research something
+
+ ## Actions:
+ - "chat": User is chatting, greeting, or asking questions (set message with your response)
+ - "research": User wants to research a topic (extract the research question into research_question)
+
+ ## Guidelines:
+ - If the user provides a clear research question or topic, set action="research" and extract the question
+ - If the user is just chatting or asking what you can do, set action="chat" and respond helpfully
+ - Be friendly and explain you can help them research topics by searching a knowledge base
+
+ Examples:
+ - "hi" → action="chat", message="Hello! I'm a research assistant. I can help you research topics by searching through documents and synthesizing findings. What would you like to explore?"
+ - "what can you do?" → action="chat", message="I help you conduct research! Give me a question or topic, and I'll break it into sub-questions, search for answers, and synthesize a report. What are you curious about?"
+ - "tell me about Python's memory management" → action="research", research_question="How does Python's memory management work?"
+ - "I want to understand how RAG systems work" → action="research", research_question="How do RAG (Retrieval-Augmented Generation) systems work?"
+ """
+
+ RESEARCH_ASSISTANT_PROMPT = """You are a research assistant helping the user conduct research on a topic.
+
+ You are at a decision point in the research workflow. You can:
+ 1. Chat with the user - answer questions, discuss the research, make suggestions
+ 2. Take workflow actions when the user requests them
+
+ ## Workflow Actions (set in the action field):
+ - "search": Search the pending questions (user says: "go", "search", "yes", "continue", "looks good")
+ - "synthesize": Generate final report (user says: "done", "finish", "synthesize", "generate report")
+ - "add_questions": Add NEW research questions to the existing list
+ - "modify_questions": REPLACE all pending questions with a new list (use when user wants to remove, keep only certain questions, or change the questions)
+ - "chat": Have a conversation without modifying questions
+
+ ## IMPORTANT - Modifying Questions:
+ - "use only the first question" → action="modify_questions", questions=[first question from the list]
+ - "drop questions 2 and 3" → action="modify_questions", questions=[remaining questions]
+ - "keep only questions about X" → action="modify_questions", questions=[filtered list]
+ - "remove the duplicate" → action="modify_questions", questions=[deduplicated list]
+ - When user wants to reduce/filter/keep-only, use "modify_questions" NOT "chat"
+
+ ## Guidelines:
+ - If the user wants to modify the question list in ANY way (remove, keep only, filter), use "modify_questions"
+ - For "modify_questions", include ALL questions that should remain in the questions field
+ - You can combine "chat" with a message to explain what you're doing
+ - If just chatting without changes, set action="chat" and provide helpful response in message
+ """
+
+
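Both prompts steer a pydantic_ai agent toward a structured HumanDecision output, imported above from haiku.rag.graph.research.state. That model is not shown in this file; a minimal sketch consistent with how its fields are read and constructed below (the action names come from the prompt text, while the field types and defaults are assumptions) might look like:

    from typing import Literal

    from pydantic import BaseModel

    class HumanDecision(BaseModel):
        # Action names are taken from the two prompts and the handlers below;
        # the authoritative definition lives in haiku/rag/graph/research/state.py.
        action: Literal[
            "chat", "research", "search", "synthesize",
            "add_questions", "modify_questions",
        ]
        message: str | None = None            # shown to the user when action == "chat"
        questions: list[str] | None = None    # payload for add_/modify_questions
        research_question: str | None = None  # extracted when action == "research"
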
+ async def initial_chat(
+     user_message: str,
+     config: AppConfig,
+ ) -> HumanDecision:
+     """Handle initial conversation before research starts.
+
+     Args:
+         user_message: The user's message
+         config: Application configuration
+
+     Returns:
+         HumanDecision with chat response or research question
+     """
+     agent: Agent[None, HumanDecision] = Agent(
+         model=get_model(config.research.model, config),
+         output_type=HumanDecision,
+         instructions=INITIAL_CHAT_PROMPT,
+         retries=2,
+     )
+
+     result = await agent.run(user_message)
+     return result.output
+
+
+ async def interpret_user_decision(
+     user_message: str,
+     sub_questions: list[str],
+     qa_responses: list[dict],
+     config: AppConfig,
+ ) -> HumanDecision:
+     """Interpret a natural language user message into a HumanDecision.
+
+     Args:
+         user_message: The user's natural language input
+         sub_questions: Current sub-questions pending search
+         qa_responses: Answers already collected
+         config: Application configuration
+
+     Returns:
+         HumanDecision with the interpreted action, questions, and/or message
+     """
+     agent: Agent[None, HumanDecision] = Agent(
+         model=get_model(config.research.model, config),
+         output_type=HumanDecision,
+         instructions=RESEARCH_ASSISTANT_PROMPT,
+         retries=2,
+     )
+
+     # Build context with full research state
+     answers_summary = ""
+     if qa_responses:
+         answers_parts = []
+         for qa in qa_responses:
+             conf = f"{qa['confidence']:.0%}" if qa.get("confidence") else "N/A"
+             answers_parts.append(
+                 f"Q: {qa['query']}\nA: {qa['answer'][:300]}... (confidence: {conf})"
+             )
+         answers_summary = "\n\n".join(answers_parts)
+
+     context = f"""Current research state:
+ - Answers collected: {len(qa_responses)}
+ - Pending questions to search: {len(sub_questions)}
+
+ Pending questions:
+ {chr(10).join(f"- {q}" for q in sub_questions) if sub_questions else "(none)"}
+
+ {f"Collected answers:{chr(10)}{answers_summary}" if answers_summary else ""}
+
+ User message: {user_message}"""
+
+     result = await agent.run(context)
+     return result.output
+
+
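For illustration, a hypothetical round trip through interpret_user_decision (the user message and the expected output are invented; the actual result depends on the configured model):

    import asyncio

    from haiku.rag.cli_chat import interpret_user_decision
    from haiku.rag.config import get_config

    async def demo() -> None:
        decision = await interpret_user_decision(
            user_message="keep only the first question",
            sub_questions=["What is chunking?", "What is reranking?"],
            qa_responses=[],
            config=get_config(),
        )
        # Per RESEARCH_ASSISTANT_PROMPT, a well-behaved model should return
        # action="modify_questions", questions=["What is chunking?"]
        print(decision.action, decision.questions)

    asyncio.run(demo())
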
+ async def run_interactive_research(
+     question: str,
+     client: HaikuRAG,
+     config: AppConfig | None = None,
+     search_filter: str | None = None,
+ ) -> ResearchReport:
+     """Run interactive research with human-in-the-loop decision points.
+
+     Args:
+         question: The research question
+         client: HaikuRAG client for document operations
+         config: Application configuration (uses global config if None)
+         search_filter: Optional SQL WHERE clause to filter documents
+
+     Returns:
+         ResearchReport with the final synthesis
+     """
+     config = config or get_config()
+     console = Console()
+
+     # Build interactive graph
+     graph = build_research_graph(config=config, include_plan=True, interactive=True)
+
+     # Create async queue for human input
+     human_input_queue: asyncio.Queue[HumanDecision] = asyncio.Queue()
+
+     # Create emitter
+     emitter: AGUIEmitter[ResearchState, ResearchReport] = AGUIEmitter()
+
+     # Create deps with queue
+     deps = ResearchDeps(
+         client=client,
+         agui_emitter=emitter,
+         human_input_queue=human_input_queue,
+         interactive=True,
+     )
+
+     # Create initial state
+     context = ResearchContext(original_question=question)
+     state = ResearchState.from_config(context=context, config=config)
+     state.search_filter = search_filter
+
+     # Start the run
+     emitter.start_run(state)
+
+     # Run graph in background task
+     async def run_graph() -> ResearchReport:
+         try:
+             result = await graph.run(state=state, deps=deps)
+             emitter.finish_run(result)
+             return result
+         except Exception as e:
+             emitter.error(e)
+             raise
+
+     graph_task = asyncio.create_task(run_graph())
+
+     # Process events and handle human decision points
+     try:
+         async for event in emitter:
+             event_type = event.get("type")
+
+             if event_type == "STEP_STARTED":
+                 step_name = event.get("stepName", "")
+                 if step_name == "plan":
+                     console.print("[dim]Planning research...[/dim]")
+                 elif step_name.startswith("search:"):
+                     query = step_name.replace("search: ", "")
+                     console.print(f"[dim]Searching: {query}[/dim]")
+                 elif step_name == "synthesize":
+                     console.print("[dim]Synthesizing report...[/dim]")
+
+             elif event_type == "STATE_SNAPSHOT" or event_type == "STATE_DELTA":
+                 # State updated, could show progress
+                 pass
+
+             elif event_type == "TOOL_CALL_START":
+                 tool_name = event.get("toolCallName")
+                 if tool_name == "human_decision":
+                     # Will get args in next event
+                     pass
+
+             elif event_type == "TOOL_CALL_ARGS":
+                 delta = event.get("delta", "{}")
+                 args = json.loads(delta) if isinstance(delta, str) else delta
+                 original_question = args.get("original_question", "")
+                 sub_questions = list(args.get("sub_questions", []))
+                 qa_responses = args.get("qa_responses", [])
+                 iterations = args.get("iterations", 0)
+
+                 # Loop for modifications until user wants to proceed
+                 while True:
+                     # Show research state
+                     console.print()
+                     console.print(
+                         Panel(
+                             f"[bold]{original_question}[/bold]",
+                             title="Research Question",
+                             border_style="blue",
+                         )
+                     )
+
+                     # Show collected answers
+                     if qa_responses:
+                         answers_text = []
+                         for i, qa in enumerate(qa_responses, 1):
+                             conf = (
+                                 f"{qa['confidence']:.0%}"
+                                 if qa.get("confidence")
+                                 else "N/A"
+                             )
+                             answer_preview = (
+                                 qa["answer"][:200] + "..."
+                                 if len(qa["answer"]) > 200
+                                 else qa["answer"]
+                             )
+                             answers_text.append(
+                                 f"[cyan]{i}. {qa['query']}[/cyan]\n"
+                                 f" [dim]Confidence: {conf} | Citations: {qa.get('citations_count', 0)}[/dim]\n"
+                                 f" {answer_preview}"
+                             )
+                         console.print(
+                             Panel(
+                                 "\n\n".join(answers_text),
+                                 title=f"Answers Collected ({len(qa_responses)})",
+                                 border_style="green",
+                             )
+                         )
+
+                     # Show pending questions
+                     if sub_questions:
+                         console.print(
+                             Panel(
+                                 "\n".join(
+                                     f"{i + 1}. {q}" for i, q in enumerate(sub_questions)
+                                 ),
+                                 title="Pending Questions to Search",
+                                 border_style="cyan",
+                             )
+                         )
+                     else:
+                         console.print("[dim]No pending questions.[/dim]")
+
+                     if iterations > 0:
+                         console.print(f"[dim]Iteration: {iterations}[/dim]")
+
+                     # Prompt user with context-aware hints
+                     console.print()
+                     hints = []
+                     if sub_questions:
+                         hints.append("search questions")
+                         hints.append("modify questions")
+                     if qa_responses:
+                         hints.append("generate report")
+                     hint_text = f" [dim]({', '.join(hints)})[/dim]" if hints else ""
+                     user_input = Prompt.ask(
+                         f"[bold]What would you like to do?[/bold]{hint_text}"
+                     )
+
+                     # Chat with research assistant
+                     console.print("[dim]Thinking...[/dim]")
+                     decision = await interpret_user_decision(
+                         user_message=user_input,
+                         sub_questions=sub_questions,
+                         qa_responses=qa_responses,
+                         config=config,
+                     )
+
+                     # Handle modifications and chat locally, continue loop
+                     if decision.action == "chat":
+                         if decision.message:
+                             console.print(
+                                 f"\n[bold cyan]Assistant:[/bold cyan] {decision.message}"
+                             )
+                         continue
+                     elif decision.action == "add_questions" and decision.questions:
+                         sub_questions.extend(decision.questions)
+                         console.print(
+                             f"[green]Added {len(decision.questions)} question(s)[/green]"
+                         )
+                         continue
+                     elif decision.action == "modify_questions" and decision.questions:
+                         sub_questions = list(decision.questions)
+                         console.print(
+                             f"[green]Replaced with {len(decision.questions)} question(s)[/green]"
+                         )
+                         continue
+
+                     # User wants to proceed - send final decision
+                     action_display = {
+                         "search": "Searching questions",
+                         "synthesize": "Generating report",
+                     }
+                     console.print(
+                         f"[dim]→ {action_display.get(decision.action, decision.action)}[/dim]"
+                     )
+
+                     # Include any accumulated question changes
+                     if decision.action == "search":
+                         decision = HumanDecision(
+                             action="modify_questions", questions=sub_questions
+                         )
+
+                     await human_input_queue.put(decision)
+                     break
+
+             elif event_type == "TEXT_MESSAGE_CHUNK":
+                 # Log message from graph
+                 message = event.get("delta", "")
+                 if message:
+                     console.print(f"[dim]{message}[/dim]")
+
+             elif event_type == "RUN_FINISHED":
+                 break
+
+             elif event_type == "RUN_ERROR":
+                 error_msg = event.get("message", "Unknown error")
+                 console.print(f"[red]Error: {error_msg}[/red]")
+                 break
+
+         # Wait for graph to complete
+         report = await graph_task
+         return report
+
+     except Exception as e:
+         graph_task.cancel()
+         raise e
+     finally:
+         await emitter.close()
+
+
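The human-in-the-loop handshake above hinges on the TOOL_CALL_ARGS event: the graph serializes its current state into the event's delta field, and the loop answers by putting a HumanDecision on human_input_queue. A placeholder example of the parsed args dict, with key names taken from the args.get(...) calls above and invented values:

    # Illustrative only; real payloads are produced by the research graph.
    args = {
        "original_question": "How do RAG systems work?",
        "sub_questions": ["What is retrieval?", "What is generation?"],
        "qa_responses": [
            {
                "query": "What is retrieval?",
                "answer": "Retrieval selects relevant chunks from the store...",
                "confidence": 0.82,
                "citations_count": 3,
            },
        ],
        "iterations": 1,
    }
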
+ async def run_chat_loop(
+     client: HaikuRAG,
+     config: AppConfig | None = None,
+     search_filter: str | None = None,
+     question: str | None = None,
+ ) -> None:
+     """Run an interactive chat loop for research.
+
+     Args:
+         client: HaikuRAG client for document operations
+         config: Application configuration (uses global config if None)
+         search_filter: Optional SQL WHERE clause to filter documents
+         question: Optional initial research question (skips initial chat if provided)
+     """
+     config = config or get_config()
+     console = Console()
+
+     console.print(
+         Panel(
+             "[bold cyan]Interactive Research Mode[/bold cyan]\n\n"
+             "Chat with me or tell me what you'd like to research.\n"
+             "Type [green]exit[/green] or [green]quit[/green] to end the session.",
+             title="haiku.rag Research Assistant",
+             border_style="cyan",
+         )
+     )
+
+     while True:
+         try:
+             # Use provided question or get one through conversation
+             if question:
+                 research_question = question
+                 console.print(f"[dim]Starting research: {research_question}[/dim]")
+                 question = None  # Clear so subsequent loops go through chat
+             else:
+                 # Initial conversation loop - chat until user wants to research
+                 research_question = None
+                 while research_question is None:
+                     user_input = Prompt.ask("\n[bold blue]You[/bold blue]")
+
+                     if not user_input.strip():
+                         continue
+
+                     if user_input.lower().strip() in ("exit", "quit", "q"):
+                         console.print("[dim]Goodbye![/dim]")
+                         return
+
+                     console.print("[dim]Thinking...[/dim]")
+                     decision = await initial_chat(user_input, config)
+
+                     if decision.action == "research" and decision.research_question:
+                         research_question = decision.research_question
+                         console.print(
+                             f"[dim]Starting research: {research_question}[/dim]"
+                         )
+                     elif decision.action == "chat" and decision.message:
+                         console.print(
+                             f"\n[bold cyan]Assistant:[/bold cyan] {decision.message}"
+                         )
+                     else:
+                         # Fallback - treat as research question
+                         research_question = user_input
+
+             console.print()
+             report = await run_interactive_research(
+                 question=research_question,
+                 client=client,
+                 config=config,
+                 search_filter=search_filter,
+             )
+
+             # Display final report
+             console.print()
+             console.print(
+                 Panel(
+                     Markdown(f"## {report.title}\n\n{report.executive_summary}"),
+                     title="Research Report",
+                     border_style="green",
+                 )
+             )
+
+             if report.main_findings:
+                 findings = "\n".join(f"- {f}" for f in report.main_findings[:5])
+                 console.print(Markdown(f"**Key Findings:**\n{findings}"))
+
+             if report.conclusions:
+                 conclusions = "\n".join(f"- {c}" for c in report.conclusions[:3])
+                 console.print(Markdown(f"**Conclusions:**\n{conclusions}"))
+
+             console.print(Markdown(f"**Sources:** {report.sources_summary}"))
+
+         except KeyboardInterrupt:
+             console.print("\n[dim]Goodbye![/dim]")
+             return
+         except Exception as e:
+             console.print(f"[red]Error: {e}[/red]")
+
+
+ def interactive_research(
+     client: HaikuRAG,
+     config: AppConfig | None = None,
+     search_filter: str | None = None,
+     question: str | None = None,
+ ) -> None:
+     """Entry point for interactive research mode.
+
+     Args:
+         client: HaikuRAG client for document operations
+         config: Application configuration (uses global config if None)
+         search_filter: Optional SQL WHERE clause to filter documents
+         question: Optional initial research question (skips initial chat if provided)
+     """
+     asyncio.run(run_chat_loop(client, config, search_filter, question))
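
A caller such as the research CLI command elsewhere in this diff would hand these entry points a HaikuRAG client. A hypothetical invocation, assuming HaikuRAG accepts a database path and works as an async context manager (see haiku/rag/client.py in this diff for the actual constructor):

    import asyncio

    from haiku.rag.cli_chat import run_chat_loop
    from haiku.rag.client import HaikuRAG

    async def main() -> None:
        # Assumed client construction; adjust to the real HaikuRAG API.
        async with HaikuRAG("knowledge.db") as client:
            await run_chat_loop(client, question="How does chunking work here?")

    asyncio.run(main())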