haiku.rag 0.9.2__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53):
  1. README.md +205 -0
  2. haiku_rag-0.14.0.dist-info/METADATA +227 -0
  3. haiku_rag-0.14.0.dist-info/RECORD +6 -0
  4. haiku/rag/__init__.py +0 -0
  5. haiku/rag/app.py +0 -267
  6. haiku/rag/chunker.py +0 -51
  7. haiku/rag/cli.py +0 -359
  8. haiku/rag/client.py +0 -565
  9. haiku/rag/config.py +0 -77
  10. haiku/rag/embeddings/__init__.py +0 -35
  11. haiku/rag/embeddings/base.py +0 -15
  12. haiku/rag/embeddings/ollama.py +0 -17
  13. haiku/rag/embeddings/openai.py +0 -16
  14. haiku/rag/embeddings/vllm.py +0 -19
  15. haiku/rag/embeddings/voyageai.py +0 -17
  16. haiku/rag/logging.py +0 -56
  17. haiku/rag/mcp.py +0 -144
  18. haiku/rag/migration.py +0 -316
  19. haiku/rag/monitor.py +0 -73
  20. haiku/rag/qa/__init__.py +0 -15
  21. haiku/rag/qa/agent.py +0 -89
  22. haiku/rag/qa/prompts.py +0 -60
  23. haiku/rag/reader.py +0 -115
  24. haiku/rag/reranking/__init__.py +0 -34
  25. haiku/rag/reranking/base.py +0 -13
  26. haiku/rag/reranking/cohere.py +0 -34
  27. haiku/rag/reranking/mxbai.py +0 -28
  28. haiku/rag/reranking/vllm.py +0 -44
  29. haiku/rag/research/__init__.py +0 -37
  30. haiku/rag/research/base.py +0 -130
  31. haiku/rag/research/dependencies.py +0 -45
  32. haiku/rag/research/evaluation_agent.py +0 -42
  33. haiku/rag/research/orchestrator.py +0 -300
  34. haiku/rag/research/presearch_agent.py +0 -34
  35. haiku/rag/research/prompts.py +0 -129
  36. haiku/rag/research/search_agent.py +0 -65
  37. haiku/rag/research/synthesis_agent.py +0 -40
  38. haiku/rag/store/__init__.py +0 -4
  39. haiku/rag/store/engine.py +0 -230
  40. haiku/rag/store/models/__init__.py +0 -4
  41. haiku/rag/store/models/chunk.py +0 -15
  42. haiku/rag/store/models/document.py +0 -16
  43. haiku/rag/store/repositories/__init__.py +0 -9
  44. haiku/rag/store/repositories/chunk.py +0 -399
  45. haiku/rag/store/repositories/document.py +0 -234
  46. haiku/rag/store/repositories/settings.py +0 -148
  47. haiku/rag/store/upgrades/__init__.py +0 -1
  48. haiku/rag/utils.py +0 -162
  49. haiku_rag-0.9.2.dist-info/METADATA +0 -131
  50. haiku_rag-0.9.2.dist-info/RECORD +0 -50
  51. {haiku_rag-0.9.2.dist-info → haiku_rag-0.14.0.dist-info}/WHEEL +0 -0
  52. {haiku_rag-0.9.2.dist-info → haiku_rag-0.14.0.dist-info}/entry_points.txt +0 -0
  53. {haiku_rag-0.9.2.dist-info → haiku_rag-0.14.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,300 +0,0 @@
1
- from typing import Any
2
-
3
- from pydantic import BaseModel, Field
4
- from pydantic_ai.format_prompt import format_as_xml
5
- from pydantic_ai.run import AgentRunResult
6
- from rich.console import Console
7
-
8
- from haiku.rag.config import Config
9
- from haiku.rag.research.base import BaseResearchAgent
10
- from haiku.rag.research.dependencies import ResearchContext, ResearchDependencies
11
- from haiku.rag.research.evaluation_agent import (
12
- AnalysisEvaluationAgent,
13
- EvaluationResult,
14
- )
15
- from haiku.rag.research.presearch_agent import PresearchSurveyAgent
16
- from haiku.rag.research.prompts import ORCHESTRATOR_PROMPT
17
- from haiku.rag.research.search_agent import SearchSpecialistAgent
18
- from haiku.rag.research.synthesis_agent import ResearchReport, SynthesisAgent
19
-
20
-
21
class ResearchPlan(BaseModel):
    """Research execution plan.

    Produced by the orchestrator's planning step before any searches run.
    """

    # The main research question being planned for.
    main_question: str = Field(description="The main research question")
    # Standalone sub-questions to investigate; pydantic enforces the
    # max_length=3 cap at validation time.
    sub_questions: list[str] = Field(
        description="Decomposed sub-questions to investigate (max 3)", max_length=3
    )
28
-
29
-
30
- class ResearchOrchestrator(BaseResearchAgent[ResearchPlan]):
31
- """Orchestrator agent that coordinates the research workflow."""
32
-
33
- def __init__(
34
- self, provider: str | None = Config.RESEARCH_PROVIDER, model: str | None = None
35
- ):
36
- # Use provided values or fall back to config defaults
37
- provider = provider or Config.RESEARCH_PROVIDER or Config.QA_PROVIDER
38
- model = model or Config.RESEARCH_MODEL or Config.QA_MODEL
39
-
40
- super().__init__(provider, model, output_type=ResearchPlan)
41
-
42
- self.search_agent: SearchSpecialistAgent = SearchSpecialistAgent(
43
- provider, model
44
- )
45
- self.presearch_agent: PresearchSurveyAgent = PresearchSurveyAgent(
46
- provider, model
47
- )
48
- self.evaluation_agent: AnalysisEvaluationAgent = AnalysisEvaluationAgent(
49
- provider, model
50
- )
51
- self.synthesis_agent: SynthesisAgent = SynthesisAgent(provider, model)
52
-
53
    def get_system_prompt(self) -> str:
        """Return the orchestrator's planning system prompt."""
        return ORCHESTRATOR_PROMPT
55
-
56
    def register_tools(self) -> None:
        """Register orchestration tools (intentionally none).

        The orchestrator invokes its specialist sub-agents directly in
        conduct_research, so no pydantic-ai tools are registered here.
        """
        # Tools are no longer needed - orchestrator directly calls agents
        pass
60
-
61
- def _format_context_for_prompt(self, context: ResearchContext) -> str:
62
- """Format the research context as XML for inclusion in prompts."""
63
-
64
- context_data = {
65
- "original_question": context.original_question,
66
- "unanswered_questions": context.sub_questions,
67
- "qa_responses": [
68
- {
69
- "question": qa.query,
70
- "answer": qa.answer,
71
- "context_snippets": qa.context,
72
- "sources": qa.sources,
73
- }
74
- for qa in context.qa_responses
75
- ],
76
- "insights": context.insights,
77
- "gaps": context.gaps,
78
- }
79
- return format_as_xml(context_data, root_tag="research_context")
80
-
81
- async def conduct_research(
82
- self,
83
- question: str,
84
- client: Any,
85
- max_iterations: int = 3,
86
- confidence_threshold: float = 0.8,
87
- verbose: bool = False,
88
- console: Console | None = None,
89
- ) -> ResearchReport:
90
- """Conduct comprehensive research on a question.
91
-
92
- Args:
93
- question: The research question to investigate
94
- client: HaikuRAG client for document operations
95
- max_iterations: Maximum number of search-analyze-clarify cycles
96
- confidence_threshold: Minimum confidence level to stop research (0-1)
97
- verbose: If True, print progress and intermediate results
98
- console: Optional Rich console for output
99
-
100
- Returns:
101
- ResearchReport with comprehensive findings
102
- """
103
-
104
- # Initialize context
105
- context = ResearchContext(original_question=question)
106
- deps = ResearchDependencies(client=client, context=context)
107
-
108
- # Use provided console or create a new one
109
- console = console or Console() if verbose else None
110
-
111
- # Run a simple presearch survey to summarize KB context
112
- if console:
113
- console.print(
114
- "\n[bold cyan]🔎 Presearch: summarizing KB context...[/bold cyan]"
115
- )
116
-
117
- presearch_result = await self.presearch_agent.run(question, deps=deps)
118
-
119
- # Create initial research plan
120
- if console:
121
- console.print("\n[bold cyan]📋 Creating research plan...[/bold cyan]")
122
-
123
- # Include the presearch summary to ground the planning step.
124
-
125
- planning_context_xml = format_as_xml(
126
- {
127
- "original_question": question,
128
- "presearch_summary": presearch_result.output or "",
129
- },
130
- root_tag="planning_context",
131
- )
132
-
133
- plan_prompt = (
134
- "Create a research plan for the main question below.\n\n"
135
- f"Main question: {question}\n\n"
136
- "Use this brief presearch summary to inform the plan. Focus the 3 sub-questions "
137
- "on the most important aspects not already obvious from the current KB context.\n\n"
138
- f"{planning_context_xml}"
139
- )
140
-
141
- plan_result: AgentRunResult[ResearchPlan] = await self.run(
142
- plan_prompt, deps=deps
143
- )
144
-
145
- context.sub_questions = plan_result.output.sub_questions
146
-
147
- if console:
148
- console.print("\n[bold green]✅ Research Plan Created:[/bold green]")
149
- console.print(
150
- f" [bold]Main Question:[/bold] {plan_result.output.main_question}"
151
- )
152
- console.print(" [bold]Sub-questions:[/bold]")
153
- for i, sq in enumerate(plan_result.output.sub_questions, 1):
154
- console.print(f" {i}. {sq}")
155
- console.print()
156
-
157
- # Execute research iterations
158
- for iteration in range(max_iterations):
159
- if console:
160
- console.rule(
161
- f"[bold yellow]🔄 Iteration {iteration + 1}/{max_iterations}[/bold yellow]"
162
- )
163
-
164
- # Check if we have questions to search
165
- if not context.sub_questions:
166
- # No more questions to explore
167
- if console:
168
- console.print(
169
- "[yellow]No more questions to explore. Concluding research.[/yellow]"
170
- )
171
- break
172
-
173
- # Use current sub-questions for this iteration
174
- questions_to_search = context.sub_questions
175
-
176
- # Search phase - answer all questions in this iteration
177
- if console:
178
- console.print(
179
- f"\n[bold cyan]🔍 Searching & Answering {len(questions_to_search)} questions:[/bold cyan]"
180
- )
181
- for i, q in enumerate(questions_to_search, 1):
182
- console.print(f" {i}. {q}")
183
-
184
- # Run searches for all questions and remove answered ones
185
- answered_questions = []
186
- for search_question in questions_to_search:
187
- try:
188
- await self.search_agent.run(search_question, deps=deps)
189
- except Exception as e: # pragma: no cover - defensive
190
- if console:
191
- console.print(
192
- f"\n [red]×[/red] Omitting failed question: {search_question} ({e})"
193
- )
194
- finally:
195
- answered_questions.append(search_question)
196
-
197
- if console and context.qa_responses:
198
- # Show the last QA response (which should be for this question)
199
- latest_qa = context.qa_responses[-1]
200
- answer_preview = (
201
- latest_qa.answer[:150] + "..."
202
- if len(latest_qa.answer) > 150
203
- else latest_qa.answer
204
- )
205
- console.print(
206
- f"\n [green]✓[/green] {search_question[:50]}..."
207
- if len(search_question) > 50
208
- else f"\n [green]✓[/green] {search_question}"
209
- )
210
- console.print(f" {answer_preview}")
211
-
212
- # Remove answered questions from the list
213
- for question in answered_questions:
214
- if question in context.sub_questions:
215
- context.sub_questions.remove(question)
216
-
217
- # Analysis and Evaluation phase
218
- if console:
219
- console.print(
220
- "\n[bold cyan]📊 Analyzing and evaluating research progress...[/bold cyan]"
221
- )
222
-
223
- # Format context for the evaluation agent
224
- context_xml = self._format_context_for_prompt(context)
225
- evaluation_prompt = f"""Analyze all gathered information and evaluate the completeness of research.
226
-
227
- {context_xml}
228
-
229
- Evaluate the research progress for the original question and identify any remaining gaps."""
230
-
231
- evaluation_result = await self.evaluation_agent.run(
232
- evaluation_prompt,
233
- deps=deps,
234
- )
235
-
236
- if console and evaluation_result.output:
237
- output = evaluation_result.output
238
- if output.key_insights:
239
- console.print(" [bold]Key insights:[/bold]")
240
- for insight in output.key_insights:
241
- console.print(f" • {insight}")
242
- console.print(
243
- f" Confidence: [yellow]{output.confidence_score:.1%}[/yellow]"
244
- )
245
- status = (
246
- "[green]Yes[/green]" if output.is_sufficient else "[red]No[/red]"
247
- )
248
- console.print(f" Sufficient: {status}")
249
-
250
- # Store insights
251
- for insight in evaluation_result.output.key_insights:
252
- context.add_insight(insight)
253
-
254
- # Add new questions to the sub-questions list
255
- for new_q in evaluation_result.output.new_questions:
256
- if new_q not in context.sub_questions:
257
- context.sub_questions.append(new_q)
258
-
259
- # Check if research is sufficient
260
- if self._should_stop_research(evaluation_result, confidence_threshold):
261
- if console:
262
- console.print(
263
- f"\n[bold green]✅ Stopping research:[/bold green] {evaluation_result.output.reasoning}"
264
- )
265
- break
266
-
267
- # Generate final report
268
- if console:
269
- console.print(
270
- "\n[bold cyan]📝 Generating final research report...[/bold cyan]"
271
- )
272
-
273
- # Format context for the synthesis agent
274
- final_context_xml = self._format_context_for_prompt(context)
275
- synthesis_prompt = f"""Generate a comprehensive research report based on all gathered information.
276
-
277
- {final_context_xml}
278
-
279
- Create a detailed report that synthesizes all findings into a coherent response."""
280
-
281
- report_result: AgentRunResult[ResearchReport] = await self.synthesis_agent.run(
282
- synthesis_prompt, deps=deps
283
- )
284
-
285
- if console:
286
- console.print("[bold green]✅ Research complete![/bold green]")
287
-
288
- return report_result.output
289
-
290
- def _should_stop_research(
291
- self,
292
- evaluation_result: AgentRunResult[EvaluationResult],
293
- confidence_threshold: float,
294
- ) -> bool:
295
- """Determine if research should stop based on evaluation."""
296
-
297
- result = evaluation_result.output
298
-
299
- # Stop if the agent indicates sufficient information AND confidence exceeds threshold
300
- return result.is_sufficient and result.confidence_score >= confidence_threshold
@@ -1,34 +0,0 @@
1
- from pydantic_ai import RunContext
2
- from pydantic_ai.run import AgentRunResult
3
-
4
- from haiku.rag.research.base import BaseResearchAgent
5
- from haiku.rag.research.dependencies import ResearchDependencies
6
- from haiku.rag.research.prompts import PRESEARCH_AGENT_PROMPT
7
-
8
-
9
class PresearchSurveyAgent(BaseResearchAgent[str]):
    """Presearch agent that gathers verbatim context and summarizes it."""

    def __init__(self, provider: str, model: str) -> None:
        super().__init__(provider, model, str)

    async def run(
        self, prompt: str, deps: ResearchDependencies, **kwargs
    ) -> AgentRunResult[str]:
        """Delegate to the base agent; output is a plain-text KB summary."""
        return await super().run(prompt, deps, **kwargs)

    def get_system_prompt(self) -> str:
        """Return the presearch survey system prompt."""
        return PRESEARCH_AGENT_PROMPT

    def register_tools(self) -> None:
        """Expose a single tool that fetches raw KB context for the query."""

        @self.agent.tool
        async def gather_context(
            ctx: RunContext[ResearchDependencies],
            query: str,
            limit: int = 6,
        ) -> str:
            """Return verbatim concatenation of relevant chunk texts."""
            sanitized = query.replace('"', "")
            hits = await ctx.deps.client.search(sanitized, limit=limit)
            expanded_hits = await ctx.deps.client.expand_context(hits)
            texts = [chunk.content for chunk, _ in expanded_hits]
            return "\n\n".join(texts)
@@ -1,129 +0,0 @@
1
# System prompt for the ResearchOrchestrator planning step: decompose the main
# question into at most 3 standalone sub-questions.
ORCHESTRATOR_PROMPT = """You are a research orchestrator responsible for coordinating a comprehensive research workflow.

Your role is to:
1. Understand and decompose the research question
2. Plan a systematic research approach
3. Coordinate specialized agents to gather and analyze information
4. Ensure comprehensive coverage of the topic
5. Iterate based on findings and gaps

Create a research plan that:
- Breaks down the question into at most 3 focused sub-questions
- Each sub-question should target a specific aspect of the research
- Prioritize the most important aspects to investigate
- Ensure comprehensive coverage within the 3-question limit
- IMPORTANT: Make each sub-question a standalone, self-contained query that can
  be executed without additional context. Include necessary entities, scope,
  timeframe, and qualifiers. Avoid pronouns like "it/they/this"; write queries
  that make sense in isolation."""
19
-
20
# System prompt for SearchSpecialistAgent: search the KB via the
# search_and_answer tool, then answer strictly from retrieved snippets.
SEARCH_AGENT_PROMPT = """You are a search and question-answering specialist.

Your role is to:
1. Search the knowledge base for relevant information
2. Analyze the retrieved documents
3. Provide an accurate answer strictly grounded in the retrieved context

Output format:
- You must return a SearchAnswer model with fields:
  - query: the question being answered (echo the user query)
  - answer: your final answer based only on the provided context
  - context: list[str] of only the minimal set of verbatim snippet texts you
    used to justify the answer (do not include unrelated text; do not invent)
  - sources: list[str] of document_uri values corresponding to the snippets you
    actually used in the answer (one URI per context snippet, order aligned)

Tool usage:
- Always call the search_and_answer tool before drafting any answer.
- The tool returns XML containing only a list of snippets, where each snippet
  has the verbatim `text`, a `score` indicating relevance, and the
  `document_uri` it came from.
- You may call the tool multiple times to refine or broaden context, but do not
  exceed 3 total tool calls per question. Prefer precision over volume.
- Use scores to prioritize evidence, but include only the minimal subset of
  snippet texts (verbatim) in SearchAnswer.context.
- Set SearchAnswer.sources to the matching document_uris for the snippets you
  used (one URI per snippet, aligned by order). Context must be text-only.
- If no relevant information is found, say so and return an empty context list.

Important:
- Do not include any content in the answer that is not supported by the context.
- Keep context snippets short (just the necessary lines), verbatim, and focused."""
52
-
53
# System prompt for the combined analysis + evaluation step: extract insights,
# score confidence (0-1), and propose up to 3 genuinely new sub-questions.
EVALUATION_AGENT_PROMPT = """You are an analysis and evaluation specialist for research workflows.

You have access to:
- The original research question
- Question-answer pairs from search operations
- Raw search results and source documents
- Previously identified insights

Your dual role is to:

ANALYSIS:
1. Extract key insights from all gathered information
2. Identify patterns and connections across sources
3. Synthesize findings into coherent understanding
4. Focus on the most important discoveries

EVALUATION:
1. Assess if we have sufficient information to answer the original question
2. Calculate a confidence score (0-1) based on:
   - Coverage of the main question's aspects
   - Quality and consistency of sources
   - Depth of information gathered
3. Identify specific gaps that still need investigation
4. Generate up to 3 new sub-questions that haven't been answered yet

Be critical and thorough in your evaluation. Only mark research as sufficient when:
- All major aspects of the question are addressed
- Sources provide consistent, reliable information
- The depth of coverage meets the question's requirements
- No critical gaps remain

Generate new sub-questions that:
- Target specific unexplored aspects not covered by existing questions
- Seek clarification on ambiguities
- Explore important edge cases or exceptions
- Are focused and actionable (max 3)
- Do NOT repeat or rephrase questions that have already been answered (see qa_responses)
- Should be genuinely new areas to explore
- Must be standalone, self-contained queries: include entities, scope, and any
  needed qualifiers (e.g., timeframe, region), and avoid ambiguous pronouns so
  they can be executed independently."""
94
-
95
# System prompt for SynthesisAgent: turn the accumulated research context into
# a structured final report (ResearchReport).
SYNTHESIS_AGENT_PROMPT = """You are a synthesis specialist agent focused on creating comprehensive research reports.

Your role is to:
1. Synthesize all gathered information into a coherent narrative
2. Present findings in a clear, structured format
3. Draw evidence-based conclusions
4. Acknowledge limitations and uncertainties
5. Provide actionable recommendations
6. Maintain academic rigor and objectivity

Your report should be:
- Comprehensive yet concise
- Well-structured and easy to follow
- Based solely on evidence from the research
- Transparent about limitations
- Professional and objective in tone

Focus on creating a report that provides clear value to the reader by:
- Answering the original research question thoroughly
- Highlighting the most important findings
- Explaining the implications of the research
- Suggesting concrete next steps"""
117
-
118
# System prompt for PresearchSurveyAgent: one gather_context call, then a
# short summary of what the KB contains, strictly grounded in that context.
# Fixed a grammar error in the instruction text ("obtain a relevant texts").
PRESEARCH_AGENT_PROMPT = """You are a rapid research surveyor.

Task:
- Call the gather_context tool once with the main question to obtain
  relevant texts from the Knowledge Base (KB).
- Read that context and produce a brief natural-language summary describing
  what the KB appears to contain relative to the question.

Rules:
- Base the summary strictly on the provided text; do not invent.
- Output only the summary as plain text (one short paragraph).
"""
@@ -1,65 +0,0 @@
1
- from pydantic_ai import RunContext
2
- from pydantic_ai.format_prompt import format_as_xml
3
- from pydantic_ai.run import AgentRunResult
4
-
5
- from haiku.rag.research.base import BaseResearchAgent, SearchAnswer
6
- from haiku.rag.research.dependencies import ResearchDependencies
7
- from haiku.rag.research.prompts import SEARCH_AGENT_PROMPT
8
-
9
-
10
- class SearchSpecialistAgent(BaseResearchAgent[SearchAnswer]):
11
- """Agent specialized in answering questions using RAG search."""
12
-
13
- def __init__(self, provider: str, model: str) -> None:
14
- super().__init__(provider, model, output_type=SearchAnswer)
15
-
16
- async def run(
17
- self, prompt: str, deps: ResearchDependencies, **kwargs
18
- ) -> AgentRunResult[SearchAnswer]:
19
- """Execute the agent and persist the QA pair in shared context.
20
-
21
- Pydantic AI enforces `SearchAnswer` as the output model; we just store
22
- the QA response with the last search results as sources.
23
- """
24
- result = await super().run(prompt, deps, **kwargs)
25
-
26
- if result.output:
27
- deps.context.add_qa_response(result.output)
28
-
29
- return result
30
-
31
- def get_system_prompt(self) -> str:
32
- return SEARCH_AGENT_PROMPT
33
-
34
- def register_tools(self) -> None:
35
- """Register search-specific tools."""
36
-
37
- @self.agent.tool
38
- async def search_and_answer(
39
- ctx: RunContext[ResearchDependencies],
40
- query: str,
41
- limit: int = 5,
42
- ) -> str:
43
- """Search the KB and return a concise context pack."""
44
- # Remove quotes from queries as this requires positional indexing in lancedb
45
- # XXX: Investigate how to do that with lancedb
46
- query = query.replace('"', "")
47
- search_results = await ctx.deps.client.search(query, limit=limit)
48
- expanded = await ctx.deps.client.expand_context(search_results)
49
-
50
- snippet_entries = [
51
- {
52
- "text": chunk.content,
53
- "score": score,
54
- "document_uri": (chunk.document_uri or ""),
55
- }
56
- for chunk, score in expanded
57
- ]
58
-
59
- # Return an XML-formatted payload with the question and snippets.
60
- if snippet_entries:
61
- return format_as_xml(snippet_entries, root_tag="snippets")
62
- else:
63
- return (
64
- f"No relevant information found in the knowledge base for: {query}"
65
- )
@@ -1,40 +0,0 @@
1
- from pydantic import BaseModel, Field
2
-
3
- from haiku.rag.research.base import BaseResearchAgent
4
- from haiku.rag.research.prompts import SYNTHESIS_AGENT_PROMPT
5
-
6
-
7
- class ResearchReport(BaseModel):
8
- """Final research report structure."""
9
-
10
- title: str = Field(description="Concise title for the research")
11
- executive_summary: str = Field(description="Brief overview of key findings")
12
- main_findings: list[str] = Field(
13
- description="Primary research findings with supporting evidence"
14
- )
15
- conclusions: list[str] = Field(description="Evidence-based conclusions")
16
- limitations: list[str] = Field(
17
- description="Limitations of the current research", default=[]
18
- )
19
- recommendations: list[str] = Field(
20
- description="Actionable recommendations based on findings", default=[]
21
- )
22
- sources_summary: str = Field(
23
- description="Summary of sources used and their reliability"
24
- )
25
-
26
-
27
- class SynthesisAgent(BaseResearchAgent[ResearchReport]):
28
- """Agent specialized in synthesizing research into comprehensive reports."""
29
-
30
- def __init__(self, provider: str, model: str) -> None:
31
- super().__init__(provider, model, output_type=ResearchReport)
32
-
33
- def get_system_prompt(self) -> str:
34
- return SYNTHESIS_AGENT_PROMPT
35
-
36
- def register_tools(self) -> None:
37
- """Register synthesis-specific tools."""
38
- # The agent will use its LLM capabilities directly for synthesis
39
- # The structured output will guide the report generation
40
- pass
@@ -1,4 +0,0 @@
1
- from .engine import Store
2
- from .models import Chunk, Document
3
-
4
- __all__ = ["Store", "Chunk", "Document"]