haiku.rag 0.11.1__py3-none-any.whl → 0.11.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- haiku/rag/app.py +36 -2
- haiku/rag/cli.py +11 -1
- haiku/rag/client.py +47 -22
- haiku/rag/config.py +2 -2
- haiku/rag/embeddings/ollama.py +2 -0
- haiku/rag/embeddings/openai.py +2 -0
- haiku/rag/embeddings/vllm.py +2 -0
- haiku/rag/embeddings/voyageai.py +2 -0
- haiku/rag/graph/__init__.py +1 -0
- haiku/rag/graph/base.py +31 -0
- haiku/rag/graph/common.py +33 -0
- haiku/rag/graph/models.py +24 -0
- haiku/rag/graph/nodes/__init__.py +0 -0
- haiku/rag/{research → graph}/nodes/analysis.py +5 -4
- haiku/rag/{research → graph}/nodes/plan.py +6 -4
- haiku/rag/{research → graph}/nodes/search.py +5 -4
- haiku/rag/{research → graph}/nodes/synthesize.py +3 -4
- haiku/rag/graph/prompts.py +45 -0
- haiku/rag/monitor.py +2 -2
- haiku/rag/qa/deep/__init__.py +1 -0
- haiku/rag/qa/deep/dependencies.py +29 -0
- haiku/rag/qa/deep/graph.py +21 -0
- haiku/rag/qa/deep/models.py +20 -0
- haiku/rag/qa/deep/nodes.py +303 -0
- haiku/rag/qa/deep/prompts.py +57 -0
- haiku/rag/qa/deep/state.py +25 -0
- haiku/rag/reranking/__init__.py +3 -0
- haiku/rag/research/__init__.py +2 -27
- haiku/rag/research/common.py +0 -31
- haiku/rag/research/dependencies.py +1 -1
- haiku/rag/research/graph.py +4 -15
- haiku/rag/research/models.py +0 -25
- haiku/rag/research/prompts.py +0 -46
- haiku/rag/store/repositories/settings.py +3 -3
- {haiku_rag-0.11.1.dist-info → haiku_rag-0.11.3.dist-info}/METADATA +7 -1
- haiku_rag-0.11.3.dist-info/RECORD +68 -0
- haiku_rag-0.11.1.dist-info/RECORD +0 -55
- {haiku_rag-0.11.1.dist-info → haiku_rag-0.11.3.dist-info}/WHEEL +0 -0
- {haiku_rag-0.11.1.dist-info → haiku_rag-0.11.3.dist-info}/entry_points.txt +0 -0
- {haiku_rag-0.11.1.dist-info → haiku_rag-0.11.3.dist-info}/licenses/LICENSE +0 -0
haiku/rag/app.py
CHANGED
@@ -194,10 +194,44 @@ class HaikuRAGApp:
         for chunk, score in results:
             self._rich_print_search_result(chunk, score)
 
-    async def ask(self, question: str, cite: bool = False):
+    async def ask(
+        self,
+        question: str,
+        cite: bool = False,
+        deep: bool = False,
+        verbose: bool = False,
+    ):
         async with HaikuRAG(db_path=self.db_path) as self.client:
             try:
-                answer = await self.client.ask(question, cite=cite)
+                if deep:
+                    from rich.console import Console
+
+                    from haiku.rag.qa.deep.dependencies import DeepQAContext
+                    from haiku.rag.qa.deep.graph import build_deep_qa_graph
+                    from haiku.rag.qa.deep.nodes import DeepQAPlanNode
+                    from haiku.rag.qa.deep.state import DeepQADeps, DeepQAState
+
+                    graph = build_deep_qa_graph()
+                    context = DeepQAContext(
+                        original_question=question, use_citations=cite
+                    )
+                    state = DeepQAState(context=context)
+                    deps = DeepQADeps(
+                        client=self.client, console=Console() if verbose else None
+                    )
+
+                    start_node = DeepQAPlanNode(
+                        provider=Config.QA_PROVIDER,
+                        model=Config.QA_MODEL,
+                    )
+
+                    result = await graph.run(
+                        start_node=start_node, state=state, deps=deps
+                    )
+                    answer = result.output.answer
+                else:
+                    answer = await self.client.ask(question, cite=cite)
+
                 self.console.print(f"[bold blue]Question:[/bold blue] {question}")
                 self.console.print()
                 self.console.print("[bold green]Answer:[/bold green]")
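The deep branch drives the new QA graph directly instead of calling client.ask. A minimal sketch of the same flow outside the Rich app, using only names this release introduces; the database path and question are placeholders, and result.output is the graph's DeepQAAnswer exactly as consumed above:

    import asyncio

    from haiku.rag.client import HaikuRAG
    from haiku.rag.config import Config
    from haiku.rag.qa.deep.dependencies import DeepQAContext
    from haiku.rag.qa.deep.graph import build_deep_qa_graph
    from haiku.rag.qa.deep.nodes import DeepQAPlanNode
    from haiku.rag.qa.deep.state import DeepQADeps, DeepQAState


    async def deep_ask(db_path: str, question: str) -> str:
        # Mirrors HaikuRAGApp.ask(deep=True): plan -> search -> decide -> synthesize.
        async with HaikuRAG(db_path=db_path) as client:
            graph = build_deep_qa_graph()
            state = DeepQAState(
                context=DeepQAContext(original_question=question, use_citations=True)
            )
            # Pass a rich.console.Console instead of None for verbose progress logs.
            deps = DeepQADeps(client=client, console=None)
            result = await graph.run(
                start_node=DeepQAPlanNode(
                    provider=Config.QA_PROVIDER, model=Config.QA_MODEL
                ),
                state=state,
                deps=deps,
            )
            return result.output.answer


    # answer = asyncio.run(deep_ask("haiku.db", "..."))  # paths/questions illustrative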
haiku/rag/cli.py
CHANGED
@@ -299,11 +299,21 @@ def ask(
         "--cite",
         help="Include citations in the response",
     ),
+    deep: bool = typer.Option(
+        False,
+        "--deep",
+        help="Use deep multi-agent QA for complex questions",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        help="Show verbose progress output (only with --deep)",
+    ),
 ):
     from haiku.rag.app import HaikuRAGApp
 
     app = HaikuRAGApp(db_path=db)
-    asyncio.run(app.ask(question=question, cite=cite))
+    asyncio.run(app.ask(question=question, cite=cite, deep=deep, verbose=verbose))
 
 
 @cli.command("research", help="Run multi-agent research and output a concise report")
haiku/rag/client.py
CHANGED
@@ -1,4 +1,5 @@
 import hashlib
+import logging
 import mimetypes
 import tempfile
 from collections.abc import AsyncGenerator
@@ -18,6 +19,8 @@ from haiku.rag.store.repositories.document import DocumentRepository
 from haiku.rag.store.repositories.settings import SettingsRepository
 from haiku.rag.utils import text_to_docling_document
 
+logger = logging.getLogger(__name__)
+
 
 class HaikuRAG:
     """High-level haiku-rag client."""
@@ -538,8 +541,8 @@
         """Rebuild the database by deleting all chunks and re-indexing all documents.
 
         For documents with URIs:
-        -
-        -
+        - Re-adds from source if source exists
+        - Re-embeds from existing content if source is missing
 
         For documents without URIs:
         - Re-creates chunks from existing content
@@ -559,29 +562,51 @@
         for doc in documents:
             assert doc.id is not None, "Document ID should not be None"
             if doc.uri:
-                # Document has a URI -
-
-                await self.delete_document(doc.id)
+                # Document has a URI - check if source is accessible
+                source_accessible = False
+                parsed_url = urlparse(doc.uri)
 
-
-
-
+                try:
+                    if parsed_url.scheme == "file":
+                        # Check if file exists
+                        source_path = Path(parsed_url.path)
+                        source_accessible = source_path.exists()
+                    elif parsed_url.scheme in ("http", "https"):
+                        # For URLs, we'll try to create and catch errors
+                        source_accessible = True
+                    else:
+                        source_accessible = False
+                except Exception:
+                    source_accessible = False
+
+                if source_accessible:
+                    # Source exists - delete and recreate from source
+                    try:
+                        await self.delete_document(doc.id)
+                        new_doc = await self.create_document_from_source(
+                            source=doc.uri, metadata=doc.metadata or {}
+                        )
+                        assert new_doc.id is not None, (
+                            "New document ID should not be None"
+                        )
+                        yield new_doc.id
+                    except Exception as e:
+                        logger.error(
+                            "Error recreating document from source %s: %s",
+                            doc.uri,
+                            e,
+                        )
+                        continue
+                else:
+                    # Source missing - re-embed from existing content
+                    logger.warning(
+                        "Source missing for %s, re-embedding from content", doc.uri
                     )
-
-
-
-
-                except (FileNotFoundError, ValueError, OSError) as e:
-                    # Source doesn't exist or can't be accessed - document already deleted, skip
-                    print(f"Skipping document with URI {doc.uri}: {e}")
-                    continue
-                except Exception as e:
-                    # Unexpected error - log it and skip
-                    print(
-                        f"Unexpected error processing document with URI {doc.uri}: {e}"
+                    docling_document = text_to_docling_document(doc.content)
+                    await self.chunk_repository.create_chunks_for_document(
+                        doc.id, docling_document
                     )
-
+                    yield doc.id
             else:
                 # Document without URI - re-create chunks from existing content
                 docling_document = text_to_docling_document(doc.content)
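Net effect: a rebuild no longer silently drops documents whose source has vanished; it re-chunks them from stored content instead. The new decision rule, extracted into a standalone sketch for clarity (standard library only, same logic as the loop above):

    from pathlib import Path
    from urllib.parse import urlparse


    def source_is_accessible(uri: str) -> bool:
        # file: URIs are probed on disk; http(s) URIs are assumed reachable
        # (fetch errors are caught later); any other scheme counts as missing.
        try:
            parsed = urlparse(uri)
            if parsed.scheme == "file":
                return Path(parsed.path).exists()
            if parsed.scheme in ("http", "https"):
                return True
            return False
        except Exception:
            return False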
haiku/rag/config.py
CHANGED
@@ -20,8 +20,8 @@ class AppConfig(BaseModel):
     MONITOR_DIRECTORIES: list[Path] = []
 
     EMBEDDINGS_PROVIDER: str = "ollama"
-    EMBEDDINGS_MODEL: str = "
-    EMBEDDINGS_VECTOR_DIM: int =
+    EMBEDDINGS_MODEL: str = "qwen3-embedding"
+    EMBEDDINGS_VECTOR_DIM: int = 4096
 
    RERANK_PROVIDER: str = ""
    RERANK_MODEL: str = ""
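A quick sanity check of the new defaults, using the Config object the rest of the codebase imports (the old default model and dimension are truncated in this diff, so only the new values can be verified):

    from haiku.rag.config import Config

    # New defaults in 0.11.3. A store embedded under the previous defaults
    # will not match EMBEDDINGS_VECTOR_DIM=4096 and would need re-indexing.
    assert Config.EMBEDDINGS_PROVIDER == "ollama"
    assert Config.EMBEDDINGS_MODEL == "qwen3-embedding"
    assert Config.EMBEDDINGS_VECTOR_DIM == 4096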
haiku/rag/embeddings/ollama.py
CHANGED
@@ -7,6 +7,8 @@ from haiku.rag.embeddings.base import EmbedderBase
 class Embedder(EmbedderBase):
     async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
         client = AsyncOpenAI(base_url=f"{Config.OLLAMA_BASE_URL}/v1", api_key="dummy")
+        if not text:
+            return []
         response = await client.embeddings.create(
             model=self._model,
             input=text,
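The same two-line guard lands in the OpenAI and VoyageAI embedders below, and per the file list the vLLM embedder gets the identical +2 change. Its effect, sketched against any of these Embedder instances (construction elided, since the embedder factory is not part of this diff):

    async def demo(embedder) -> None:
        # 0.11.3 short-circuits before any network call, so an empty batch or
        # empty string yields [] instead of a provider-side validation error.
        assert await embedder.embed([]) == []
        assert await embedder.embed("") == []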
haiku/rag/embeddings/openai.py
CHANGED
@@ -6,6 +6,8 @@ from haiku.rag.embeddings.base import EmbedderBase
 class Embedder(EmbedderBase):
     async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
         client = AsyncOpenAI()
+        if not text:
+            return []
         response = await client.embeddings.create(
             model=self._model,
             input=text,
haiku/rag/embeddings/vllm.py
CHANGED
haiku/rag/embeddings/voyageai.py
CHANGED
@@ -6,6 +6,8 @@ try:
 class Embedder(EmbedderBase):
     async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
         client = Client()
+        if not text:
+            return []
         if isinstance(text, str):
             res = client.embed([text], model=self._model, output_dtype="float")
             return res.embeddings[0]  # type: ignore[return-value]
haiku/rag/graph/__init__.py
ADDED
@@ -0,0 +1 @@
+from haiku.rag.graph.models import ResearchPlan, SearchAnswer
haiku/rag/graph/base.py
ADDED
@@ -0,0 +1,31 @@
+from typing import Protocol, runtime_checkable
+
+from pydantic import BaseModel, Field
+from rich.console import Console
+
+from haiku.rag.client import HaikuRAG
+from haiku.rag.graph.models import SearchAnswer
+
+
+@runtime_checkable
+class GraphContext(Protocol):
+    """Protocol for graph context objects."""
+
+    original_question: str
+    sub_questions: list[str]
+    qa_responses: list[SearchAnswer]
+
+    def add_qa_response(self, qa: SearchAnswer) -> None: ...
+
+
+class BaseGraphDeps(BaseModel):
+    """Base dependencies for graph nodes."""
+
+    model_config = {"arbitrary_types_allowed": True}
+
+    client: HaikuRAG = Field(description="RAG client for document operations")
+    console: Console | None = None
+
+    def emit_log(self, message: str) -> None:
+        if self.console:
+            self.console.print(message)
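Because GraphContext is @runtime_checkable, conformance is structural: the DeepQAContext added later in this release satisfies it without inheriting from it. A small sketch:

    from haiku.rag.graph.base import GraphContext
    from haiku.rag.qa.deep.dependencies import DeepQAContext

    ctx = DeepQAContext(original_question="What changed in 0.11.3?")
    # isinstance() only checks that the attributes and add_qa_response exist,
    # not their types - the usual caveat with runtime_checkable protocols.
    assert isinstance(ctx, GraphContext)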
haiku/rag/graph/common.py
ADDED
@@ -0,0 +1,33 @@
+from typing import Any, Protocol
+
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.ollama import OllamaProvider
+from pydantic_ai.providers.openai import OpenAIProvider
+
+from haiku.rag.config import Config
+
+
+class HasEmitLog(Protocol):
+    def emit_log(self, message: str, state: Any = None) -> None: ...
+
+
+def get_model(provider: str, model: str) -> Any:
+    if provider == "ollama":
+        return OpenAIChatModel(
+            model_name=model,
+            provider=OllamaProvider(base_url=f"{Config.OLLAMA_BASE_URL}/v1"),
+        )
+    elif provider == "vllm":
+        return OpenAIChatModel(
+            model_name=model,
+            provider=OpenAIProvider(
+                base_url=f"{Config.VLLM_RESEARCH_BASE_URL or Config.VLLM_QA_BASE_URL}/v1",
+                api_key="none",
+            ),
+        )
+    else:
+        return f"{provider}:{model}"
+
+
+def log(deps: HasEmitLog, state: Any, message: str) -> None:
+    deps.emit_log(message, state)
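get_model returns either a configured OpenAIChatModel pointed at a local OpenAI-compatible endpoint (ollama/vllm) or a plain "provider:model" string that pydantic-ai resolves itself; both forms are valid agent models, so graph nodes stay provider-agnostic. A sketch with illustrative model names:

    from pydantic_ai import Agent

    from haiku.rag.graph.common import get_model

    # "qwen3" is a placeholder; any model served by the configured Ollama
    # instance works the same way.
    agent = Agent(get_model("ollama", "qwen3"))

    # Non-local providers fall through to pydantic-ai's string form:
    assert get_model("openai", "gpt-4o-mini") == "openai:gpt-4o-mini"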
haiku/rag/graph/models.py
ADDED
@@ -0,0 +1,24 @@
+from pydantic import BaseModel, Field
+
+
+class ResearchPlan(BaseModel):
+    main_question: str
+    sub_questions: list[str]
+
+
+class SearchAnswer(BaseModel):
+    query: str = Field(description="The search query that was performed")
+    answer: str = Field(description="The answer generated based on the context")
+    context: list[str] = Field(
+        description=(
+            "Only the minimal set of relevant snippets (verbatim) that directly "
+            "support the answer"
+        )
+    )
+    sources: list[str] = Field(
+        description=(
+            "Document titles (if available) or URIs corresponding to the"
+            " snippets actually used in the answer (one per snippet; omit if none)"
+        ),
+        default_factory=list,
+    )
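These two models are the contract between the planner and the search agents; the prompts added below reference SearchAnswer.context and SearchAnswer.sources directly. An illustrative instance, with made-up content:

    from haiku.rag.graph.models import ResearchPlan, SearchAnswer

    plan = ResearchPlan(
        main_question="How does the 0.11.3 rebuild handle missing sources?",
        sub_questions=["What happens to documents whose file: URI no longer exists?"],
    )
    qa = SearchAnswer(
        query=plan.sub_questions[0],
        answer="They are re-chunked and re-embedded from the stored content.",
        context=["Source missing - re-embed from existing content"],
        sources=["haiku/rag/client.py"],  # one identifier per context snippet
    )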
haiku/rag/graph/nodes/__init__.py
ADDED
File without changes
haiku/rag/{research → graph}/nodes/analysis.py
RENAMED
@@ -3,15 +3,13 @@ from dataclasses import dataclass
 from pydantic_ai import Agent
 from pydantic_graph import BaseNode, GraphRunContext
 
+from haiku.rag.graph.common import get_model, log
 from haiku.rag.research.common import (
     format_analysis_for_prompt,
     format_context_for_prompt,
-    get_model,
-    log,
 )
 from haiku.rag.research.dependencies import ResearchDependencies
 from haiku.rag.research.models import EvaluationResult, InsightAnalysis, ResearchReport
-from haiku.rag.research.nodes.synthesize import SynthesizeNode
 from haiku.rag.research.prompts import DECISION_AGENT_PROMPT, INSIGHT_AGENT_PROMPT
 from haiku.rag.research.state import ResearchDeps, ResearchState
@@ -89,6 +87,8 @@ class AnalyzeInsightsNode(BaseNode[ResearchState, ResearchDeps, ResearchReport])
         for question in analysis.new_questions:
             log(deps, state, f" • {question}")
 
+        from haiku.rag.graph.nodes.analysis import DecisionNode
+
         return DecisionNode(self.provider, self.model)
 
 
@@ -169,7 +169,8 @@ class DecisionNode(BaseNode[ResearchState, ResearchDeps, ResearchReport]):
         status = "[green]Yes[/green]" if output.is_sufficient else "[red]No[/red]"
         log(deps, state, f" Sufficient: {status}")
 
-        from haiku.rag.research.nodes.search import SearchDispatchNode
+        from haiku.rag.graph.nodes.search import SearchDispatchNode
+        from haiku.rag.graph.nodes.synthesize import SynthesizeNode
 
         if (
             output.is_sufficient
haiku/rag/{research → graph}/nodes/plan.py
RENAMED
@@ -3,11 +3,11 @@ from dataclasses import dataclass
 from pydantic_ai import Agent, RunContext
 from pydantic_graph import BaseNode, GraphRunContext
 
-from haiku.rag.research.common import get_model, log
+from haiku.rag.graph.common import get_model, log
+from haiku.rag.graph.models import ResearchPlan
+from haiku.rag.graph.prompts import PLAN_PROMPT
 from haiku.rag.research.dependencies import ResearchDependencies
-from haiku.rag.research.models import ResearchPlan, ResearchReport
-from haiku.rag.research.nodes.search import SearchDispatchNode
-from haiku.rag.research.prompts import PLAN_PROMPT
+from haiku.rag.research.models import ResearchReport
 from haiku.rag.research.state import ResearchDeps, ResearchState
 
 
@@ -67,4 +67,6 @@ class PlanNode(BaseNode[ResearchState, ResearchDeps, ResearchReport]):
         for i, sq in enumerate(state.context.sub_questions, 1):
             log(deps, state, f" {i}. {sq}")
 
+        from haiku.rag.graph.nodes.search import SearchDispatchNode
+
         return SearchDispatchNode(self.provider, self.model)
haiku/rag/{research → graph}/nodes/search.py
RENAMED
@@ -7,10 +7,11 @@ from pydantic_ai.format_prompt import format_as_xml
 from pydantic_ai.output import ToolOutput
 from pydantic_graph import BaseNode, GraphRunContext
 
-from haiku.rag.research.common import get_model, log
+from haiku.rag.graph.common import get_model, log
+from haiku.rag.graph.models import SearchAnswer
+from haiku.rag.graph.prompts import SEARCH_AGENT_PROMPT
 from haiku.rag.research.dependencies import ResearchDependencies
-from haiku.rag.research.models import ResearchReport
-from haiku.rag.research.prompts import SEARCH_AGENT_PROMPT
+from haiku.rag.research.models import ResearchReport
 from haiku.rag.research.state import ResearchDeps, ResearchState
 
 
@@ -25,7 +26,7 @@ class SearchDispatchNode(BaseNode[ResearchState, ResearchDeps, ResearchReport]):
         state = ctx.state
         deps = ctx.deps
         if not state.context.sub_questions:
-            from haiku.rag.research.nodes.analysis import AnalyzeInsightsNode
+            from haiku.rag.graph.nodes.analysis import AnalyzeInsightsNode
 
             return AnalyzeInsightsNode(self.provider, self.model)
 
haiku/rag/{research → graph}/nodes/synthesize.py
RENAMED
@@ -3,10 +3,9 @@ from dataclasses import dataclass
 from pydantic_ai import Agent
 from pydantic_graph import BaseNode, End, GraphRunContext
 
-from haiku.rag.
-from haiku.rag.research.
-
-)
+from haiku.rag.graph.common import get_model, log
+from haiku.rag.research.common import format_context_for_prompt
+from haiku.rag.research.dependencies import ResearchDependencies
 from haiku.rag.research.models import ResearchReport
 from haiku.rag.research.prompts import SYNTHESIS_AGENT_PROMPT
 from haiku.rag.research.state import ResearchDeps, ResearchState
haiku/rag/graph/prompts.py
ADDED
@@ -0,0 +1,45 @@
+PLAN_PROMPT = """You are the research orchestrator for a focused, iterative
+workflow.
+
+Responsibilities:
+1. Understand and decompose the main question
+2. Propose a minimal, high‑leverage plan
+3. Coordinate specialized agents to gather evidence
+4. Iterate based on gaps and new findings
+
+Plan requirements:
+- Produce at most 3 sub_questions that together cover the main question.
+- Each sub_question must be a standalone, self‑contained query that can run
+  without extra context. Include concrete entities, scope, timeframe, and any
+  qualifiers. Avoid ambiguous pronouns (it/they/this/that).
+- Prioritize the highest‑value aspects first; avoid redundancy and overlap.
+- Prefer questions that are likely answerable from the current knowledge base;
+  if coverage is uncertain, make scopes narrower and specific.
+- Order sub_questions by execution priority (most valuable first)."""
+
+SEARCH_AGENT_PROMPT = """You are a search and question‑answering specialist.
+
+Tasks:
+1. Search the knowledge base for relevant evidence.
+2. Analyze retrieved snippets.
+3. Provide an answer strictly grounded in that evidence.
+
+Tool usage:
+- Always call search_and_answer before drafting any answer.
+- The tool returns snippets with verbatim `text`, a relevance `score`, and the
+  originating document identifier (document title if available, otherwise URI).
+- You may call the tool multiple times to refine or broaden context, but do not
+  exceed 3 total calls. Favor precision over volume.
+- Use scores to prioritize evidence, but include only the minimal subset of
+  snippet texts (verbatim) in SearchAnswer.context (typically 1‑4).
+- Set SearchAnswer.sources to the corresponding document identifiers for the
+  snippets you used (title if available, otherwise URI; one per snippet; same
+  order as context). Context must be text‑only.
+- If no relevant information is found, clearly say so and return an empty
+  context list and sources list.
+
+Answering rules:
+- Be direct and specific; avoid meta commentary about the process.
+- Do not include any claims not supported by the provided snippets.
+- Prefer concise phrasing; avoid copying long passages.
+- When evidence is partial, state the limits explicitly in the answer."""
haiku/rag/monitor.py
CHANGED
@@ -1,13 +1,13 @@
+import logging
 from pathlib import Path
 
 from watchfiles import Change, DefaultFilter, awatch
 
 from haiku.rag.client import HaikuRAG
-from haiku.rag.logging import get_logger
 from haiku.rag.reader import FileReader
 from haiku.rag.store.models.document import Document
 
-logger =
+logger = logging.getLogger(__name__)
 
 
 class FileFilter(DefaultFilter):
haiku/rag/qa/deep/__init__.py
ADDED
@@ -0,0 +1 @@
+from haiku.rag.qa.deep.models import DeepQAAnswer
haiku/rag/qa/deep/dependencies.py
ADDED
@@ -0,0 +1,29 @@
+from pydantic import BaseModel, Field
+from rich.console import Console
+
+from haiku.rag.client import HaikuRAG
+from haiku.rag.graph.models import SearchAnswer
+
+
+class DeepQAContext(BaseModel):
+    original_question: str = Field(description="The original question")
+    sub_questions: list[str] = Field(
+        default_factory=list, description="Decomposed sub-questions"
+    )
+    qa_responses: list[SearchAnswer] = Field(
+        default_factory=list, description="QA pairs collected during answering"
+    )
+    use_citations: bool = Field(
+        default=False, description="Whether to include citations in the answer"
+    )
+
+    def add_qa_response(self, qa: SearchAnswer) -> None:
+        self.qa_responses.append(qa)
+
+
+class DeepQADependencies(BaseModel):
+    model_config = {"arbitrary_types_allowed": True}
+
+    client: HaikuRAG = Field(description="RAG client for document operations")
+    context: DeepQAContext = Field(description="Shared QA context")
+    console: Console | None = None
haiku/rag/qa/deep/graph.py
ADDED
@@ -0,0 +1,21 @@
+from pydantic_graph import Graph
+
+from haiku.rag.qa.deep.models import DeepQAAnswer
+from haiku.rag.qa.deep.nodes import (
+    DeepQADecisionNode,
+    DeepQAPlanNode,
+    DeepQASearchDispatchNode,
+    DeepQASynthesizeNode,
+)
+from haiku.rag.qa.deep.state import DeepQADeps, DeepQAState
+
+
+def build_deep_qa_graph() -> Graph[DeepQAState, DeepQADeps, DeepQAAnswer]:
+    return Graph(
+        nodes=[
+            DeepQAPlanNode,
+            DeepQASearchDispatchNode,
+            DeepQADecisionNode,
+            DeepQASynthesizeNode,
+        ]
+    )
haiku/rag/qa/deep/models.py
ADDED
@@ -0,0 +1,20 @@
+from pydantic import BaseModel, Field
+
+
+class DeepQAEvaluation(BaseModel):
+    is_sufficient: bool = Field(
+        description="Whether we have sufficient information to answer the question"
+    )
+    reasoning: str = Field(description="Explanation of the sufficiency assessment")
+    new_questions: list[str] = Field(
+        description="Additional sub-questions needed if insufficient",
+        default_factory=list,
+    )
+
+
+class DeepQAAnswer(BaseModel):
+    answer: str = Field(description="The comprehensive answer to the question")
+    sources: list[str] = Field(
+        description="Document titles or URIs used to generate the answer",
+        default_factory=list,
+    )