haiku.rag 0.5.2-py3-none-any.whl → 0.5.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of haiku.rag might be problematic.
- haiku/rag/app.py +2 -2
- haiku/rag/cli.py +6 -1
- haiku/rag/client.py +129 -2
- haiku/rag/config.py +1 -0
- haiku/rag/qa/__init__.py +12 -4
- haiku/rag/qa/anthropic.py +16 -14
- haiku/rag/qa/base.py +51 -3
- haiku/rag/qa/ollama.py +8 -12
- haiku/rag/qa/openai.py +13 -16
- haiku/rag/qa/prompts.py +37 -0
- haiku/rag/store/repositories/chunk.py +46 -0
- {haiku_rag-0.5.2.dist-info → haiku_rag-0.5.4.dist-info}/METADATA +8 -1
- {haiku_rag-0.5.2.dist-info → haiku_rag-0.5.4.dist-info}/RECORD +16 -16
- {haiku_rag-0.5.2.dist-info → haiku_rag-0.5.4.dist-info}/WHEEL +0 -0
- {haiku_rag-0.5.2.dist-info → haiku_rag-0.5.4.dist-info}/entry_points.txt +0 -0
- {haiku_rag-0.5.2.dist-info → haiku_rag-0.5.4.dist-info}/licenses/LICENSE +0 -0
haiku/rag/app.py
CHANGED
@@ -62,10 +62,10 @@ class HaikuRAGApp:
         for chunk, score in results:
             self._rich_print_search_result(chunk, score)
 
-    async def ask(self, question: str):
+    async def ask(self, question: str, cite: bool = False):
         async with HaikuRAG(db_path=self.db_path) as self.client:
             try:
-                answer = await self.client.ask(question)
+                answer = await self.client.ask(question, cite=cite)
                 self.console.print(f"[bold blue]Question:[/bold blue] {question}")
                 self.console.print()
                 self.console.print("[bold green]Answer:[/bold green]")
haiku/rag/cli.py
CHANGED
@@ -160,9 +160,14 @@ def ask(
         "--db",
         help="Path to the SQLite database file",
     ),
+    cite: bool = typer.Option(
+        False,
+        "--cite",
+        help="Include citations in the response",
+    ),
 ):
     app = HaikuRAGApp(db_path=db)
-    asyncio.run(app.ask(question=question))
+    asyncio.run(app.ask(question=question, cite=cite))
 
 
 @cli.command("settings", help="Display current configuration settings")
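Taken together, the app.py and cli.py hunks plumb a new `--cite` flag from Typer down to `HaikuRAG.ask`. A minimal sketch of the resulting call path (the database path and question below are illustrative):

```python
# Hypothetical invocation; mirrors what `haiku-rag ask "..." --cite` does.
import asyncio

from haiku.rag.app import HaikuRAGApp

app = HaikuRAGApp(db_path="./documents.db")  # example path
# cite=True flows through HaikuRAG.ask(question, cite=True), which makes
# the QA agent use the citation-aware system prompt.
asyncio.run(app.ask(question="Who is the author of haiku.rag?", cite=True))
```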
haiku/rag/client.py
CHANGED
@@ -348,18 +348,145 @@ class HaikuRAG:
         # Return reranked results with scores from reranker
         return reranked_results
 
-    async def ask(self, question: str) -> str:
+    async def expand_context(
+        self, search_results: list[tuple[Chunk, float]]
+    ) -> list[tuple[Chunk, float]]:
+        """Expand search results with adjacent chunks, merging overlapping chunks.
+
+        Args:
+            search_results: List of (chunk, score) tuples from search.
+
+        Returns:
+            List of (chunk, score) tuples with expanded and merged context chunks.
+        """
+        if Config.CONTEXT_CHUNK_RADIUS == 0:
+            return search_results
+
+        # Group chunks by document_id to handle merging within documents
+        document_groups = {}
+        for chunk, score in search_results:
+            doc_id = chunk.document_id
+            if doc_id not in document_groups:
+                document_groups[doc_id] = []
+            document_groups[doc_id].append((chunk, score))
+
+        results = []
+
+        for doc_id, doc_chunks in document_groups.items():
+            # Get all expanded ranges for this document
+            expanded_ranges = []
+            for chunk, score in doc_chunks:
+                adjacent_chunks = await self.chunk_repository.get_adjacent_chunks(
+                    chunk, Config.CONTEXT_CHUNK_RADIUS
+                )
+
+                all_chunks = adjacent_chunks + [chunk]
+
+                # Get the range of orders for this expanded chunk
+                orders = [c.metadata.get("order", 0) for c in all_chunks]
+                min_order = min(orders)
+                max_order = max(orders)
+
+                expanded_ranges.append(
+                    {
+                        "original_chunk": chunk,
+                        "score": score,
+                        "min_order": min_order,
+                        "max_order": max_order,
+                        "all_chunks": sorted(
+                            all_chunks, key=lambda c: c.metadata.get("order", 0)
+                        ),
+                    }
+                )
+
+            # Merge overlapping/adjacent ranges
+            merged_ranges = self._merge_overlapping_ranges(expanded_ranges)
+
+            # Create merged chunks
+            for merged_range in merged_ranges:
+                combined_content_parts = [c.content for c in merged_range["all_chunks"]]
+
+                # Use the first original chunk for metadata
+                original_chunk = merged_range["original_chunks"][0]
+
+                merged_chunk = Chunk(
+                    id=original_chunk.id,
+                    document_id=original_chunk.document_id,
+                    content="".join(combined_content_parts),
+                    metadata=original_chunk.metadata,
+                    document_uri=original_chunk.document_uri,
+                    document_meta=original_chunk.document_meta,
+                )
+
+                # Use the highest score from merged chunks
+                best_score = max(merged_range["scores"])
+                results.append((merged_chunk, best_score))
+
+        return results
+
+    def _merge_overlapping_ranges(self, expanded_ranges):
+        """Merge overlapping or adjacent expanded ranges."""
+        if not expanded_ranges:
+            return []
+
+        # Sort by min_order
+        sorted_ranges = sorted(expanded_ranges, key=lambda x: x["min_order"])
+        merged = []
+
+        current = {
+            "min_order": sorted_ranges[0]["min_order"],
+            "max_order": sorted_ranges[0]["max_order"],
+            "original_chunks": [sorted_ranges[0]["original_chunk"]],
+            "scores": [sorted_ranges[0]["score"]],
+            "all_chunks": sorted_ranges[0]["all_chunks"],
+        }
+
+        for range_info in sorted_ranges[1:]:
+            # Check if ranges overlap or are adjacent (max_order + 1 >= min_order)
+            if current["max_order"] >= range_info["min_order"] - 1:
+                # Merge ranges
+                current["max_order"] = max(
+                    current["max_order"], range_info["max_order"]
+                )
+                current["original_chunks"].append(range_info["original_chunk"])
+                current["scores"].append(range_info["score"])
+
+                # Merge all_chunks and deduplicate by order
+                all_chunks_dict = {}
+                for chunk in current["all_chunks"] + range_info["all_chunks"]:
+                    order = chunk.metadata.get("order", 0)
+                    all_chunks_dict[order] = chunk
+                current["all_chunks"] = [
+                    all_chunks_dict[order] for order in sorted(all_chunks_dict.keys())
+                ]
+            else:
+                # No overlap, add current to merged and start new
+                merged.append(current)
+                current = {
+                    "min_order": range_info["min_order"],
+                    "max_order": range_info["max_order"],
+                    "original_chunks": [range_info["original_chunk"]],
+                    "scores": [range_info["score"]],
+                    "all_chunks": range_info["all_chunks"],
+                }
+
+        # Add the last range
+        merged.append(current)
+        return merged
+
+    async def ask(self, question: str, cite: bool = False) -> str:
         """Ask a question using the configured QA agent.
 
         Args:
             question: The question to ask.
+            cite: Whether to include citations in the response.
 
         Returns:
             The generated answer as a string.
         """
         from haiku.rag.qa import get_qa_agent
 
-        qa_agent = get_qa_agent(self)
+        qa_agent = get_qa_agent(self, use_citations=cite)
         return await qa_agent.answer(question)
 
     async def rebuild_database(self) -> AsyncGenerator[int, None]:
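The heart of `expand_context` is the interval sweep in `_merge_overlapping_ranges`: sort the expanded ranges by their starting chunk order, then fold each range into the current one whenever it overlaps or directly abuts it. A self-contained sketch of that sweep on toy `(min_order, max_order)` pairs (the function name and data are illustrative, not part of the package):

```python
# Toy version of the sweep in _merge_overlapping_ranges: merge intervals
# that overlap or are adjacent (current end >= next start - 1).
def merge_ranges(ranges: list[tuple[int, int]]) -> list[tuple[int, int]]:
    if not ranges:
        return []
    ranges = sorted(ranges)  # the real code sorts on "min_order"
    merged = [ranges[0]]
    for start, end in ranges[1:]:
        last_start, last_end = merged[-1]
        if last_end >= start - 1:  # overlap or adjacency
            merged[-1] = (last_start, max(last_end, end))
        else:
            merged.append((start, end))
    return merged

# Two hits expanded to chunk orders 2-4 and 4-6 collapse into one 2-6
# window; the isolated 8-9 window stays separate.
assert merge_ranges([(4, 6), (2, 4), (8, 9)]) == [(2, 6), (8, 9)]
```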
haiku/rag/config.py
CHANGED
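The config.py hunk itself is not rendered here, but the summary above shows a single added line, and client.py now reads `Config.CONTEXT_CHUNK_RADIUS` (with `0` disabling expansion). A plausible sketch of that setting, hedged because the real default and env-var plumbing are not visible in this diff:

```python
# Assumed shape of the new config line; the name matches the references in
# client.py, but the default value and env lookup are guesses.
import os

CONTEXT_CHUNK_RADIUS: int = int(os.getenv("CONTEXT_CHUNK_RADIUS", "1"))
```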
haiku/rag/qa/__init__.py
CHANGED
@@ -4,12 +4,16 @@ from haiku.rag.qa.base import QuestionAnswerAgentBase
 from haiku.rag.qa.ollama import QuestionAnswerOllamaAgent
 
 
-def get_qa_agent(client: HaikuRAG, model: str = "") -> QuestionAnswerAgentBase:
+def get_qa_agent(
+    client: HaikuRAG, model: str = "", use_citations: bool = False
+) -> QuestionAnswerAgentBase:
     """
     Factory function to get the appropriate QA agent based on the configuration.
     """
     if Config.QA_PROVIDER == "ollama":
-        return QuestionAnswerOllamaAgent(client, model or Config.QA_MODEL)
+        return QuestionAnswerOllamaAgent(
+            client, model or Config.QA_MODEL, use_citations
+        )
 
     if Config.QA_PROVIDER == "openai":
         try:
@@ -20,7 +24,9 @@ def get_qa_agent(client: HaikuRAG, model: str = "") -> QuestionAnswerAgentBase:
             "Please install haiku.rag with the 'openai' extra:"
             "uv pip install haiku.rag[openai]"
         )
-        return QuestionAnswerOpenAIAgent(client, model or Config.QA_MODEL)
+        return QuestionAnswerOpenAIAgent(
+            client, model or Config.QA_MODEL, use_citations
+        )
 
     if Config.QA_PROVIDER == "anthropic":
         try:
@@ -31,6 +37,8 @@ def get_qa_agent(client: HaikuRAG, model: str = "") -> QuestionAnswerAgentBase:
             "Please install haiku.rag with the 'anthropic' extra:"
             "uv pip install haiku.rag[anthropic]"
         )
-        return QuestionAnswerAnthropicAgent(client, model or Config.QA_MODEL)
+        return QuestionAnswerAnthropicAgent(
+            client, model or Config.QA_MODEL, use_citations
+        )
 
     raise ValueError(f"Unsupported QA provider: {Config.QA_PROVIDER}")
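A quick sketch of the extended factory in use; it assumes `Config.QA_PROVIDER` names one of the three supported providers, and the question is illustrative:

```python
# Hypothetical end-to-end call through the factory.
from haiku.rag.client import HaikuRAG
from haiku.rag.qa import get_qa_agent

async def answer_with_citations(db_path: str, question: str) -> str:
    async with HaikuRAG(db_path) as client:
        # use_citations=True selects SYSTEM_PROMPT_WITH_CITATIONS in the
        # base agent; the provider comes from Config.QA_PROVIDER.
        agent = get_qa_agent(client, use_citations=True)
        return await agent.answer(question)
```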
haiku/rag/qa/anthropic.py
CHANGED
@@ -1,19 +1,29 @@
 from collections.abc import Sequence
 
 try:
-    from anthropic import AsyncAnthropic
-    from anthropic.types import MessageParam, TextBlock, ToolParam, ToolUseBlock
+    from anthropic import AsyncAnthropic  # type: ignore
+    from anthropic.types import (  # type: ignore
+        MessageParam,
+        TextBlock,
+        ToolParam,
+        ToolUseBlock,
+    )
 
     from haiku.rag.client import HaikuRAG
     from haiku.rag.qa.base import QuestionAnswerAgentBase
 
     class QuestionAnswerAnthropicAgent(QuestionAnswerAgentBase):
-        def __init__(self, client: HaikuRAG, model: str = "claude-3-5-haiku-20241022"):
-            super().__init__(client, model or self._model)
+        def __init__(
+            self,
+            client: HaikuRAG,
+            model: str = "claude-3-5-haiku-20241022",
+            use_citations: bool = False,
+        ):
+            super().__init__(client, model or self._model, use_citations)
             self.tools: Sequence[ToolParam] = [
                 ToolParam(
                     name="search_documents",
-                    description="Search the knowledge base for relevant documents",
+                    description="Search the knowledge base for relevant documents. Returns a JSON array with content, score, and document_uri for each result.",
                     input_schema={
                         "type": "object",
                         "properties": {
@@ -69,18 +79,10 @@ try:
                             else 3
                         )
 
-                        search_results = await self._client.search(
+                        context = await self._search_and_expand(
                             query, limit=limit
                         )
 
-                        context_chunks = []
-                        for chunk, score in search_results:
-                            context_chunks.append(
-                                f"Content: {chunk.content}\nScore: {score:.4f}"
-                            )
-
-                        context = "\n\n".join(context_chunks)
-
                         tool_results.append(
                             {
                                 "type": "tool_result",
haiku/rag/qa/base.py
CHANGED
@@ -1,26 +1,50 @@
+import json
+
 from haiku.rag.client import HaikuRAG
-from haiku.rag.qa.prompts import SYSTEM_PROMPT
+from haiku.rag.qa.prompts import SYSTEM_PROMPT, SYSTEM_PROMPT_WITH_CITATIONS
 
 
 class QuestionAnswerAgentBase:
     _model: str = ""
     _system_prompt: str = SYSTEM_PROMPT
 
-    def __init__(self, client: HaikuRAG, model: str = ""):
+    def __init__(self, client: HaikuRAG, model: str = "", use_citations: bool = False):
         self._model = model
         self._client = client
+        self._system_prompt = (
+            SYSTEM_PROMPT_WITH_CITATIONS if use_citations else SYSTEM_PROMPT
+        )
 
     async def answer(self, question: str) -> str:
        raise NotImplementedError(
            "QABase is an abstract class. Please implement the answer method in a subclass."
        )
 
+    async def _search_and_expand(self, query: str, limit: int = 3) -> str:
+        """Search for documents and expand context, then format as JSON"""
+        search_results = await self._client.search(query, limit=limit)
+        expanded_results = await self._client.expand_context(search_results)
+        return self._format_search_results(expanded_results)
+
+    def _format_search_results(self, search_results) -> str:
+        """Format search results as JSON list of {content, score, document_uri}"""
+        formatted_results = []
+        for chunk, score in search_results:
+            formatted_results.append(
+                {
+                    "content": chunk.content,
+                    "score": score,
+                    "document_uri": chunk.document_uri,
+                }
+            )
+        return json.dumps(formatted_results, indent=2)
+
     tools = [
         {
             "type": "function",
             "function": {
                 "name": "search_documents",
-                "description": "Search the knowledge base for relevant documents",
+                "description": "Search the knowledge base for relevant documents. Returns a JSON array of search results.",
                 "parameters": {
                     "type": "object",
                     "properties": {
@@ -36,6 +60,30 @@ class QuestionAnswerAgentBase:
                     },
                     "required": ["query"],
                 },
+                "returns": {
+                    "type": "string",
+                    "description": "JSON array of search results",
+                    "schema": {
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "content": {
+                                    "type": "string",
+                                    "description": "The document text content",
+                                },
+                                "score": {
+                                    "type": "number",
+                                    "description": "Relevance score (higher is more relevant)",
+                                },
+                                "document_uri": {
+                                    "type": "string",
+                                    "description": "Source URI/path of the document",
+                                },
+                            },
+                        },
+                    },
+                },
             },
         }
    ]
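What the model now receives from the `search_documents` tool is the JSON emitted by `_format_search_results`. A sketch of that payload for two hits (the contents, scores, and URIs are made up):

```python
# Illustrative tool output; real values come from search plus expansion.
import json

example_results = [
    {"content": "haiku.rag is a RAG library built on SQLite...",
     "score": 0.87, "document_uri": "file:///docs/readme.md"},
    {"content": "Configuration is read from environment variables...",
     "score": 0.54, "document_uri": "file:///docs/configuration.md"},
]
print(json.dumps(example_results, indent=2))  # same shape base.py produces
```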
haiku/rag/qa/ollama.py
CHANGED
@@ -8,8 +8,13 @@ OLLAMA_OPTIONS = {"temperature": 0.0, "seed": 42, "num_ctx": 16384}
 
 
 class QuestionAnswerOllamaAgent(QuestionAnswerAgentBase):
-    def __init__(self, client: HaikuRAG, model: str = Config.QA_MODEL):
-        super().__init__(client, model or self._model)
+    def __init__(
+        self,
+        client: HaikuRAG,
+        model: str = Config.QA_MODEL,
+        use_citations: bool = False,
+    ):
+        super().__init__(client, model or self._model, use_citations)
 
     async def answer(self, question: str) -> str:
         ollama_client = AsyncClient(host=Config.OLLAMA_BASE_URL)
@@ -39,16 +44,7 @@ class QuestionAnswerOllamaAgent(QuestionAnswerAgentBase):
             query = args.get("query", question)
             limit = int(args.get("limit", 3))
 
-            search_results = await self._client.search(query, limit=limit)
-
-            context_chunks = []
-            for chunk, score in search_results:
-                context_chunks.append(
-                    f"Content: {chunk.content}\nScore: {score:.4f}"
-                )
-
-            context = "\n\n".join(context_chunks)
-
+            context = await self._search_and_expand(query, limit=limit)
             messages.append(
                 {
                     "role": "tool",
haiku/rag/qa/openai.py
CHANGED
@@ -1,22 +1,29 @@
 from collections.abc import Sequence
 
 try:
-    from openai import AsyncOpenAI
-    from openai.types.chat import (
+    from openai import AsyncOpenAI  # type: ignore
+    from openai.types.chat import (  # type: ignore
         ChatCompletionAssistantMessageParam,
         ChatCompletionMessageParam,
         ChatCompletionSystemMessageParam,
         ChatCompletionToolMessageParam,
         ChatCompletionUserMessageParam,
     )
-    from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
+    from openai.types.chat.chat_completion_tool_param import (  # type: ignore
+        ChatCompletionToolParam,
+    )
 
     from haiku.rag.client import HaikuRAG
     from haiku.rag.qa.base import QuestionAnswerAgentBase
 
     class QuestionAnswerOpenAIAgent(QuestionAnswerAgentBase):
-        def __init__(self, client: HaikuRAG, model: str = "gpt-4o-mini"):
-            super().__init__(client, model or self._model)
+        def __init__(
+            self,
+            client: HaikuRAG,
+            model: str = "gpt-4o-mini",
+            use_citations: bool = False,
+        ):
+            super().__init__(client, model or self._model, use_citations)
             self.tools: Sequence[ChatCompletionToolParam] = [
                 ChatCompletionToolParam(tool) for tool in self.tools
             ]
@@ -70,17 +77,7 @@ try:
             query = args.get("query", question)
             limit = int(args.get("limit", 3))
 
-            search_results = await self._client.search(
-                query, limit=limit
-            )
-
-            context_chunks = []
-            for chunk, score in search_results:
-                context_chunks.append(
-                    f"Content: {chunk.content}\nScore: {score:.4f}"
-                )
-
-            context = "\n\n".join(context_chunks)
+            context = await self._search_and_expand(query, limit=limit)
 
             messages.append(
                 ChatCompletionToolMessageParam(
haiku/rag/qa/prompts.py
CHANGED
@@ -19,3 +19,40 @@ Guidelines:
 
 Be concise, and always maintain accuracy over completeness. Prefer short, direct answers that are well-supported by the documents.
 """
+
+SYSTEM_PROMPT_WITH_CITATIONS = """
+You are a knowledgeable assistant that helps users find information from a document knowledge base.
+
+IMPORTANT: You MUST use the search_documents tool for every question. Do not answer any question without first searching the knowledge base.
+
+Your process:
+1. IMMEDIATELY call the search_documents tool with relevant keywords from the user's question
+2. Review the search results and their relevance scores
+3. If you need additional context, perform follow-up searches with different keywords
+4. Provide a short and to the point comprehensive answer based only on the retrieved documents
+5. Always include citations for the sources used in your answer
+
+Guidelines:
+- Base your answers strictly on the provided document content
+- If multiple documents contain relevant information, synthesize them coherently
+- Indicate when information is incomplete or when you need to search for additional context
+- If the retrieved documents don't contain sufficient information, clearly state: "I cannot find enough information in the knowledge base to answer this question."
+- For complex questions, consider breaking them down and performing multiple searches
+- Stick to the answer, do not ellaborate or provide context unless explicitly asked for it.
+- ALWAYS include citations at the end of your response using the format below
+
+Citation Format:
+After your answer, include a "Citations:" section that lists:
+- The document URI from each search result used
+- A brief excerpt (first 50-100 characters) of the content that supported your answer
+- Format: "Citations:\n- [document_uri]: [content_excerpt]..."
+
+Example response format:
+[Your answer here]
+
+Citations:
+- /path/to/document1.pdf: "This document explains that AFMAN stands for Air Force Manual..."
+- /path/to/document2.pdf: "The manual provides guidance on military procedures and..."
+
+Be concise, and always maintain accuracy over completeness. Prefer short, direct answers that are well-supported by the documents.
+"""
haiku/rag/store/repositories/chunk.py
CHANGED
@@ -468,3 +468,49 @@ class ChunkRepository(BaseRepository[Chunk]):
             )
             for chunk_id, document_id, content, metadata_json, document_uri, document_metadata_json in rows
         ]
+
+    async def get_adjacent_chunks(self, chunk: Chunk, num_adjacent: int) -> list[Chunk]:
+        """Get adjacent chunks before and after the given chunk within the same document."""
+        if self.store._connection is None:
+            raise ValueError("Store connection is not available")
+        if chunk.document_id is None:
+            return []
+
+        cursor = self.store._connection.cursor()
+        chunk_order = chunk.metadata.get("order")
+        if chunk_order is None:
+            return []
+
+        # Get adjacent chunks within the same document
+        cursor.execute(
+            """
+            SELECT c.id, c.document_id, c.content, c.metadata, d.uri, d.metadata as document_metadata
+            FROM chunks c
+            JOIN documents d ON c.document_id = d.id
+            WHERE c.document_id = :document_id
+            AND JSON_EXTRACT(c.metadata, '$.order') BETWEEN :start_order AND :end_order
+            AND c.id != :chunk_id
+            ORDER BY JSON_EXTRACT(c.metadata, '$.order')
+            """,
+            {
+                "document_id": chunk.document_id,
+                "start_order": max(0, chunk_order - num_adjacent),
+                "end_order": chunk_order + num_adjacent,
+                "chunk_id": chunk.id,
+            },
+        )
+
+        rows = cursor.fetchall()
+        return [
+            Chunk(
+                id=chunk_id,
+                document_id=document_id,
+                content=content,
+                metadata=json.loads(metadata_json) if metadata_json else {},
+                document_uri=document_uri,
+                document_meta=json.loads(document_metadata_json)
+                if document_metadata_json
+                else {},
+            )
+            for chunk_id, document_id, content, metadata_json, document_uri, document_metadata_json in rows
+        ]
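The neighbour lookup is plain SQLite: `JSON_EXTRACT` pulls the chunk's `order` out of its metadata, and a `BETWEEN` window selects the surrounding rows. A standalone sketch of the same query against a toy table (the schema and rows are made up; requires an SQLite build with the JSON1 functions, which Python's bundled SQLite normally has):

```python
# Self-contained illustration of the window used by get_adjacent_chunks.
import json
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE chunks (id INTEGER, document_id INTEGER, metadata TEXT)")
con.executemany(
    "INSERT INTO chunks VALUES (?, ?, ?)",
    [(i, 1, json.dumps({"order": i})) for i in range(10)],
)

chunk_order, radius = 5, 2  # plays the role of chunk_order / num_adjacent
rows = con.execute(
    """
    SELECT id FROM chunks
    WHERE document_id = :doc
      AND JSON_EXTRACT(metadata, '$.order') BETWEEN :lo AND :hi
      AND id != :me
    ORDER BY JSON_EXTRACT(metadata, '$.order')
    """,
    {"doc": 1, "lo": max(0, chunk_order - radius), "hi": chunk_order + radius, "me": 5},
).fetchall()
print([r[0] for r in rows])  # [3, 4, 6, 7]  (the neighbours of chunk 5)
```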
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: haiku.rag
|
|
3
|
-
Version: 0.5.
|
|
3
|
+
Version: 0.5.4
|
|
4
4
|
Summary: Retrieval Augmented Generation (RAG) with SQLite
|
|
5
5
|
Author-email: Yiorgis Gozadinos <ggozadinos@gmail.com>
|
|
6
6
|
License: MIT
|
|
@@ -76,6 +76,9 @@ haiku-rag search "query"
|
|
|
76
76
|
# Ask questions
|
|
77
77
|
haiku-rag ask "Who is the author of haiku.rag?"
|
|
78
78
|
|
|
79
|
+
# Ask questions with citations
|
|
80
|
+
haiku-rag ask "Who is the author of haiku.rag?" --cite
|
|
81
|
+
|
|
79
82
|
# Rebuild database (re-chunk and re-embed all documents)
|
|
80
83
|
haiku-rag rebuild
|
|
81
84
|
|
|
@@ -101,6 +104,10 @@ async with HaikuRAG("database.db") as client:
|
|
|
101
104
|
# Ask questions
|
|
102
105
|
answer = await client.ask("Who is the author of haiku.rag?")
|
|
103
106
|
print(answer)
|
|
107
|
+
|
|
108
|
+
# Ask questions with citations
|
|
109
|
+
answer = await client.ask("Who is the author of haiku.rag?", cite=True)
|
|
110
|
+
print(answer)
|
|
104
111
|
```
|
|
105
112
|
|
|
106
113
|
## MCP Server
|
|
{haiku_rag-0.5.2.dist-info → haiku_rag-0.5.4.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
 haiku/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-haiku/rag/app.py,sha256=
+haiku/rag/app.py,sha256=k45EOz-rbYg_8RSII3btqsZo2TpGqj3ysamFehhaCGo,7673
 haiku/rag/chunker.py,sha256=PVe6ysv8UlacUd4Zb3_8RFWIaWDXnzBAy2VDJ4TaUsE,1555
-haiku/rag/cli.py,sha256=
-haiku/rag/client.py,sha256=
-haiku/rag/config.py,sha256=
+haiku/rag/cli.py,sha256=mGpdnEH8rS-rZLGmE4MbcDci8uexci7UkGTdCxrz1Lg,5987
+haiku/rag/client.py,sha256=CTc4OJ-rnAI3pcjQgazK7B06wkNLP6wYXD1spQtXXzg,20961
+haiku/rag/config.py,sha256=oLrmwGp1OjcKPpJFnf9GgTpoBSOXalFWO6PCKFwQe0w,1615
 haiku/rag/logging.py,sha256=zTTGpGq5tPdcd7RpCbd9EGw1IZlQDbYkrCg9t9pqRc4,580
 haiku/rag/mcp.py,sha256=tMN6fNX7ZtAER1R6DL1GkC9HZozTC4HzuQs199p7icI,4551
 haiku/rag/monitor.py,sha256=r386nkhdlsU8UECwIuVwnrSlgMk3vNIuUZGNIzkZuec,2770
@@ -14,12 +14,12 @@ haiku/rag/embeddings/base.py,sha256=NTQvuzbZPu0LBo5wAu3qGyJ4xXUaRAt1fjBO0ygWn_Y,
 haiku/rag/embeddings/ollama.py,sha256=y6-lp0XpbnyIjoOEdtSzMdEVkU5glOwnWQ1FkpUZnpI,370
 haiku/rag/embeddings/openai.py,sha256=i4Ui5hAJkcKqJkH9L3jJo7fuGYHn07td532w-ksg_T8,431
 haiku/rag/embeddings/voyageai.py,sha256=0hiRTIqu-bpl-4OaCtMHvWfPdgbrzhnfZJowSV8pLRA,415
-haiku/rag/qa/__init__.py,sha256=
-haiku/rag/qa/anthropic.py,sha256=
-haiku/rag/qa/base.py,sha256=
-haiku/rag/qa/ollama.py,sha256=
-haiku/rag/qa/openai.py,sha256=
-haiku/rag/qa/prompts.py,sha256=
+haiku/rag/qa/__init__.py,sha256=vC9S6cvZtPz-UfA_v4DMwI7eam6567BXNrUwHsMo_i8,1633
+haiku/rag/qa/anthropic.py,sha256=o0RVn7lcdYvoCUGXh551jeuoB3ANJSZ7uz2R_h_pZ2w,4321
+haiku/rag/qa/base.py,sha256=dCX14ifJW4QMCNFP_pmss9SYWM9Qm1cSWZrMl6A-2C8,3541
+haiku/rag/qa/ollama.py,sha256=3T9ciKWpCIY7jejvdrsMC_wIvGRWQEWA0AwKjOlX35M,2131
+haiku/rag/qa/openai.py,sha256=4BFc8pzFI-CTDxxKMskMxMKkacvUoRTVWI8kKntl3Jw,3718
+haiku/rag/qa/prompts.py,sha256=WTA66brySfzIkuDZ_hRQQKGx12ngIu9nUDKMNGg2-Bg,3321
 haiku/rag/reranking/__init__.py,sha256=fwC3pauteJwh9Ulm2270QvwAdwr4NMr4RUEuolC-wKU,1063
 haiku/rag/reranking/base.py,sha256=LM9yUSSJ414UgBZhFTgxGprlRqzfTe4I1vgjricz2JY,405
 haiku/rag/reranking/cohere.py,sha256=1iTdiaa8vvb6oHVB2qpWzUOVkyfUcimVSZp6Qr4aq4c,1049
@@ -32,13 +32,13 @@ haiku/rag/store/models/chunk.py,sha256=9-vIxW75-kMTelIhgVIMd_WhP-Drc1q65vjaWMP8w
 haiku/rag/store/models/document.py,sha256=TVXVY-nQs-1vCORQEs9rA7zOtndeGC4dgCoujLAS054,396
 haiku/rag/store/repositories/__init__.py,sha256=uIBhxjQh-4o3O-ck8b7BQ58qXQTuJdPvrDIHVhY5T1A,263
 haiku/rag/store/repositories/base.py,sha256=cm3VyQXhtxvRfk1uJHpA0fDSxMpYN-mjQmRiDiLsQ68,1008
-haiku/rag/store/repositories/chunk.py,sha256=
+haiku/rag/store/repositories/chunk.py,sha256=R8dvNy3po2FspZvkWKZTGlqccbekLjY39GroXRfAU18,18808
 haiku/rag/store/repositories/document.py,sha256=ki8LiDukwU1469Yw51i0rQFvBzUQeYkFYWs3Ly83akc,8815
 haiku/rag/store/repositories/settings.py,sha256=qZLXvLsErnCWL0nBQQNfRnatHzCKhtUDLvUK9k-W_fU,2463
 haiku/rag/store/upgrades/__init__.py,sha256=kKS1YWT_P-CYKhKtokOLTIFNKf9jlfjFFr8lyIMeogM,100
 haiku/rag/store/upgrades/v0_3_4.py,sha256=GLogKZdZ40NX1vBHKdOJju7fFzNUCHoEnjSZg17Hm2U,663
-haiku_rag-0.5.
-haiku_rag-0.5.
-haiku_rag-0.5.
-haiku_rag-0.5.
-haiku_rag-0.5.
+haiku_rag-0.5.4.dist-info/METADATA,sha256=hUovrigbcJX6I3vewMVXut3QaI-PXe5BiDzs84noBts,4455
+haiku_rag-0.5.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+haiku_rag-0.5.4.dist-info/entry_points.txt,sha256=G1U3nAkNd5YDYd4v0tuYFbriz0i-JheCsFuT9kIoGCI,48
+haiku_rag-0.5.4.dist-info/licenses/LICENSE,sha256=eXZrWjSk9PwYFNK9yUczl3oPl95Z4V9UXH7bPN46iPo,1065
+haiku_rag-0.5.4.dist-info/RECORD,,
File without changes
|
|
File without changes
|
|
File without changes
|