haiku.rag 0.3.2__tar.gz → 0.3.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/PKG-INFO +2 -1
  2. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/README.md +1 -0
  3. haiku_rag-0.3.4/docs/benchmarks.md +28 -0
  4. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/docs/configuration.md +3 -0
  5. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/mkdocs.yml +1 -0
  6. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/pyproject.toml +2 -2
  7. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/app.py +1 -1
  8. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/cli.py +18 -1
  9. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/client.py +23 -21
  10. haiku_rag-0.3.4/src/haiku/rag/qa/anthropic.py +106 -0
  11. haiku_rag-0.3.4/src/haiku/rag/qa/ollama.py +64 -0
  12. haiku_rag-0.3.4/src/haiku/rag/qa/openai.py +100 -0
  13. haiku_rag-0.3.4/src/haiku/rag/qa/prompts.py +20 -0
  14. haiku_rag-0.3.4/src/haiku/rag/store/engine.py +166 -0
  15. haiku_rag-0.3.4/src/haiku/rag/store/repositories/settings.py +78 -0
  16. haiku_rag-0.3.4/src/haiku/rag/store/upgrades/__init__.py +3 -0
  17. haiku_rag-0.3.4/src/haiku/rag/store/upgrades/v0_3_4.py +26 -0
  18. haiku_rag-0.3.4/src/haiku/rag/utils.py +80 -0
  19. haiku_rag-0.3.4/tests/generate_benchmark_db.py +151 -0
  20. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/llm_judge.py +1 -0
  21. haiku_rag-0.3.4/tests/test_client.py +451 -0
  22. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_monitor.py +6 -14
  23. haiku_rag-0.3.4/tests/test_rebuild.py +49 -0
  24. haiku_rag-0.3.4/tests/test_settings.py +80 -0
  25. haiku_rag-0.3.4/tests/test_utils.py +15 -0
  26. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/uv.lock +5 -5
  27. haiku_rag-0.3.2/BENCHMARKS.md +0 -13
  28. haiku_rag-0.3.2/src/haiku/rag/qa/anthropic.py +0 -112
  29. haiku_rag-0.3.2/src/haiku/rag/qa/ollama.py +0 -67
  30. haiku_rag-0.3.2/src/haiku/rag/qa/openai.py +0 -101
  31. haiku_rag-0.3.2/src/haiku/rag/qa/prompts.py +0 -7
  32. haiku_rag-0.3.2/src/haiku/rag/store/engine.py +0 -80
  33. haiku_rag-0.3.2/src/haiku/rag/utils.py +0 -25
  34. haiku_rag-0.3.2/tests/generate_benchmark_db.py +0 -129
  35. haiku_rag-0.3.2/tests/test_client.py +0 -499
  36. haiku_rag-0.3.2/tests/test_rebuild.py +0 -52
  37. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/.github/FUNDING.yml +0 -0
  38. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/.github/workflows/build-docs.yml +0 -0
  39. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/.github/workflows/build-publish.yml +0 -0
  40. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/.gitignore +0 -0
  41. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/.pre-commit-config.yaml +0 -0
  42. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/.python-version +0 -0
  43. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/LICENSE +0 -0
  44. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/docs/cli.md +0 -0
  45. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/docs/index.md +0 -0
  46. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/docs/installation.md +0 -0
  47. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/docs/mcp.md +0 -0
  48. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/docs/python.md +0 -0
  49. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/docs/server.md +0 -0
  50. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/__init__.py +0 -0
  51. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/chunker.py +0 -0
  52. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/config.py +0 -0
  53. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/embeddings/__init__.py +0 -0
  54. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/embeddings/base.py +0 -0
  55. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/embeddings/ollama.py +0 -0
  56. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/embeddings/openai.py +0 -0
  57. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/embeddings/voyageai.py +0 -0
  58. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/logging.py +0 -0
  59. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/mcp.py +0 -0
  60. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/monitor.py +0 -0
  61. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/qa/__init__.py +0 -0
  62. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/qa/base.py +0 -0
  63. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/reader.py +0 -0
  64. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/__init__.py +0 -0
  65. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/models/__init__.py +0 -0
  66. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/models/chunk.py +0 -0
  67. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/models/document.py +0 -0
  68. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/repositories/__init__.py +0 -0
  69. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/repositories/base.py +0 -0
  70. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/repositories/chunk.py +0 -0
  71. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/src/haiku/rag/store/repositories/document.py +0 -0
  72. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/__init__.py +0 -0
  73. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/conftest.py +0 -0
  74. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_app.py +0 -0
  75. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_chunk.py +0 -0
  76. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_chunker.py +0 -0
  77. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_cli.py +0 -0
  78. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_document.py +0 -0
  79. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_embedder.py +0 -0
  80. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_qa.py +0 -0
  81. {haiku_rag-0.3.2 → haiku_rag-0.3.4}/tests/test_search.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: haiku.rag
- Version: 0.3.2
+ Version: 0.3.4
  Summary: Retrieval Augmented Generation (RAG) with SQLite
  Author-email: Yiorgis Gozadinos <ggozadinos@gmail.com>
  License: MIT
@@ -116,3 +116,4 @@ Full documentation at: https://ggozad.github.io/haiku.rag/
  - [Configuration](https://ggozad.github.io/haiku.rag/configuration/) - Environment variables
  - [CLI](https://ggozad.github.io/haiku.rag/cli/) - Command reference
  - [Python API](https://ggozad.github.io/haiku.rag/python/) - Complete API docs
+ - [Benchmarks](https://ggozad.github.io/haiku.rag/benchmarks/) - Performance Benchmarks
@@ -77,3 +77,4 @@ Full documentation at: https://ggozad.github.io/haiku.rag/
  - [Configuration](https://ggozad.github.io/haiku.rag/configuration/) - Environment variables
  - [CLI](https://ggozad.github.io/haiku.rag/cli/) - Command reference
  - [Python API](https://ggozad.github.io/haiku.rag/python/) - Complete API docs
+ - [Benchmarks](https://ggozad.github.io/haiku.rag/benchmarks/) - Performance Benchmarks
@@ -0,0 +1,28 @@
+ # Benchmarks
+
+ We use the [repliqa](https://huggingface.co/datasets/ServiceNow/repliqa) dataset for the evaluation of `haiku.rag`.
+
+ You can perform your own evaluations using the script at `tests/generate_benchmark_db.py` as an example.
+
+ ## Recall
+
+ To calculate recall, we load the `News Stories` documents from `repliqa_3` (1035 documents) and index them in a SQLite database. Subsequently, we run a search over the `question` field for each row of the dataset and check whether we match the document that answers the question.
+
+ Recall ranges from ~0.74 to ~0.77 for a match in the top result, rising to ~0.88-0.89 for the top 3 results:
+
+ | Model | Document in top 1 | Document in top 3 |
+ |---------------------------------------|-------------------|-------------------|
+ | Ollama / `mxbai-embed-large` | 0.77 | 0.89 |
+ | Ollama / `nomic-embed-text` | 0.74 | 0.88 |
+ | OpenAI / `text-embedding-3-small` | 0.75 | 0.88 |
+
+ ## Question/Answer evaluation
+
+ Using the same dataset, we have a QA agent answer each question, and an LLM judge (`qwen3` via Ollama) evaluate whether the answer is correct. The obtained accuracy is as follows:
+
+ | Embedding Model | QA Model | Accuracy |
+ |------------------------------|-----------------------------------|-----------|
+ | Ollama / `mxbai-embed-large` | Ollama / `qwen3` | 0.64 |
+ | Ollama / `mxbai-embed-large` | Anthropic / `Claude 3.7 Sonnet` | 0.79 |
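
The recall setup described above can be reproduced with a short script. A minimal sketch follows (it is not the benchmark script itself); the split name and the `document_id`, `question`, `document_topic`, and `document_extracted` field names are assumptions to verify against the dataset card, as is `chunk.document_id`:

```python
import asyncio

from datasets import load_dataset  # assumed dependency, not part of haiku.rag

from haiku.rag.client import HaikuRAG


async def recall_at_k(k: int = 3) -> float:
    # Split and field names are assumptions; verify against the repliqa dataset card.
    rows = [
        r
        for r in load_dataset("ServiceNow/repliqa", split="repliqa_3")
        if r["document_topic"] == "News Stories"
    ]

    async with HaikuRAG(db_path=":memory:") as client:
        # Index each unique document once, remembering its database ID.
        indexed: dict[str, int | None] = {}
        for row in rows:
            if row["document_id"] not in indexed:
                doc = await client.create_document(
                    content=row["document_extracted"], uri=row["document_id"]
                )
                indexed[row["document_id"]] = doc.id

        hits = 0
        for row in rows:
            # search() yields (chunk, score) pairs; chunk.document_id is assumed
            # to point back at the parent document.
            results = await client.search(row["question"], limit=k)
            if any(c.document_id == indexed[row["document_id"]] for c, _ in results):
                hits += 1
        return hits / len(rows)


print(asyncio.run(recall_at_k(k=3)))
```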
@@ -2,6 +2,9 @@
 
  Configuration is done through the use of environment variables.
 
+ !!! note
+     If you create a db with certain settings and later change them, `haiku.rag` will detect incompatibilities (for example, if you change embedding provider) and will exit. You can **rebuild** the database to apply the new settings, see [Rebuild Database](./cli.md#rebuild-database).
+
  ## File Monitoring
 
  Set directories to monitor for automatic indexing:
@@ -63,6 +63,7 @@ nav:
  - Server: server.md
  - MCP: mcp.md
  - Python: python.md
+ - Benchmarks: benchmarks.md
  markdown_extensions:
  - admonition
  - attr_list
@@ -1,6 +1,6 @@
  [project]
  name = "haiku.rag"
- version = "0.3.2"
+ version = "0.3.4"
  description = "Retrieval Augmented Generation (RAG) with SQLite"
  authors = [{ name = "Yiorgis Gozadinos", email = "ggozadinos@gmail.com" }]
  license = { text = "MIT" }
@@ -56,7 +56,7 @@ dev = [
  "mkdocs>=1.6.1",
  "mkdocs-material>=9.6.14",
  "pre-commit>=4.2.0",
- "pyright>=1.1.402",
+ "pyright>=1.1.403",
  "pytest>=8.4.0",
  "pytest-asyncio>=1.0.0",
  "pytest-cov>=6.2.1",
@@ -74,7 +74,7 @@ class HaikuRAGApp:
              self.console.print(f"[red]Error: {e}[/red]")
 
      async def rebuild(self):
-         async with HaikuRAG(db_path=self.db_path) as client:
+         async with HaikuRAG(db_path=self.db_path, skip_validation=True) as client:
              try:
                  documents = await client.list_documents()
                  total_docs = len(documents)
@@ -5,7 +5,7 @@ import typer
  from rich.console import Console
 
  from haiku.rag.app import HaikuRAGApp
- from haiku.rag.utils import get_default_data_dir
+ from haiku.rag.utils import get_default_data_dir, is_up_to_date
 
  cli = typer.Typer(
      context_settings={"help_option_names": ["-h", "--help"]}, no_args_is_help=True
@@ -15,6 +15,23 @@ console = Console()
  event_loop = asyncio.get_event_loop()
 
 
+ async def check_version():
+     """Check if haiku.rag is up to date and show warning if not."""
+     up_to_date, current_version, latest_version = await is_up_to_date()
+     if not up_to_date:
+         console.print(
+             f"[yellow]Warning: haiku.rag is outdated. Current: {current_version}, Latest: {latest_version}[/yellow]"
+         )
+         console.print("[yellow]Please update.[/yellow]")
+
+
+ @cli.callback()
+ def main():
+     """haiku.rag CLI - SQLite-based RAG system"""
+     # Run version check before any command
+     event_loop.run_until_complete(check_version())
+
+
  @cli.command("list", help="List all stored documents")
  def list_documents(
      db: Path = typer.Option(
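
The `is_up_to_date` helper imported here lives in the new `src/haiku/rag/utils.py`, whose body does not appear in this diff. A plausible sketch of what it does, assuming it compares the installed version against PyPI's JSON API via `httpx`:

```python
# Plausible sketch of is_up_to_date(); the real implementation is in
# src/haiku/rag/utils.py and is not shown in this diff. The PyPI JSON API
# endpoint and the use of httpx are assumptions.
from importlib import metadata

import httpx
from packaging.version import parse


async def is_up_to_date() -> tuple[bool, str, str]:
    """Compare the installed haiku.rag version against the latest on PyPI."""
    current = metadata.version("haiku.rag")
    async with httpx.AsyncClient() as client:
        response = await client.get("https://pypi.org/pypi/haiku.rag/json")
        latest = response.json()["info"]["version"]
    return parse(current) >= parse(latest), current, latest
```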
@@ -24,12 +24,13 @@ class HaikuRAG:
          self,
          db_path: Path | Literal[":memory:"] = Config.DEFAULT_DATA_DIR
          / "haiku.rag.sqlite",
+         skip_validation: bool = False,
      ):
          """Initialize the RAG client with a database path."""
          if isinstance(db_path, Path):
              if not db_path.parent.exists():
                  Path.mkdir(db_path.parent, parents=True)
-         self.store = Store(db_path)
+         self.store = Store(db_path, skip_validation=skip_validation)
          self.document_repository = DocumentRepository(self.store)
          self.chunk_repository = ChunkRepository(self.store)
@@ -165,29 +166,26 @@ class HaikuRAG:
 
          # Create a temporary file with the appropriate extension
          with tempfile.NamedTemporaryFile(
-             mode="wb", suffix=file_extension, delete=False
+             mode="wb", suffix=file_extension
          ) as temp_file:
              temp_file.write(response.content)
+             temp_file.flush()  # Ensure content is written to disk
              temp_path = Path(temp_file.name)
 
-         try:
              # Parse the content using FileReader
              content = FileReader.parse_file(temp_path)
 
-             # Merge metadata with contentType and md5
-             metadata.update({"contentType": content_type, "md5": md5_hash})
-
-             if existing_doc:
-                 existing_doc.content = content
-                 existing_doc.metadata = metadata
-                 return await self.update_document(existing_doc)
-             else:
-                 return await self.create_document(
-                     content=content, uri=url, metadata=metadata
-                 )
-         finally:
-             # Clean up temporary file
-             temp_path.unlink(missing_ok=True)
+             # Merge metadata with contentType and md5
+             metadata.update({"contentType": content_type, "md5": md5_hash})
+
+             if existing_doc:
+                 existing_doc.content = content
+                 existing_doc.metadata = metadata
+                 return await self.update_document(existing_doc)
+             else:
+                 return await self.create_document(
+                     content=content, uri=url, metadata=metadata
+                 )
 
      def _get_extension_from_content_type_or_url(
          self, url: str, content_type: str
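
The rewrite drops `delete=False` and the `finally`/`unlink` cleanup: parsing now happens inside the `with` block, so the temporary file is deleted automatically on exit, and the explicit `flush()` ensures the bytes are on disk before `FileReader` reopens the file by path. The pattern in isolation (POSIX semantics; Windows restricts reopening a still-open `NamedTemporaryFile`):

```python
# Standalone sketch of the tempfile pattern above: write, flush, then read the
# file back by path while the handle is still open; the context manager
# deletes it on exit, so no finally/unlink is needed.
import tempfile
from pathlib import Path

with tempfile.NamedTemporaryFile(mode="wb", suffix=".txt") as temp_file:
    temp_file.write(b"downloaded bytes")
    temp_file.flush()  # without this, a reader opening by path may see a partial file
    assert Path(temp_file.name).read_bytes() == b"downloaded bytes"
# the file is gone here
```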
@@ -277,12 +275,16 @@ class HaikuRAG:
          Yields:
              int: The ID of the document currently being processed
          """
-         documents = await self.list_documents()
+         await self.chunk_repository.delete_all()
+         self.store.recreate_embeddings_table()
 
-         if not documents:
-             return
+         # Update settings to current config
+         from haiku.rag.store.repositories.settings import SettingsRepository
 
-         await self.chunk_repository.delete_all()
+         settings_repo = SettingsRepository(self.store)
+         settings_repo.save()
+
+         documents = await self.list_documents()
 
          for doc in documents:
              if doc.id is not None:
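
The rebuild routine is an async generator: it first wipes chunks, recreates the embeddings table, persists the current settings, and then yields each document ID as it is re-indexed, so callers can report progress. A consumption sketch; the method name `rebuild()` is an assumption, since the hunk starts mid-docstring:

```python
# Usage sketch for the rebuild generator (method name assumed to be rebuild()).
import asyncio

from haiku.rag.client import HaikuRAG


async def main():
    # skip_validation lets us open a store whose settings no longer match.
    async with HaikuRAG(db_path=":memory:", skip_validation=True) as client:
        async for doc_id in client.rebuild():
            print(f"re-indexed document {doc_id}")


asyncio.run(main())
```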
@@ -0,0 +1,106 @@
+ from collections.abc import Sequence
+
+ try:
+     from anthropic import AsyncAnthropic
+     from anthropic.types import MessageParam, TextBlock, ToolParam, ToolUseBlock
+
+     from haiku.rag.client import HaikuRAG
+     from haiku.rag.qa.base import QuestionAnswerAgentBase
+
+     class QuestionAnswerAnthropicAgent(QuestionAnswerAgentBase):
+         def __init__(self, client: HaikuRAG, model: str = "claude-3-5-haiku-20241022"):
+             super().__init__(client, model or self._model)
+             self.tools: Sequence[ToolParam] = [
+                 ToolParam(
+                     name="search_documents",
+                     description="Search the knowledge base for relevant documents",
+                     input_schema={
+                         "type": "object",
+                         "properties": {
+                             "query": {
+                                 "type": "string",
+                                 "description": "The search query to find relevant documents",
+                             },
+                             "limit": {
+                                 "type": "integer",
+                                 "description": "Maximum number of results to return",
+                                 "default": 3,
+                             },
+                         },
+                         "required": ["query"],
+                     },
+                 )
+             ]
+
+         async def answer(self, question: str) -> str:
+             anthropic_client = AsyncAnthropic()
+
+             messages: list[MessageParam] = [{"role": "user", "content": question}]
+
+             max_rounds = 5  # Prevent infinite loops
+
+             for _ in range(max_rounds):
+                 response = await anthropic_client.messages.create(
+                     model=self._model,
+                     max_tokens=4096,
+                     system=self._system_prompt,
+                     messages=messages,
+                     tools=self.tools,
+                     temperature=0.0,
+                 )
+
+                 if response.stop_reason == "tool_use":
+                     messages.append({"role": "assistant", "content": response.content})
+
+                     # Process tool calls
+                     tool_results = []
+                     for content_block in response.content:
+                         if isinstance(content_block, ToolUseBlock):
+                             if content_block.name == "search_documents":
+                                 args = content_block.input
+                                 query = (
+                                     args.get("query", question)
+                                     if isinstance(args, dict)
+                                     else question
+                                 )
+                                 limit = (
+                                     int(args.get("limit", 3))
+                                     if isinstance(args, dict)
+                                     else 3
+                                 )
+
+                                 search_results = await self._client.search(
+                                     query, limit=limit
+                                 )
+
+                                 context_chunks = []
+                                 for chunk, score in search_results:
+                                     context_chunks.append(
+                                         f"Content: {chunk.content}\nScore: {score:.4f}"
+                                     )
+
+                                 context = "\n\n".join(context_chunks)
+
+                                 tool_results.append(
+                                     {
+                                         "type": "tool_result",
+                                         "tool_use_id": content_block.id,
+                                         "content": context,
+                                     }
+                                 )
+
+                     if tool_results:
+                         messages.append({"role": "user", "content": tool_results})
+                 else:
+                     # No tool use, return the response
+                     if response.content:
+                         first_content = response.content[0]
+                         if isinstance(first_content, TextBlock):
+                             return first_content.text
+                     return ""
+
+             # If we've exhausted max rounds, return empty string
+             return ""
+
+ except ImportError:
+     pass
@@ -0,0 +1,64 @@
+ from ollama import AsyncClient
+
+ from haiku.rag.client import HaikuRAG
+ from haiku.rag.config import Config
+ from haiku.rag.qa.base import QuestionAnswerAgentBase
+
+ OLLAMA_OPTIONS = {"temperature": 0.0, "seed": 42, "num_ctx": 64000}
+
+
+ class QuestionAnswerOllamaAgent(QuestionAnswerAgentBase):
+     def __init__(self, client: HaikuRAG, model: str = Config.QA_MODEL):
+         super().__init__(client, model or self._model)
+
+     async def answer(self, question: str) -> str:
+         ollama_client = AsyncClient(host=Config.OLLAMA_BASE_URL)
+
+         messages = [
+             {"role": "system", "content": self._system_prompt},
+             {"role": "user", "content": question},
+         ]
+
+         max_rounds = 5  # Prevent infinite loops
+
+         for _ in range(max_rounds):
+             response = await ollama_client.chat(
+                 model=self._model,
+                 messages=messages,
+                 tools=self.tools,
+                 options=OLLAMA_OPTIONS,
+                 think=False,
+             )
+
+             if response.get("message", {}).get("tool_calls"):
+                 messages.append(response["message"])
+
+                 for tool_call in response["message"]["tool_calls"]:
+                     if tool_call["function"]["name"] == "search_documents":
+                         args = tool_call["function"]["arguments"]
+                         query = args.get("query", question)
+                         limit = int(args.get("limit", 3))
+
+                         search_results = await self._client.search(query, limit=limit)
+
+                         context_chunks = []
+                         for chunk, score in search_results:
+                             context_chunks.append(
+                                 f"Content: {chunk.content}\nScore: {score:.4f}"
+                             )
+
+                         context = "\n\n".join(context_chunks)
+
+                         messages.append(
+                             {
+                                 "role": "tool",
+                                 "content": context,
+                                 "tool_call_id": tool_call.get("id", "search_tool"),
+                             }
+                         )
+             else:
+                 # No tool calls, return the response
+                 return response["message"]["content"]
+
+         # If we've exhausted max rounds, return empty string
+         return ""
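
All three QA agents share the `answer()` interface from `QuestionAnswerAgentBase`. A usage sketch with the Ollama agent (requires a running Ollama server; the Anthropic and OpenAI agents are used the same way with their respective API keys):

```python
# Usage sketch: ask a question against an indexed knowledge base.
import asyncio

from haiku.rag.client import HaikuRAG
from haiku.rag.qa.ollama import QuestionAnswerOllamaAgent


async def main():
    async with HaikuRAG(db_path=":memory:") as client:
        await client.create_document(
            content="haiku.rag stores documents, chunks and embeddings in SQLite."
        )
        agent = QuestionAnswerOllamaAgent(client)
        print(await agent.answer("Where does haiku.rag store its data?"))


asyncio.run(main())
```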
@@ -0,0 +1,100 @@
+ from collections.abc import Sequence
+
+ try:
+     from openai import AsyncOpenAI
+     from openai.types.chat import (
+         ChatCompletionAssistantMessageParam,
+         ChatCompletionMessageParam,
+         ChatCompletionSystemMessageParam,
+         ChatCompletionToolMessageParam,
+         ChatCompletionUserMessageParam,
+     )
+     from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
+
+     from haiku.rag.client import HaikuRAG
+     from haiku.rag.qa.base import QuestionAnswerAgentBase
+
+     class QuestionAnswerOpenAIAgent(QuestionAnswerAgentBase):
+         def __init__(self, client: HaikuRAG, model: str = "gpt-4o-mini"):
+             super().__init__(client, model or self._model)
+             self.tools: Sequence[ChatCompletionToolParam] = [
+                 ChatCompletionToolParam(tool) for tool in self.tools
+             ]
+
+         async def answer(self, question: str) -> str:
+             openai_client = AsyncOpenAI()
+
+             messages: list[ChatCompletionMessageParam] = [
+                 ChatCompletionSystemMessageParam(
+                     role="system", content=self._system_prompt
+                 ),
+                 ChatCompletionUserMessageParam(role="user", content=question),
+             ]
+
+             max_rounds = 5  # Prevent infinite loops
+
+             for _ in range(max_rounds):
+                 response = await openai_client.chat.completions.create(
+                     model=self._model,
+                     messages=messages,
+                     tools=self.tools,
+                     temperature=0.0,
+                 )
+
+                 response_message = response.choices[0].message
+
+                 if response_message.tool_calls:
+                     messages.append(
+                         ChatCompletionAssistantMessageParam(
+                             role="assistant",
+                             content=response_message.content,
+                             tool_calls=[
+                                 {
+                                     "id": tc.id,
+                                     "type": "function",
+                                     "function": {
+                                         "name": tc.function.name,
+                                         "arguments": tc.function.arguments,
+                                     },
+                                 }
+                                 for tc in response_message.tool_calls
+                             ],
+                         )
+                     )
+
+                     for tool_call in response_message.tool_calls:
+                         if tool_call.function.name == "search_documents":
+                             import json
+
+                             args = json.loads(tool_call.function.arguments)
+                             query = args.get("query", question)
+                             limit = int(args.get("limit", 3))
+
+                             search_results = await self._client.search(
+                                 query, limit=limit
+                             )
+
+                             context_chunks = []
+                             for chunk, score in search_results:
+                                 context_chunks.append(
+                                     f"Content: {chunk.content}\nScore: {score:.4f}"
+                                 )
+
+                             context = "\n\n".join(context_chunks)
+
+                             messages.append(
+                                 ChatCompletionToolMessageParam(
+                                     role="tool",
+                                     content=context,
+                                     tool_call_id=tool_call.id,
+                                 )
+                             )
+                 else:
+                     # No tool calls, return the response
+                     return response_message.content or ""
+
+             # If we've exhausted max rounds, return empty string
+             return ""
+
+ except ImportError:
+     pass
@@ -0,0 +1,20 @@
+ SYSTEM_PROMPT = """
+ You are a knowledgeable assistant that helps users find information from a document knowledge base.
+
+ Your process:
+ 1. When a user asks a question, use the search_documents tool to find relevant information
+ 2. Search with specific keywords and phrases from the user's question
+ 3. Review the search results and their relevance scores
+ 4. If you need additional context, perform follow-up searches with different keywords
+ 5. Provide a comprehensive answer based only on the retrieved documents
+
+ Guidelines:
+ - Base your answers strictly on the provided document content
+ - Quote or reference specific information when possible
+ - If multiple documents contain relevant information, synthesize them coherently
+ - Indicate when information is incomplete or when you need to search for additional context
+ - If the retrieved documents don't contain sufficient information, clearly state: "I cannot find enough information in the knowledge base to answer this question."
+ - For complex questions, consider breaking them down and performing multiple searches
+
+ Be concise, and always maintain accuracy over completeness. Prefer short, direct answers that are well-supported by the documents.
+ """
@@ -0,0 +1,166 @@
+ import sqlite3
+ import struct
+ from importlib import metadata
+ from pathlib import Path
+ from typing import Literal
+
+ import sqlite_vec
+ from packaging.version import parse
+ from rich.console import Console
+
+ from haiku.rag.config import Config
+ from haiku.rag.embeddings import get_embedder
+ from haiku.rag.store.upgrades import upgrades
+ from haiku.rag.utils import int_to_semantic_version, semantic_version_to_int
+
+
+ class Store:
+     def __init__(
+         self, db_path: Path | Literal[":memory:"], skip_validation: bool = False
+     ):
+         self.db_path: Path | Literal[":memory:"] = db_path
+         self.create_or_update_db()
+
+         # Validate config compatibility after connection is established
+         if not skip_validation:
+             from haiku.rag.store.repositories.settings import SettingsRepository
+
+             settings_repo = SettingsRepository(self)
+             settings_repo.validate_config_compatibility()
+         current_version = metadata.version("haiku.rag")
+         self.set_user_version(current_version)
+
+     def create_or_update_db(self):
+         """Create the database and tables with sqlite-vec support for embeddings."""
+         current_version = metadata.version("haiku.rag")
+
+         db = sqlite3.connect(self.db_path)
+         db.enable_load_extension(True)
+         sqlite_vec.load(db)
+         self._connection = db
+         existing_tables = [
+             row[0]
+             for row in db.execute(
+                 "SELECT name FROM sqlite_master WHERE type='table';"
+             ).fetchall()
+         ]
+
+         # If we have a db already, perform upgrades and return
+         if self.db_path != ":memory:" and "documents" in existing_tables:
+             # Upgrade database
+             console = Console()
+             db_version = self.get_user_version()
+             for version, steps in upgrades:
+                 if parse(current_version) >= parse(version) and parse(version) > parse(
+                     db_version
+                 ):
+                     for step in steps:
+                         step(db)
+                         console.print(
+                             f"[green][b]DB Upgrade: [/b]{step.__doc__}[/green]"
+                         )
+             return
+
+         # Create documents table
+         db.execute("""
+             CREATE TABLE IF NOT EXISTS documents (
+                 id INTEGER PRIMARY KEY AUTOINCREMENT,
+                 content TEXT NOT NULL,
+                 uri TEXT,
+                 metadata TEXT DEFAULT '{}',
+                 created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+                 updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+             )
+         """)
+         # Create chunks table
+         db.execute("""
+             CREATE TABLE IF NOT EXISTS chunks (
+                 id INTEGER PRIMARY KEY AUTOINCREMENT,
+                 document_id INTEGER NOT NULL,
+                 content TEXT NOT NULL,
+                 metadata TEXT DEFAULT '{}',
+                 FOREIGN KEY (document_id) REFERENCES documents (id) ON DELETE CASCADE
+             )
+         """)
+         # Create vector table for chunk embeddings
+         embedder = get_embedder()
+         db.execute(f"""
+             CREATE VIRTUAL TABLE IF NOT EXISTS chunk_embeddings USING vec0(
+                 chunk_id INTEGER PRIMARY KEY,
+                 embedding FLOAT[{embedder._vector_dim}]
+             )
+         """)
+         # Create FTS5 table for full-text search
+         db.execute("""
+             CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
+                 content,
+                 content='chunks',
+                 content_rowid='id'
+             )
+         """)
+         # Create settings table for storing current configuration
+         db.execute("""
+             CREATE TABLE IF NOT EXISTS settings (
+                 id INTEGER PRIMARY KEY DEFAULT 1,
+                 settings TEXT NOT NULL DEFAULT '{}'
+             )
+         """)
+         # Save current settings to the new database
+         settings_json = Config.model_dump_json()
+         db.execute(
+             "INSERT OR IGNORE INTO settings (id, settings) VALUES (1, ?)",
+             (settings_json,),
+         )
+         # Create indexes for better performance
+         db.execute(
+             "CREATE INDEX IF NOT EXISTS idx_chunks_document_id ON chunks(document_id)"
+         )
+         db.commit()
+
+     def get_user_version(self) -> str:
+         """Returns the SQLite user version"""
+         if self._connection is None:
+             raise ValueError("Store connection is not available")
+
+         cursor = self._connection.execute("PRAGMA user_version;")
+         version = cursor.fetchone()
+         return int_to_semantic_version(version[0])
+
+     def set_user_version(self, version: str) -> None:
+         """Updates the SQLite user version"""
+         if self._connection is None:
+             raise ValueError("Store connection is not available")
+
+         self._connection.execute(
+             f"PRAGMA user_version = {semantic_version_to_int(version)};"
+         )
+
+     def recreate_embeddings_table(self) -> None:
+         """Recreate the embeddings table with current vector dimensions."""
+         if self._connection is None:
+             raise ValueError("Store connection is not available")
+
+         # Drop existing embeddings table
+         self._connection.execute("DROP TABLE IF EXISTS chunk_embeddings")
+
+         # Recreate with current dimensions
+         embedder = get_embedder()
+         self._connection.execute(f"""
+             CREATE VIRTUAL TABLE chunk_embeddings USING vec0(
+                 chunk_id INTEGER PRIMARY KEY,
+                 embedding FLOAT[{embedder._vector_dim}]
+             )
+         """)
+
+         self._connection.commit()
+
+     @staticmethod
+     def serialize_embedding(embedding: list[float]) -> bytes:
+         """Serialize a list of floats to bytes for sqlite-vec storage."""
+         return struct.pack(f"{len(embedding)}f", *embedding)
+
+     def close(self):
+         """Close the database connection if it's an in-memory database."""
+         if self._connection is not None:
+             self._connection.close()
+             self._connection = None
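
`get_user_version` and `set_user_version` round-trip the package version through SQLite's integer `user_version` pragma via two helpers from `haiku.rag.utils` that this diff references but does not show. A plausible sketch, assuming each version component packs into one byte of the integer:

```python
# Plausible sketch of the version helpers used with PRAGMA user_version; the
# real implementations live in src/haiku/rag/utils.py (not shown in this diff).
def semantic_version_to_int(version: str) -> int:
    """Pack 'X.Y.Z' into a single integer, e.g. '0.3.4' -> 0x000304."""
    major, minor, patch = (int(part) for part in version.split("."))
    return (major << 16) | (minor << 8) | patch


def int_to_semantic_version(value: int) -> str:
    """Unpack an integer produced by semantic_version_to_int back to 'X.Y.Z'."""
    return f"{(value >> 16) & 0xFF}.{(value >> 8) & 0xFF}.{value & 0xFF}"


assert int_to_semantic_version(semantic_version_to_int("0.3.4")) == "0.3.4"
```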