haiku.rag 0.13.0__tar.gz → 0.13.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of haiku.rag might be problematic; see the registry's advisory page for more details.

Files changed (92)
  1. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/PKG-INFO +3 -1
  2. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/README.md +2 -0
  3. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/pyproject.toml +1 -1
  4. haiku_rag-0.13.1/server.json +42 -0
  5. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/config/__init__.py +0 -4
  6. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/config/loader.py +0 -14
  7. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/config/models.py +5 -1
  8. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/embeddings/__init__.py +12 -4
  9. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/embeddings/base.py +4 -2
  10. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/embeddings/ollama.py +1 -2
  11. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/embeddings/vllm.py +2 -2
  12. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/uv.lock +1 -1
  13. haiku_rag-0.13.0/server.json +0 -253
  14. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/.dockerignore +0 -0
  15. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/.gitignore +0 -0
  16. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/.pre-commit-config.yaml +0 -0
  17. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/.python-version +0 -0
  18. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/LICENSE +0 -0
  19. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/mkdocs.yml +0 -0
  20. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/__init__.py +0 -0
  21. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/benchmark.py +0 -0
  22. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/config.py +0 -0
  23. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/datasets/__init__.py +0 -0
  24. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/datasets/repliqa.py +0 -0
  25. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/datasets/wix.py +0 -0
  26. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/llm_judge.py +0 -0
  27. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/evaluations/prompts.py +0 -0
  28. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/__init__.py +0 -0
  29. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/__init__.py +0 -0
  30. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/client.py +0 -0
  31. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/context.py +0 -0
  32. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/models.py +0 -0
  33. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/prompts.py +0 -0
  34. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/skills.py +0 -0
  35. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/storage.py +0 -0
  36. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/a2a/worker.py +0 -0
  37. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/app.py +0 -0
  38. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/chunker.py +0 -0
  39. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/cli.py +0 -0
  40. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/client.py +0 -0
  41. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/embeddings/openai.py +0 -0
  42. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/embeddings/voyageai.py +0 -0
  43. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/__init__.py +0 -0
  44. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/base.py +0 -0
  45. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/common.py +0 -0
  46. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/models.py +0 -0
  47. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/nodes/__init__.py +0 -0
  48. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/nodes/analysis.py +0 -0
  49. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/nodes/plan.py +0 -0
  50. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/nodes/search.py +0 -0
  51. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/nodes/synthesize.py +0 -0
  52. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/graph/prompts.py +0 -0
  53. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/logging.py +0 -0
  54. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/mcp.py +0 -0
  55. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/monitor.py +0 -0
  56. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/__init__.py +0 -0
  57. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/agent.py +0 -0
  58. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/deep/__init__.py +0 -0
  59. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/deep/dependencies.py +0 -0
  60. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/deep/graph.py +0 -0
  61. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/deep/models.py +0 -0
  62. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/deep/nodes.py +0 -0
  63. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/deep/prompts.py +0 -0
  64. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/deep/state.py +0 -0
  65. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/qa/prompts.py +0 -0
  66. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/reader.py +0 -0
  67. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/reranking/__init__.py +0 -0
  68. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/reranking/base.py +0 -0
  69. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/reranking/cohere.py +0 -0
  70. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/reranking/mxbai.py +0 -0
  71. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/reranking/vllm.py +0 -0
  72. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/__init__.py +0 -0
  73. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/common.py +0 -0
  74. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/dependencies.py +0 -0
  75. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/graph.py +0 -0
  76. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/models.py +0 -0
  77. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/prompts.py +0 -0
  78. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/state.py +0 -0
  79. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/research/stream.py +0 -0
  80. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/__init__.py +0 -0
  81. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/engine.py +0 -0
  82. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/models/__init__.py +0 -0
  83. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/models/chunk.py +0 -0
  84. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/models/document.py +0 -0
  85. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/repositories/__init__.py +0 -0
  86. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/repositories/chunk.py +0 -0
  87. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/repositories/document.py +0 -0
  88. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/repositories/settings.py +0 -0
  89. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/upgrades/__init__.py +0 -0
  90. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/upgrades/v0_10_1.py +0 -0
  91. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/store/upgrades/v0_9_3.py +0 -0
  92. {haiku_rag-0.13.0 → haiku_rag-0.13.1}/src/haiku/rag/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: haiku.rag
3
- Version: 0.13.0
3
+ Version: 0.13.1
4
4
  Summary: Agentic Retrieval Augmented Generation (RAG) with LanceDB
5
5
  Author-email: Yiorgis Gozadinos <ggozadinos@gmail.com>
6
6
  License: MIT
@@ -40,6 +40,8 @@ Description-Content-Type: text/markdown
40
40
 
41
41
  # Haiku RAG
42
42
 
43
+ mcp-name: io.github.ggozad/haiku-rag
44
+
43
45
  Retrieval-Augmented Generation (RAG) library built on LanceDB.
44
46
 
45
47
  `haiku.rag` is a Retrieval-Augmented Generation (RAG) library built to work with LanceDB as a local vector database. It uses LanceDB for storing embeddings and performs semantic (vector) search as well as full-text search combined through native hybrid search with Reciprocal Rank Fusion. Both open-source (Ollama) as well as commercial (OpenAI, VoyageAI) embedding providers are supported.
@@ -1,5 +1,7 @@
1
1
  # Haiku RAG
2
2
 
3
+ mcp-name: io.github.ggozad/haiku-rag
4
+
3
5
  Retrieval-Augmented Generation (RAG) library built on LanceDB.
4
6
 
5
7
  `haiku.rag` is a Retrieval-Augmented Generation (RAG) library built to work with LanceDB as a local vector database. It uses LanceDB for storing embeddings and performs semantic (vector) search as well as full-text search combined through native hybrid search with Reciprocal Rank Fusion. Both open-source (Ollama) as well as commercial (OpenAI, VoyageAI) embedding providers are supported.
@@ -2,7 +2,7 @@
2
2
 
3
3
  name = "haiku.rag"
4
4
  description = "Agentic Retrieval Augmented Generation (RAG) with LanceDB"
5
- version = "0.13.0"
5
+ version = "0.13.1"
6
6
  authors = [{ name = "Yiorgis Gozadinos", email = "ggozadinos@gmail.com" }]
7
7
  license = { text = "MIT" }
8
8
  readme = { file = "README.md", content-type = "text/markdown" }
@@ -0,0 +1,42 @@
1
+ {
2
+ "$schema": "https://static.modelcontextprotocol.io/schemas/2025-10-17/server.schema.json",
3
+ "name": "io.github.ggozad/haiku-rag",
4
+ "version": "{{VERSION}}",
5
+ "description": "Agentic Retrieval Augmented Generation (RAG) with LanceDB",
6
+ "repository": {
7
+ "url": "https://github.com/ggozad/haiku.rag",
8
+ "source": "github"
9
+ },
10
+ "license": "MIT",
11
+ "keywords": [
12
+ "rag",
13
+ "lancedb",
14
+ "vector-database",
15
+ "embeddings",
16
+ "search",
17
+ "qa",
18
+ "research"
19
+ ],
20
+ "packages": [
21
+ {
22
+ "registryType": "pypi",
23
+ "registryBaseUrl": "https://pypi.org",
24
+ "identifier": "haiku-rag",
25
+ "version": "{{VERSION}}",
26
+ "runtimeHint": "uvx",
27
+ "runtimeArguments": [
28
+ {
29
+ "type": "positional",
30
+ "value": "serve"
31
+ },
32
+ {
33
+ "type": "named",
34
+ "name": "--mcp"
35
+ }
36
+ ],
37
+ "transport": {
38
+ "type": "stdio"
39
+ }
40
+ }
41
+ ]
42
+ }
@@ -1,7 +1,6 @@
1
1
  import os
2
2
 
3
3
  from haiku.rag.config.loader import (
4
- check_for_deprecated_env,
5
4
  find_config_file,
6
5
  generate_default_config,
7
6
  load_config_from_env,
@@ -49,6 +48,3 @@ if config_path:
49
48
  Config = AppConfig.model_validate(yaml_data)
50
49
  else:
51
50
  Config = AppConfig()
52
-
53
- # Check for deprecated .env file
54
- check_for_deprecated_env()
@@ -1,5 +1,4 @@
1
1
  import os
2
- import warnings
3
2
  from pathlib import Path
4
3
 
5
4
  import yaml
@@ -45,19 +44,6 @@ def load_yaml_config(path: Path) -> dict:
45
44
  return data or {}
46
45
 
47
46
 
48
- def check_for_deprecated_env() -> None:
49
- """Check for .env file and warn if found."""
50
- env_file = Path.cwd() / ".env"
51
- if env_file.exists():
52
- warnings.warn(
53
- ".env file detected but YAML configuration is now preferred. "
54
- "Environment variable configuration is deprecated and will be removed in future versions."
55
- "Run 'haiku-rag init-config' to generate a YAML config file.",
56
- DeprecationWarning,
57
- stacklevel=2,
58
- )
59
-
60
-
61
47
  def generate_default_config() -> dict:
62
48
  """Generate a default YAML config structure with documentation."""
63
49
  return {
@@ -46,7 +46,11 @@ class ProcessingConfig(BaseModel):
46
46
 
47
47
 
48
48
  class OllamaConfig(BaseModel):
49
- base_url: str = "http://localhost:11434"
49
+ base_url: str = Field(
50
+ default_factory=lambda: __import__("os").environ.get(
51
+ "OLLAMA_BASE_URL", "http://localhost:11434"
52
+ )
53
+ )
50
54
 
51
55
 
52
56
  class VLLMConfig(BaseModel):
@@ -15,7 +15,9 @@ def get_embedder(config: AppConfig = Config) -> EmbedderBase:
15
15
  """
16
16
 
17
17
  if config.embeddings.provider == "ollama":
18
- return OllamaEmbedder(config.embeddings.model, config.embeddings.vector_dim)
18
+ return OllamaEmbedder(
19
+ config.embeddings.model, config.embeddings.vector_dim, config
20
+ )
19
21
 
20
22
  if config.embeddings.provider == "voyageai":
21
23
  try:
@@ -26,16 +28,22 @@ def get_embedder(config: AppConfig = Config) -> EmbedderBase:
26
28
  "Please install haiku.rag with the 'voyageai' extra: "
27
29
  "uv pip install haiku.rag[voyageai]"
28
30
  )
29
- return VoyageAIEmbedder(config.embeddings.model, config.embeddings.vector_dim)
31
+ return VoyageAIEmbedder(
32
+ config.embeddings.model, config.embeddings.vector_dim, config
33
+ )
30
34
 
31
35
  if config.embeddings.provider == "openai":
32
36
  from haiku.rag.embeddings.openai import Embedder as OpenAIEmbedder
33
37
 
34
- return OpenAIEmbedder(config.embeddings.model, config.embeddings.vector_dim)
38
+ return OpenAIEmbedder(
39
+ config.embeddings.model, config.embeddings.vector_dim, config
40
+ )
35
41
 
36
42
  if config.embeddings.provider == "vllm":
37
43
  from haiku.rag.embeddings.vllm import Embedder as VllmEmbedder
38
44
 
39
- return VllmEmbedder(config.embeddings.model, config.embeddings.vector_dim)
45
+ return VllmEmbedder(
46
+ config.embeddings.model, config.embeddings.vector_dim, config
47
+ )
40
48
 
41
49
  raise ValueError(f"Unsupported embedding provider: {config.embeddings.provider}")
@@ -1,15 +1,17 @@
1
1
  from typing import overload
2
2
 
3
- from haiku.rag.config import Config
3
+ from haiku.rag.config import AppConfig, Config
4
4
 
5
5
 
6
6
  class EmbedderBase:
7
7
  _model: str = Config.embeddings.model
8
8
  _vector_dim: int = Config.embeddings.vector_dim
9
+ _config: AppConfig = Config
9
10
 
10
- def __init__(self, model: str, vector_dim: int):
11
+ def __init__(self, model: str, vector_dim: int, config: AppConfig = Config):
11
12
  self._model = model
12
13
  self._vector_dim = vector_dim
14
+ self._config = config
13
15
 
14
16
  @overload
15
17
  async def embed(self, text: str) -> list[float]: ...
@@ -2,7 +2,6 @@ from typing import overload
2
2
 
3
3
  from openai import AsyncOpenAI
4
4
 
5
- from haiku.rag.config import Config
6
5
  from haiku.rag.embeddings.base import EmbedderBase
7
6
 
8
7
 
@@ -15,7 +14,7 @@ class Embedder(EmbedderBase):
15
14
 
16
15
  async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
17
16
  client = AsyncOpenAI(
18
- base_url=f"{Config.providers.ollama.base_url}/v1", api_key="dummy"
17
+ base_url=f"{self._config.providers.ollama.base_url}/v1", api_key="dummy"
19
18
  )
20
19
  if not text:
21
20
  return []
@@ -2,7 +2,6 @@ from typing import overload
2
2
 
3
3
  from openai import AsyncOpenAI
4
4
 
5
- from haiku.rag.config import Config
6
5
  from haiku.rag.embeddings.base import EmbedderBase
7
6
 
8
7
 
@@ -15,7 +14,8 @@ class Embedder(EmbedderBase):
15
14
 
16
15
  async def embed(self, text: str | list[str]) -> list[float] | list[list[float]]:
17
16
  client = AsyncOpenAI(
18
- base_url=f"{Config.providers.vllm.embeddings_base_url}/v1", api_key="dummy"
17
+ base_url=f"{self._config.providers.vllm.embeddings_base_url}/v1",
18
+ api_key="dummy",
19
19
  )
20
20
  if not text:
21
21
  return []
@@ -1123,7 +1123,7 @@ wheels = [
1123
1123
 
1124
1124
  [[package]]
1125
1125
  name = "haiku-rag"
1126
- version = "0.13.0"
1126
+ version = "0.13.1"
1127
1127
  source = { editable = "." }
1128
1128
  dependencies = [
1129
1129
  { name = "docling" },
@@ -1,253 +0,0 @@
1
- {
2
- "$schema": "https://static.modelcontextprotocol.io/schemas/2025-09-29/server.schema.json",
3
- "name": "io.github.ggozad/haiku-rag",
4
- "version": "{{VERSION}}",
5
- "description": "Agentic Retrieval Augmented Generation (RAG) with LanceDB",
6
- "repository": {
7
- "url": "https://github.com/ggozad/haiku.rag",
8
- "source": "github"
9
- },
10
- "homepage": "https://github.com/ggozad/haiku.rag",
11
- "license": "MIT",
12
- "keywords": ["rag", "lancedb", "vector-database", "embeddings", "search", "qa", "research"],
13
- "vendor": {
14
- "name": "Yiorgis Gozadinos",
15
- "url": "https://github.com/ggozad"
16
- },
17
- "deployment": {
18
- "packages": [
19
- {
20
- "type": "pypi",
21
- "package": "haiku.rag",
22
- "command": {
23
- "linux-x86_64": {
24
- "shell": "uvx",
25
- "args": ["haiku.rag", "serve", "--stdio"]
26
- },
27
- "darwin-arm64": {
28
- "shell": "uvx",
29
- "args": ["haiku.rag", "serve", "--stdio"]
30
- },
31
- "darwin-x86_64": {
32
- "shell": "uvx",
33
- "args": ["haiku.rag", "serve", "--stdio"]
34
- },
35
- "win32-x86_64": {
36
- "shell": "uvx.exe",
37
- "args": ["haiku.rag", "serve", "--stdio"]
38
- }
39
- },
40
- "environmentVariables": [
41
- {
42
- "name": "ENV",
43
- "description": "Runtime environment (production or development)",
44
- "format": "string",
45
- "isRequired": false,
46
- "isSecret": false
47
- },
48
- {
49
- "name": "DEFAULT_DATA_DIR",
50
- "description": "Default directory for LanceDB data and assets",
51
- "format": "string",
52
- "isRequired": false,
53
- "isSecret": false
54
- },
55
- {
56
- "name": "MONITOR_DIRECTORIES",
57
- "description": "Comma-separated paths to watch for file changes in server mode",
58
- "format": "string",
59
- "isRequired": false,
60
- "isSecret": false
61
- },
62
- {
63
- "name": "LANCEDB_URI",
64
- "description": "LanceDB connection URI (use db:// for cloud or a filesystem path)",
65
- "format": "string",
66
- "isRequired": false,
67
- "isSecret": false
68
- },
69
- {
70
- "name": "LANCEDB_REGION",
71
- "description": "LanceDB cloud region (if using cloud)",
72
- "format": "string",
73
- "isRequired": false,
74
- "isSecret": false
75
- },
76
- {
77
- "name": "LANCEDB_API_KEY",
78
- "description": "LanceDB API key (required for LanceDB Cloud)",
79
- "format": "string",
80
- "isRequired": false,
81
- "isSecret": true
82
- },
83
- {
84
- "name": "EMBEDDINGS_PROVIDER",
85
- "description": "Embeddings provider (e.g. ollama, openai, voyageai)",
86
- "format": "string",
87
- "isRequired": false,
88
- "isSecret": false
89
- },
90
- {
91
- "name": "EMBEDDINGS_MODEL",
92
- "description": "Embeddings model name (provider-specific)",
93
- "format": "string",
94
- "isRequired": false,
95
- "isSecret": false
96
- },
97
- {
98
- "name": "EMBEDDINGS_VECTOR_DIM",
99
- "description": "Embedding vector dimension (must match model)",
100
- "format": "number",
101
- "isRequired": false,
102
- "isSecret": false
103
- },
104
- {
105
- "name": "QA_PROVIDER",
106
- "description": "Question answering provider (e.g. ollama, openai, anthropic)",
107
- "format": "string",
108
- "isRequired": false,
109
- "isSecret": false
110
- },
111
- {
112
- "name": "QA_MODEL",
113
- "description": "Question answering model name (provider-specific)",
114
- "format": "string",
115
- "isRequired": false,
116
- "isSecret": false
117
- },
118
- {
119
- "name": "RESEARCH_PROVIDER",
120
- "description": "Research provider for multi-agent research (e.g. ollama, openai, anthropic)",
121
- "format": "string",
122
- "isRequired": false,
123
- "isSecret": false
124
- },
125
- {
126
- "name": "RESEARCH_MODEL",
127
- "description": "Research model name for multi-agent research (provider-specific)",
128
- "format": "string",
129
- "isRequired": false,
130
- "isSecret": false
131
- },
132
- {
133
- "name": "RERANK_PROVIDER",
134
- "description": "Rerank provider (e.g. mixedbread, cohere)",
135
- "format": "string",
136
- "isRequired": false,
137
- "isSecret": false
138
- },
139
- {
140
- "name": "RERANK_MODEL",
141
- "description": "Rerank model name (provider-specific)",
142
- "format": "string",
143
- "isRequired": false,
144
- "isSecret": false
145
- },
146
- {
147
- "name": "CHUNK_SIZE",
148
- "description": "Chunk size for splitting documents (characters)",
149
- "format": "number",
150
- "isRequired": false,
151
- "isSecret": false
152
- },
153
- {
154
- "name": "CONTEXT_CHUNK_RADIUS",
155
- "description": "Number of adjacent chunks to include around search hits",
156
- "format": "number",
157
- "isRequired": false,
158
- "isSecret": false
159
- },
160
- {
161
- "name": "OLLAMA_BASE_URL",
162
- "description": "Base URL for Ollama server",
163
- "format": "string",
164
- "isRequired": false,
165
- "isSecret": false
166
- },
167
- {
168
- "name": "VLLM_EMBEDDINGS_BASE_URL",
169
- "description": "Base URL for vLLM embeddings endpoint",
170
- "format": "string",
171
- "isRequired": false,
172
- "isSecret": false
173
- },
174
- {
175
- "name": "VLLM_RERANK_BASE_URL",
176
- "description": "Base URL for vLLM rerank endpoint",
177
- "format": "string",
178
- "isRequired": false,
179
- "isSecret": false
180
- },
181
- {
182
- "name": "VLLM_QA_BASE_URL",
183
- "description": "Base URL for vLLM QA endpoint",
184
- "format": "string",
185
- "isRequired": false,
186
- "isSecret": false
187
- },
188
- {
189
- "name": "VLLM_RESEARCH_BASE_URL",
190
- "description": "Base URL for vLLM research endpoint",
191
- "format": "string",
192
- "isRequired": false,
193
- "isSecret": false
194
- },
195
- {
196
- "name": "MARKDOWN_PREPROCESSOR",
197
- "description": "Dotted path or file path to a callable that preprocesses markdown content before chunking",
198
- "format": "string",
199
- "isRequired": false,
200
- "isSecret": false
201
- },
202
- {
203
- "name": "DISABLE_DB_AUTOCREATE",
204
- "description": "If true, refuse to auto-create a new LanceDB database or tables",
205
- "format": "boolean",
206
- "isRequired": false,
207
- "isSecret": false
208
- },
209
- {
210
- "name": "VACUUM_RETENTION_SECONDS",
211
- "description": "Vacuum retention threshold in seconds (default: 60)",
212
- "format": "number",
213
- "isRequired": false,
214
- "isSecret": false
215
- },
216
- {
217
- "name": "OPENAI_API_KEY",
218
- "description": "OpenAI API key (if using OpenAI for embeddings or QA)",
219
- "format": "string",
220
- "isRequired": false,
221
- "isSecret": true
222
- },
223
- {
224
- "name": "VOYAGE_API_KEY",
225
- "description": "VoyageAI API key (if using VoyageAI for embeddings)",
226
- "format": "string",
227
- "isRequired": false,
228
- "isSecret": true
229
- },
230
- {
231
- "name": "ANTHROPIC_API_KEY",
232
- "description": "Anthropic API key (if using Anthropic for QA)",
233
- "format": "string",
234
- "isRequired": false,
235
- "isSecret": true
236
- },
237
- {
238
- "name": "COHERE_API_KEY",
239
- "description": "Cohere API key (if using Cohere for reranking)",
240
- "format": "string",
241
- "isRequired": false,
242
- "isSecret": true
243
- }
244
- ]
245
- }
246
- ]
247
- },
248
- "transports": [
249
- {
250
- "type": "stdio"
251
- }
252
- ]
253
- }
File without changes
File without changes
File without changes
File without changes
File without changes