agent-brain-rag 1.1.0__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- {agent_brain_rag-1.1.0.dist-info → agent_brain_rag-2.0.0.dist-info}/METADATA +68 -27
- agent_brain_rag-2.0.0.dist-info/RECORD +50 -0
- agent_brain_rag-2.0.0.dist-info/entry_points.txt +4 -0
- {doc_serve_server → agent_brain_server}/__init__.py +1 -1
- {doc_serve_server → agent_brain_server}/api/main.py +90 -26
- {doc_serve_server → agent_brain_server}/api/routers/health.py +4 -2
- {doc_serve_server → agent_brain_server}/api/routers/index.py +1 -1
- {doc_serve_server → agent_brain_server}/api/routers/query.py +3 -3
- agent_brain_server/config/provider_config.py +308 -0
- {doc_serve_server → agent_brain_server}/config/settings.py +12 -1
- agent_brain_server/indexing/__init__.py +40 -0
- {doc_serve_server → agent_brain_server}/indexing/bm25_index.py +1 -1
- {doc_serve_server → agent_brain_server}/indexing/chunking.py +1 -1
- agent_brain_server/indexing/embedding.py +225 -0
- agent_brain_server/indexing/graph_extractors.py +582 -0
- agent_brain_server/indexing/graph_index.py +536 -0
- {doc_serve_server → agent_brain_server}/models/__init__.py +9 -0
- agent_brain_server/models/graph.py +253 -0
- {doc_serve_server → agent_brain_server}/models/health.py +15 -3
- {doc_serve_server → agent_brain_server}/models/query.py +14 -1
- agent_brain_server/providers/__init__.py +64 -0
- agent_brain_server/providers/base.py +251 -0
- agent_brain_server/providers/embedding/__init__.py +23 -0
- agent_brain_server/providers/embedding/cohere.py +163 -0
- agent_brain_server/providers/embedding/ollama.py +150 -0
- agent_brain_server/providers/embedding/openai.py +118 -0
- agent_brain_server/providers/exceptions.py +95 -0
- agent_brain_server/providers/factory.py +157 -0
- agent_brain_server/providers/summarization/__init__.py +41 -0
- agent_brain_server/providers/summarization/anthropic.py +87 -0
- agent_brain_server/providers/summarization/gemini.py +96 -0
- agent_brain_server/providers/summarization/grok.py +95 -0
- agent_brain_server/providers/summarization/ollama.py +114 -0
- agent_brain_server/providers/summarization/openai.py +87 -0
- {doc_serve_server → agent_brain_server}/services/indexing_service.py +43 -4
- {doc_serve_server → agent_brain_server}/services/query_service.py +212 -4
- agent_brain_server/storage/__init__.py +21 -0
- agent_brain_server/storage/graph_store.py +519 -0
- {doc_serve_server → agent_brain_server}/storage/vector_store.py +36 -1
- {doc_serve_server → agent_brain_server}/storage_paths.py +2 -0
- agent_brain_rag-1.1.0.dist-info/RECORD +0 -31
- agent_brain_rag-1.1.0.dist-info/entry_points.txt +0 -3
- doc_serve_server/indexing/__init__.py +0 -19
- doc_serve_server/indexing/embedding.py +0 -274
- doc_serve_server/storage/__init__.py +0 -5
- {agent_brain_rag-1.1.0.dist-info → agent_brain_rag-2.0.0.dist-info}/WHEEL +0 -0
- {doc_serve_server → agent_brain_server}/api/__init__.py +0 -0
- {doc_serve_server → agent_brain_server}/api/routers/__init__.py +0 -0
- {doc_serve_server → agent_brain_server}/config/__init__.py +0 -0
- {doc_serve_server → agent_brain_server}/indexing/document_loader.py +0 -0
- {doc_serve_server → agent_brain_server}/locking.py +0 -0
- {doc_serve_server → agent_brain_server}/models/index.py +0 -0
- {doc_serve_server → agent_brain_server}/project_root.py +0 -0
- {doc_serve_server → agent_brain_server}/runtime.py +0 -0
- {doc_serve_server → agent_brain_server}/services/__init__.py +0 -0
{doc_serve_server → agent_brain_server}/services/query_service.py (+212 -4):

```diff
@@ -7,10 +7,20 @@ from typing import Any, Optional
 from llama_index.core.retrievers import BaseRetriever
 from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
 
-from
-from
-from
-from
+from agent_brain_server.config import settings
+from agent_brain_server.indexing import EmbeddingGenerator, get_embedding_generator
+from agent_brain_server.indexing.bm25_index import BM25IndexManager, get_bm25_manager
+from agent_brain_server.indexing.graph_index import (
+    GraphIndexManager,
+    get_graph_index_manager,
+)
+from agent_brain_server.models import (
+    QueryMode,
+    QueryRequest,
+    QueryResponse,
+    QueryResult,
+)
+from agent_brain_server.storage import VectorStoreManager, get_vector_store
 
 logger = logging.getLogger(__name__)
 
```

The four removed import lines are truncated to a bare `from` in the source diff viewer; presumably they were the old `doc_serve_server` equivalents of the `agent_brain_server` imports added above.
```diff
@@ -64,6 +74,7 @@ class QueryService:
         vector_store: Optional[VectorStoreManager] = None,
         embedding_generator: Optional[EmbeddingGenerator] = None,
         bm25_manager: Optional[BM25IndexManager] = None,
+        graph_index_manager: Optional[GraphIndexManager] = None,
     ):
         """
         Initialize the query service.
```
```diff
@@ -72,10 +83,12 @@ class QueryService:
             vector_store: Vector store manager instance.
             embedding_generator: Embedding generator instance.
             bm25_manager: BM25 index manager instance.
+            graph_index_manager: Graph index manager instance (Feature 113).
         """
         self.vector_store = vector_store or get_vector_store()
         self.embedding_generator = embedding_generator or get_embedding_generator()
         self.bm25_manager = bm25_manager or get_bm25_manager()
+        self.graph_index_manager = graph_index_manager or get_graph_index_manager()
 
     def is_ready(self) -> bool:
         """
```
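Worth noting in the two constructor hunks above: every dependency is an optional parameter that falls back to a module-level singleton accessor (`get_vector_store()`, `get_graph_index_manager()`, and so on). A minimal, self-contained sketch of that pattern — the names here are illustrative stand-ins, not the package's real implementations:

```python
from typing import Optional

class GraphIndex:
    """Stand-in for a heavyweight dependency; illustrative only."""

_graph_index: Optional[GraphIndex] = None

def get_graph_index() -> GraphIndex:
    # Lazily create and cache one shared instance per process.
    global _graph_index
    if _graph_index is None:
        _graph_index = GraphIndex()
    return _graph_index

class Service:
    def __init__(self, graph_index: Optional[GraphIndex] = None):
        # Prefer an injected instance (e.g. a test stub); otherwise
        # fall back to the shared singleton.
        self.graph_index = graph_index or get_graph_index()
```

Tests can inject a stub directly, while production call sites construct the service with no arguments.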
```diff
@@ -110,6 +123,10 @@
             results = await self._execute_bm25_query(request)
         elif request.mode == QueryMode.VECTOR:
             results = await self._execute_vector_query(request)
+        elif request.mode == QueryMode.GRAPH:
+            results = await self._execute_graph_query(request)
+        elif request.mode == QueryMode.MULTI:
+            results = await self._execute_multi_query(request)
         else:  # HYBRID
             results = await self._execute_hybrid_query(request)
 
```
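With this dispatch, 2.0.0 accepts two new query modes alongside the existing BM25, vector, and hybrid modes. A hedged client sketch follows — the `/query` path, port, response shape, and lowercase mode strings are assumptions based on the router and `QueryMode` enum named in this diff, and graph mode additionally requires `ENABLE_GRAPH_INDEX=true` per the error message later in the diff:

```python
# Hypothetical call against the package's HTTP query router. Only "query",
# "mode", and "top_k" are grounded in this diff; the URL, the serialized
# mode values, and the "results" key are assumptions.
import httpx

response = httpx.post(
    "http://localhost:8000/query",
    json={
        "query": "How are embeddings generated?",
        "mode": "graph",  # or "multi" to fuse vector + BM25 + graph via RRF
        "top_k": 5,
    },
    timeout=30.0,
)
response.raise_for_status()
for result in response.json()["results"]:
    print(result["score"], result["source"])
```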
```diff
@@ -313,6 +330,197 @@
 
         return fused_nodes
 
+    async def _execute_graph_query(
+        self,
+        request: QueryRequest,
+        traversal_depth: int = 2,
+    ) -> list[QueryResult]:
+        """Execute graph-only query using entity relationships.
+
+        Uses the knowledge graph to find documents related to
+        entities mentioned in the query.
+
+        Args:
+            request: Query request.
+            traversal_depth: How many hops to traverse in graph.
+
+        Returns:
+            List of QueryResult from graph retrieval.
+
+        Raises:
+            ValueError: If GraphRAG is not enabled.
+        """
+        if not settings.ENABLE_GRAPH_INDEX:
+            raise ValueError(
+                "GraphRAG not enabled. Set ENABLE_GRAPH_INDEX=true in environment."
+            )
+
+        # Query the graph for related entities
+        graph_results = self.graph_index_manager.query(
+            query_text=request.query,
+            top_k=request.top_k,
+            traversal_depth=traversal_depth,
+        )
+
+        if not graph_results:
+            logger.debug("No graph results found, falling back to vector search")
+            return await self._execute_vector_query(request)
+
+        # Convert graph results to QueryResults
+        results: list[QueryResult] = []
+        chunk_ids = [
+            r.get("source_chunk_id") for r in graph_results if r.get("source_chunk_id")
+        ]
+
+        if not chunk_ids:
+            # No source chunks in graph, fall back to vector search
+            return await self._execute_vector_query(request)
+
+        # Look up the actual documents from vector store
+        for graph_result in graph_results:
+            chunk_id = graph_result.get("source_chunk_id")
+            if not chunk_id:
+                continue
+
+            # Get document from vector store by ID
+            try:
+                doc = await self.vector_store.get_by_id(chunk_id)
+                if doc:
+                    result = QueryResult(
+                        text=doc.get("text", ""),
+                        source=doc.get("metadata", {}).get(
+                            "source",
+                            doc.get("metadata", {}).get("file_path", "unknown"),
+                        ),
+                        score=graph_result.get("graph_score", 0.5),
+                        graph_score=graph_result.get("graph_score", 0.5),
+                        chunk_id=chunk_id,
+                        source_type=doc.get("metadata", {}).get("source_type", "doc"),
+                        language=doc.get("metadata", {}).get("language"),
+                        related_entities=[
+                            graph_result.get("subject", ""),
+                            graph_result.get("object", ""),
+                        ],
+                        relationship_path=[graph_result.get("relationship_path", "")],
+                        metadata={
+                            k: v
+                            for k, v in doc.get("metadata", {}).items()
+                            if k
+                            not in ("source", "file_path", "source_type", "language")
+                        },
+                    )
+                    results.append(result)
+            except Exception as e:
+                logger.debug(f"Failed to retrieve chunk {chunk_id}: {e}")
+                continue
+
+        # If no results from graph, fall back to vector search
+        if not results:
+            logger.debug("No documents found from graph, falling back to vector search")
+            return await self._execute_vector_query(request)
+
+        return results[: request.top_k]
+
+    async def _execute_multi_query(self, request: QueryRequest) -> list[QueryResult]:
+        """Execute multi-retrieval query combining vector, BM25, and graph.
+
+        Uses Reciprocal Rank Fusion (RRF) to combine results from
+        all three retrieval methods.
+
+        Args:
+            request: Query request.
+
+        Returns:
+            List of QueryResult with combined scores.
+        """
+        # Get results from each retriever
+        vector_results = await self._execute_vector_query(request)
+        bm25_results = await self._execute_bm25_query(request)
+
+        # Get graph results if enabled
+        graph_results: list[QueryResult] = []
+        if settings.ENABLE_GRAPH_INDEX:
+            try:
+                graph_results = await self._execute_graph_query(request)
+            except ValueError:
+                pass  # Graph not enabled, skip
+
+        # Apply Reciprocal Rank Fusion
+        rrf_k = settings.GRAPH_RRF_K  # Typical value is 60
+        combined_scores: dict[str, dict[str, Any]] = {}
+
+        # Process vector results
+        for rank, result in enumerate(vector_results):
+            chunk_id = result.chunk_id
+            rrf_score = 1.0 / (rrf_k + rank + 1)
+            if chunk_id not in combined_scores:
+                combined_scores[chunk_id] = {
+                    "result": result,
+                    "rrf_score": 0.0,
+                    "vector_rank": None,
+                    "bm25_rank": None,
+                    "graph_rank": None,
+                }
+            combined_scores[chunk_id]["rrf_score"] += rrf_score
+            combined_scores[chunk_id]["vector_rank"] = rank + 1
+
+        # Process BM25 results
+        for rank, result in enumerate(bm25_results):
+            chunk_id = result.chunk_id
+            rrf_score = 1.0 / (rrf_k + rank + 1)
+            if chunk_id not in combined_scores:
+                combined_scores[chunk_id] = {
+                    "result": result,
+                    "rrf_score": 0.0,
+                    "vector_rank": None,
+                    "bm25_rank": None,
+                    "graph_rank": None,
+                }
+            combined_scores[chunk_id]["rrf_score"] += rrf_score
+            combined_scores[chunk_id]["bm25_rank"] = rank + 1
+
+        # Process graph results
+        for rank, result in enumerate(graph_results):
+            chunk_id = result.chunk_id
+            rrf_score = 1.0 / (rrf_k + rank + 1)
+            if chunk_id not in combined_scores:
+                combined_scores[chunk_id] = {
+                    "result": result,
+                    "rrf_score": 0.0,
+                    "vector_rank": None,
+                    "bm25_rank": None,
+                    "graph_rank": None,
+                }
+            combined_scores[chunk_id]["rrf_score"] += rrf_score
+            combined_scores[chunk_id]["graph_rank"] = rank + 1
+            # Preserve graph-specific fields
+            if result.related_entities:
+                combined_scores[chunk_id][
+                    "result"
+                ].related_entities = result.related_entities
+            if result.relationship_path:
+                combined_scores[chunk_id][
+                    "result"
+                ].relationship_path = result.relationship_path
+            if result.graph_score:
+                combined_scores[chunk_id]["result"].graph_score = result.graph_score
+
+        # Sort by RRF score and take top_k
+        sorted_results = sorted(
+            combined_scores.values(),
+            key=lambda x: x["rrf_score"],
+            reverse=True,
+        )
+
+        # Update scores and return
+        final_results: list[QueryResult] = []
+        for data in sorted_results[: request.top_k]:
+            result = data["result"]
+            result.score = data["rrf_score"]
+            final_results.append(result)
+
+        return final_results
+
     async def get_document_count(self) -> int:
         """
         Get the total number of indexed documents.
```
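The `_execute_multi_query` method added above implements standard Reciprocal Rank Fusion: a chunk at 0-based position `rank` in any one result list contributes `1.0 / (k + rank + 1)` to its combined score, with `k = settings.GRAPH_RRF_K` (60 is typical, per the inline comment). A self-contained sketch of the same arithmetic on toy data:

```python
# Standalone RRF demo using the same formula as the diff; the chunk IDs are
# made up, and k=60 follows the "typical value" comment above.
from collections import defaultdict

def rrf_fuse(rankings: list[list[str]], k: int = 60) -> list[tuple[str, float]]:
    scores: dict[str, float] = defaultdict(float)
    for ranking in rankings:
        for rank, chunk_id in enumerate(ranking):
            scores[chunk_id] += 1.0 / (k + rank + 1)
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)

vector_hits = ["c1", "c2", "c3"]
bm25_hits = ["c2", "c1", "c4"]
graph_hits = ["c2", "c5"]

for chunk_id, score in rrf_fuse([vector_hits, bm25_hits, graph_hits]):
    print(f"{chunk_id}: {score:.4f}")
# c2 ranks highly in all three lists and wins:
# c2: 0.0489 (1/62 + 1/61 + 1/61), then c1: 0.0325, ...
```

Because RRF uses only ranks, it sidesteps calibrating vector similarity scores against BM25 scores against graph scores.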
agent_brain_server/storage/__init__.py (new file, +21):

```diff
@@ -0,0 +1,21 @@
+"""Storage layer for vector database and graph operations."""
+
+from .graph_store import (
+    GraphStoreManager,
+    get_graph_store_manager,
+    initialize_graph_store,
+    reset_graph_store_manager,
+)
+from .vector_store import VectorStoreManager, get_vector_store, initialize_vector_store
+
+__all__ = [
+    # Vector store
+    "VectorStoreManager",
+    "get_vector_store",
+    "initialize_vector_store",
+    # Graph store (Feature 113)
+    "GraphStoreManager",
+    "get_graph_store_manager",
+    "initialize_graph_store",
+    "reset_graph_store_manager",
+]
```
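The new `agent_brain_server/storage/__init__.py` gives the graph store the same public surface as the vector store. A small usage sketch — only the re-exported names are confirmed by this diff; what the managers can do beyond construction is not shown here:

```python
# Illustrative imports of the 2.0.0 storage package surface; the managers'
# methods and the initialize_* signatures are not visible in this diff.
from agent_brain_server.storage import (
    get_graph_store_manager,
    get_vector_store,
)

vector_store = get_vector_store()        # shared VectorStoreManager
graph_store = get_graph_store_manager()  # shared GraphStoreManager (Feature 113)
```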