agent-brain-rag: agent_brain_rag-1.1.0-py3-none-any.whl → agent_brain_rag-2.0.0-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- {agent_brain_rag-1.1.0.dist-info → agent_brain_rag-2.0.0.dist-info}/METADATA +68 -27
- agent_brain_rag-2.0.0.dist-info/RECORD +50 -0
- agent_brain_rag-2.0.0.dist-info/entry_points.txt +4 -0
- {doc_serve_server → agent_brain_server}/__init__.py +1 -1
- {doc_serve_server → agent_brain_server}/api/main.py +90 -26
- {doc_serve_server → agent_brain_server}/api/routers/health.py +4 -2
- {doc_serve_server → agent_brain_server}/api/routers/index.py +1 -1
- {doc_serve_server → agent_brain_server}/api/routers/query.py +3 -3
- agent_brain_server/config/provider_config.py +308 -0
- {doc_serve_server → agent_brain_server}/config/settings.py +12 -1
- agent_brain_server/indexing/__init__.py +40 -0
- {doc_serve_server → agent_brain_server}/indexing/bm25_index.py +1 -1
- {doc_serve_server → agent_brain_server}/indexing/chunking.py +1 -1
- agent_brain_server/indexing/embedding.py +225 -0
- agent_brain_server/indexing/graph_extractors.py +582 -0
- agent_brain_server/indexing/graph_index.py +536 -0
- {doc_serve_server → agent_brain_server}/models/__init__.py +9 -0
- agent_brain_server/models/graph.py +253 -0
- {doc_serve_server → agent_brain_server}/models/health.py +15 -3
- {doc_serve_server → agent_brain_server}/models/query.py +14 -1
- agent_brain_server/providers/__init__.py +64 -0
- agent_brain_server/providers/base.py +251 -0
- agent_brain_server/providers/embedding/__init__.py +23 -0
- agent_brain_server/providers/embedding/cohere.py +163 -0
- agent_brain_server/providers/embedding/ollama.py +150 -0
- agent_brain_server/providers/embedding/openai.py +118 -0
- agent_brain_server/providers/exceptions.py +95 -0
- agent_brain_server/providers/factory.py +157 -0
- agent_brain_server/providers/summarization/__init__.py +41 -0
- agent_brain_server/providers/summarization/anthropic.py +87 -0
- agent_brain_server/providers/summarization/gemini.py +96 -0
- agent_brain_server/providers/summarization/grok.py +95 -0
- agent_brain_server/providers/summarization/ollama.py +114 -0
- agent_brain_server/providers/summarization/openai.py +87 -0
- {doc_serve_server → agent_brain_server}/services/indexing_service.py +43 -4
- {doc_serve_server → agent_brain_server}/services/query_service.py +212 -4
- agent_brain_server/storage/__init__.py +21 -0
- agent_brain_server/storage/graph_store.py +519 -0
- {doc_serve_server → agent_brain_server}/storage/vector_store.py +36 -1
- {doc_serve_server → agent_brain_server}/storage_paths.py +2 -0
- agent_brain_rag-1.1.0.dist-info/RECORD +0 -31
- agent_brain_rag-1.1.0.dist-info/entry_points.txt +0 -3
- doc_serve_server/indexing/__init__.py +0 -19
- doc_serve_server/indexing/embedding.py +0 -274
- doc_serve_server/storage/__init__.py +0 -5
- {agent_brain_rag-1.1.0.dist-info → agent_brain_rag-2.0.0.dist-info}/WHEEL +0 -0
- {doc_serve_server → agent_brain_server}/api/__init__.py +0 -0
- {doc_serve_server → agent_brain_server}/api/routers/__init__.py +0 -0
- {doc_serve_server → agent_brain_server}/config/__init__.py +0 -0
- {doc_serve_server → agent_brain_server}/indexing/document_loader.py +0 -0
- {doc_serve_server → agent_brain_server}/locking.py +0 -0
- {doc_serve_server → agent_brain_server}/models/index.py +0 -0
- {doc_serve_server → agent_brain_server}/project_root.py +0 -0
- {doc_serve_server → agent_brain_server}/runtime.py +0 -0
- {doc_serve_server → agent_brain_server}/services/__init__.py +0 -0
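
Taken together, the renames above show the Python package moving from `doc_serve_server` to `agent_brain_server`, and the single OpenAI/Anthropic-coupled embedding module (deleted below) being split into a pluggable `providers/` package: per-vendor embedding backends (OpenAI, Cohere, Ollama), per-vendor summarization backends (OpenAI, Anthropic, Gemini, Grok, Ollama), a shared `base.py`, and a `factory.py`. The diff lists only file names, so the following is a hypothetical sketch of what such a layout typically implies; only the module paths come from the diff, and every class and function name below is an assumption, not the package's actual API.

```python
# Hypothetical sketch of the provider pattern implied by the 2.0.0 layout
# (providers/base.py, providers/factory.py, providers/embedding/*).
# All names here are assumed; only the module paths appear in the diff.
from abc import ABC, abstractmethod


class EmbeddingProvider(ABC):
    """Assumed shared interface, in the spirit of providers/base.py."""

    @abstractmethod
    async def embed_texts(self, texts: list[str]) -> list[list[float]]:
        """Return one embedding vector per input text."""


class StubEmbeddingProvider(EmbeddingProvider):
    """Offline stand-in so this sketch runs without API keys."""

    async def embed_texts(self, texts: list[str]) -> list[list[float]]:
        return [[0.0, 0.0, 0.0] for _ in texts]


# In the real package a registry like this would presumably map names such
# as "openai", "cohere", and "ollama" to the classes in providers/embedding/.
_REGISTRY: dict[str, type[EmbeddingProvider]] = {"stub": StubEmbeddingProvider}


def create_embedding_provider(name: str) -> EmbeddingProvider:
    """Assumed factory role (cf. providers/factory.py)."""
    try:
        return _REGISTRY[name]()
    except KeyError:
        raise ValueError(f"unknown embedding provider: {name!r}") from None
```

Under this reading, the concrete backend is chosen by configuration (cf. the new `config/provider_config.py`) rather than by hard-coded imports, which is what lets 2.0.0 add Cohere, Ollama, Gemini, and Grok backends without touching the indexing code.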
doc_serve_server/indexing/embedding.py (removed in 2.0.0):

```diff
@@ -1,274 +0,0 @@
-"""Embedding generation using OpenAI's text-embedding models."""
-
-import logging
-from collections.abc import Awaitable, Callable
-from typing import Optional
-
-from anthropic import AsyncAnthropic
-from openai import AsyncOpenAI
-
-from doc_serve_server.config import settings
-
-from .chunking import TextChunk
-
-logger = logging.getLogger(__name__)
-
-
-class EmbeddingGenerator:
-    """
-    Generates embeddings using OpenAI's embedding models.
-
-    Supports batch processing with configurable batch sizes
-    and automatic retry on rate limits.
-    """
-
-    def __init__(
-        self,
-        api_key: Optional[str] = None,
-        model: Optional[str] = None,
-        batch_size: Optional[int] = None,
-    ):
-        """
-        Initialize the embedding generator.
-
-        Args:
-            api_key: OpenAI API key. Defaults to config value.
-            model: Embedding model name. Defaults to config value.
-            batch_size: Number of texts to embed per API call. Defaults to 100.
-        """
-        self.model = model or settings.EMBEDDING_MODEL
-        self.batch_size = batch_size or settings.EMBEDDING_BATCH_SIZE
-
-        # Initialize OpenAI async client
-        self.client = AsyncOpenAI(
-            api_key=api_key or settings.OPENAI_API_KEY,
-        )
-
-        # Initialize Anthropic client for summarization
-        self.anthropic_client = AsyncAnthropic(
-            api_key=settings.ANTHROPIC_API_KEY,
-        )
-
-        # Initialize prompt template
-        self.summary_prompt_template = (
-            "You are an expert software engineer analyzing source code. "
-            "Provide a concise 1-2 sentence summary of what this code does. "
-            "Focus on the functionality, purpose, and behavior. "
-            "Be specific about inputs, outputs, and side effects. "
-            "Ignore implementation details and focus on what the code accomplishes.\n\n"
-            "Code to summarize:\n{context_str}\n\n"
-            "Summary:"
-        )
-
-    async def embed_text(self, text: str) -> list[float]:
-        """
-        Generate embedding for a single text.
-
-        Args:
-            text: Text to embed.
-
-        Returns:
-            Embedding vector as list of floats.
-        """
-        response = await self.client.embeddings.create(
-            model=self.model,
-            input=text,
-        )
-        return response.data[0].embedding
-
-    async def embed_texts(
-        self,
-        texts: list[str],
-        progress_callback: Optional[Callable[[int, int], Awaitable[None]]] = None,
-    ) -> list[list[float]]:
-        """
-        Generate embeddings for multiple texts.
-
-        Args:
-            texts: List of texts to embed.
-            progress_callback: Optional callback(processed, total) for progress.
-
-        Returns:
-            List of embedding vectors.
-        """
-        if not texts:
-            return []
-
-        all_embeddings: list[list[float]] = []
-
-        # Process in batches to respect API limits
-        for i in range(0, len(texts), self.batch_size):
-            batch = texts[i : i + self.batch_size]
-
-            try:
-                response = await self.client.embeddings.create(
-                    model=self.model,
-                    input=batch,
-                )
-
-                # Extract embeddings in order
-                batch_embeddings = [item.embedding for item in response.data]
-                all_embeddings.extend(batch_embeddings)
-
-                if progress_callback:
-                    await progress_callback(
-                        min(i + self.batch_size, len(texts)),
-                        len(texts),
-                    )
-
-                logger.debug(
-                    f"Generated embeddings for batch {i // self.batch_size + 1} "
-                    f"({len(batch)} texts)"
-                )
-
-            except Exception as e:
-                logger.error(f"Failed to generate embeddings for batch: {e}")
-                raise
-
-        return all_embeddings
-
-    async def embed_chunks(
-        self,
-        chunks: list[TextChunk],
-        progress_callback: Optional[Callable[[int, int], Awaitable[None]]] = None,
-    ) -> list[list[float]]:
-        """
-        Generate embeddings for a list of text chunks.
-
-        Args:
-            chunks: List of TextChunk objects.
-            progress_callback: Optional callback for progress updates.
-
-        Returns:
-            List of embedding vectors corresponding to each chunk.
-        """
-        texts = [chunk.text for chunk in chunks]
-        return await self.embed_texts(texts, progress_callback)
-
-    async def embed_query(self, query: str) -> list[float]:
-        """
-        Generate embedding for a search query.
-
-        This is a convenience wrapper around embed_text for queries.
-
-        Args:
-            query: The search query text.
-
-        Returns:
-            Query embedding vector.
-        """
-        return await self.embed_text(query)
-
-    def get_embedding_dimensions(self) -> int:
-        """
-        Get the expected embedding dimensions for the current model.
-
-        Returns:
-            Number of dimensions in the embedding vector.
-        """
-        # Known dimensions for OpenAI models
-        model_dimensions = {
-            "text-embedding-3-large": 3072,
-            "text-embedding-3-small": 1536,
-            "text-embedding-ada-002": 1536,
-        }
-        return model_dimensions.get(self.model, settings.EMBEDDING_DIMENSIONS)
-
-    def _get_summary_prompt_template(self) -> str:
-        """
-        Get the prompt template for code summarization.
-
-        Returns:
-            Prompt template string.
-        """
-        template = (
-            "You are an expert software engineer analyzing source code. "
-            "Provide a concise 1-2 sentence summary of what this code does. "
-            "Focus on the functionality, purpose, and behavior. "
-            "Be specific about inputs, outputs, and side effects. "
-            "Ignore implementation details and focus on what the code accomplishes.\n\n"
-            "Code to summarize:\n{context_str}\n\n"
-            "Summary:"
-        )
-        return template
-
-    async def generate_summary(self, code_text: str) -> str:
-        """
-        Generate a natural language summary of code using Claude.
-
-        Args:
-            code_text: The source code to summarize.
-
-        Returns:
-            Natural language summary of the code's functionality.
-        """
-        try:
-            # Use Claude directly with custom prompt
-            prompt = self.summary_prompt_template.format(context_str=code_text)
-
-            response = await self.anthropic_client.messages.create(
-                model=settings.CLAUDE_MODEL,
-                max_tokens=300,
-                temperature=0.1,  # Low temperature for consistent summaries
-                messages=[{"role": "user", "content": prompt}],
-            )
-
-            # Extract text from Claude response
-            summary = response.content[0].text  # type: ignore
-
-            if summary and len(summary) > 10:  # Ensure we got a meaningful summary
-                return summary
-            else:
-                logger.warning("Claude returned empty or too short summary")
-                return self._extract_fallback_summary(code_text)
-
-        except Exception as e:
-            logger.error(f"Failed to generate code summary: {e}")
-            # Fallback: try to extract from docstrings/comments
-            return self._extract_fallback_summary(code_text)
-
-    def _extract_fallback_summary(self, code_text: str) -> str:
-        """
-        Extract summary from docstrings or comments as fallback.
-
-        Args:
-            code_text: Source code to analyze.
-
-        Returns:
-            Extracted summary or empty string.
-        """
-        import re
-
-        # Try to find Python docstrings
-        docstring_match = re.search(r'""".*?"""', code_text, re.DOTALL)
-        if docstring_match:
-            docstring = docstring_match.group(0)[3:-3]  # Remove leading/trailing """
-            if len(docstring) > 10:  # Only use if substantial
-                return docstring[:200] + "..." if len(docstring) > 200 else docstring
-
-        # Try to find function/class comments
-        comment_match = re.search(
-            r"#.*(?:function|class|method|def)", code_text, re.IGNORECASE
-        )
-        if comment_match:
-            return comment_match.group(0).strip("#").strip()
-
-        # Last resort: first line if it looks like a comment
-        lines = code_text.strip().split("\n")
-        first_line = lines[0].strip()
-        if first_line.startswith(("#", "//", "/*")):
-            return first_line.lstrip("#/*").strip()
-
-        return ""  # No summary available
-
-
-# Singleton instance
-_embedding_generator: Optional[EmbeddingGenerator] = None
-
-
-def get_embedding_generator() -> EmbeddingGenerator:
-    """Get the global embedding generator instance."""
-    global _embedding_generator
-    if _embedding_generator is None:
-        _embedding_generator = EmbeddingGenerator()
-    return _embedding_generator
```