cognee 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognee/api/v1/save/save.py +335 -0
- cognee/api/v1/search/routers/get_search_router.py +3 -3
- cognee/api/v1/ui/ui.py +116 -21
- cognee/cli/_cognee.py +27 -18
- cognee/modules/retrieval/graph_completion_context_extension_retriever.py +1 -1
- cognee/modules/retrieval/graph_completion_cot_retriever.py +1 -1
- cognee/modules/retrieval/graph_completion_retriever.py +1 -1
- cognee/modules/retrieval/insights_retriever.py +12 -11
- cognee/modules/retrieval/temporal_retriever.py +1 -1
- cognee/modules/search/methods/search.py +31 -8
- cognee/tests/test_permissions.py +3 -3
- cognee/tests/test_relational_db_migration.py +3 -5
- cognee/tests/test_save_export_path.py +116 -0
- cognee/tests/test_search_db.py +10 -7
- cognee/tests/unit/modules/retrieval/graph_completion_retriever_context_extension_test.py +12 -6
- cognee/tests/unit/modules/retrieval/graph_completion_retriever_cot_test.py +12 -6
- cognee/tests/unit/modules/retrieval/insights_retriever_test.py +2 -4
- {cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/METADATA +1 -1
- {cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/RECORD +31 -29
- /cognee/tests/{integration/cli → cli_tests/cli_integration_tests}/__init__.py +0 -0
- /cognee/tests/{integration/cli → cli_tests/cli_integration_tests}/test_cli_integration.py +0 -0
- /cognee/tests/{unit/cli → cli_tests/cli_unit_tests}/__init__.py +0 -0
- /cognee/tests/{unit/cli → cli_tests/cli_unit_tests}/test_cli_commands.py +0 -0
- /cognee/tests/{unit/cli → cli_tests/cli_unit_tests}/test_cli_edge_cases.py +0 -0
- /cognee/tests/{unit/cli → cli_tests/cli_unit_tests}/test_cli_main.py +0 -0
- /cognee/tests/{unit/cli → cli_tests/cli_unit_tests}/test_cli_runner.py +0 -0
- /cognee/tests/{unit/cli → cli_tests/cli_unit_tests}/test_cli_utils.py +0 -0
- {cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/WHEEL +0 -0
- {cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/entry_points.txt +0 -0
- {cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/licenses/NOTICE.md +0 -0
cognee/api/v1/save/save.py
ADDED

@@ -0,0 +1,335 @@
+import os
+import asyncio
+import json
+from typing import Optional, Union, List, Dict
+from uuid import UUID
+
+from pydantic import BaseModel
+
+from cognee.base_config import get_base_config
+from cognee.modules.users.models import User
+from cognee.modules.users.methods import get_default_user
+from cognee.modules.data.methods import get_authorized_existing_datasets, get_dataset_data
+from cognee.infrastructure.files.utils.get_data_file_path import get_data_file_path
+from cognee.infrastructure.llm.LLMGateway import LLMGateway
+from cognee.shared.logging_utils import get_logger
+from cognee.api.v1.search import search
+from cognee.modules.search.types import SearchType
+
+
+logger = get_logger("save")
+
+
+class QuestionsModel(BaseModel):
+    questions: List[str]
+
+
+def _sanitize_filename(name: str) -> str:
+    safe = "".join(c if c.isalnum() or c in ("-", "_", ".", " ") else "_" for c in name)
+    return safe.strip().replace(" ", "_")
+
+
+def _dataset_dir_name(dataset) -> str:
+    # Prefer readable dataset name when available, fallback to id
+    if getattr(dataset, "name", None):
+        return _sanitize_filename(str(dataset.name))
+    return str(dataset.id)
+
+
+def _file_markdown_name(data_item, used_names: set[str]) -> str:
+    # Use original file name if present, else data.name
+    name = getattr(data_item, "name", None) or "file"
+    base = _sanitize_filename(str(name))
+    filename = f"{base}.md"
+    if filename in used_names:
+        short_id = str(getattr(data_item, "id", ""))[:8]
+        filename = f"{base}__{short_id}.md"
+    used_names.add(filename)
+    return filename
+
+
+def _ascii_path_tree(path_str: str) -> str:
+    if not path_str:
+        return "(no path)"
+
+    # Normalize special schemes but keep segments readable
+    try:
+        normalized = get_data_file_path(path_str)
+    except Exception:
+        normalized = path_str
+
+    # Keep the path compact – show last 5 segments
+    parts = [p for p in normalized.replace("\\", "/").split("/") if p]
+    if len(parts) > 6:
+        display = ["…"] + parts[-5:]
+    else:
+        display = parts
+
+    # Render a single-branch tree
+    lines = []
+    for idx, seg in enumerate(display):
+        prefix = "└── " if idx == 0 else (" " * idx + "└── ")
+        lines.append(f"{prefix}{seg}")
+    return "\n".join(lines)
+
+
+async def _get_summary_via_summaries(query_text: str, dataset_id: UUID, top_k: int) -> str:
+    try:
+        results = await search(
+            query_text=query_text,
+            query_type=SearchType.SUMMARIES,
+            dataset_ids=[dataset_id],
+            top_k=top_k,
+        )
+        if not results:
+            return ""
+        texts: List[str] = []
+        for r in results[:top_k]:
+            texts.append(str(r))
+        return "\n\n".join(texts)
+    except Exception as e:
+        logger.error(
+            "SUMMARIES search failed for '%s' in dataset %s: %s",
+            query_text,
+            str(dataset_id),
+            str(e),
+        )
+        return ""
+
+
+async def _generate_questions(file_name: str, summary_text: str) -> List[str]:
+    prompt = (
+        "You are an expert analyst. Given a file and its summary, propose 10 diverse, high-signal "
+        "questions to further explore the file's content, implications, relationships, and gaps. "
+        "Avoid duplicates; vary depth and angle (overview, details, cross-references, temporal, quality).\n\n"
+        f"File: {file_name}\n\nSummary:\n{summary_text[:4000]}"
+    )
+
+    model = await LLMGateway.acreate_structured_output(
+        text_input=prompt,
+        system_prompt="Return strictly a JSON with key 'questions' and value as an array of 10 concise strings.",
+        response_model=QuestionsModel,
+    )
+
+    # model can be either pydantic model or dict-like, normalize
+    try:
+        questions = list(getattr(model, "questions", []))
+    except Exception:
+        questions = []
+
+    # Fallback if the tool returned a dict-like
+    if not questions and isinstance(model, dict):
+        questions = list(model.get("questions", []) or [])
+
+    # Enforce 10 max
+    return questions[:10]
+
+
+async def _run_searches_for_question(
+    question: str, dataset_id: UUID, search_types: List[SearchType], top_k: int
+) -> Dict[str, Union[str, List[dict], List[str]]]:
+    async def run_one(st: SearchType):
+        try:
+            result = await search(
+                query_text=question,
+                query_type=st,
+                dataset_ids=[dataset_id],
+                top_k=top_k,
+            )
+            return st.value, result
+        except Exception as e:
+            logger.error("Search failed for type %s: %s", st.value, str(e))
+            return st.value, [f"Error: {str(e)}"]
+
+    pairs = await asyncio.gather(*[run_one(st) for st in search_types])
+    return {k: v for k, v in pairs}
+
+
+def _format_results_md(results: Dict[str, Union[str, List[dict], List[str]]]) -> str:
+    lines: List[str] = []
+    for st, payload in results.items():
+        lines.append(f"#### {st}")
+        if isinstance(payload, list):
+            # Printed as bullet items; stringify dicts
+            for item in payload[:5]:
+                if isinstance(item, dict):
+                    # compact representation
+                    snippet = json.dumps(item, ensure_ascii=False)[:800]
+                    lines.append(f"- {snippet}")
+                else:
+                    text = str(item)
+                    lines.append(f"- {text[:800]}")
+        else:
+            lines.append(str(payload))
+        lines.append("")
+    return "\n".join(lines)
+
+
+async def save(
+    datasets: Optional[Union[List[str], List[UUID]]] = None,
+    export_root_directory: Optional[str] = None,
+    user: Optional[User] = None,
+    # Configurable knobs
+    max_questions: int = 10,
+    search_types: Optional[List[Union[str, SearchType]]] = None,
+    top_k: int = 5,
+    include_summary: bool = True,
+    include_ascii_tree: bool = True,
+    concurrency: int = 4,
+    timeout: Optional[float] = None,
+) -> Dict[str, str]:
+    """
+    Export per-dataset markdown summaries and search insights for each ingested file.
+
+    For every dataset the user can read:
+    - Create a folder under export_root_directory (or data_root_directory/exports)
+    - For each data item (file), create a .md containing:
+        - Summary of the file (from existing TextSummary nodes)
+        - A small ASCII path tree showing its folder position
+        - Up to N LLM-generated question ideas (configurable)
+        - Results of configured Cognee searches per question
+    Also creates an index.md per dataset with links to files and an optional dataset summary.
+
+    Returns a mapping of dataset_id -> export_directory path.
+    """
+    base_config = get_base_config()
+    export_root = export_root_directory or os.path.join(
+        base_config.data_root_directory, "memory_export"
+    )
+    os.makedirs(export_root, exist_ok=True)
+
+    if user is None:
+        user = await get_default_user()
+
+    datasets_list = await get_authorized_existing_datasets(datasets, "read", user)
+    results: Dict[str, str] = {}
+
+    for dataset in datasets_list:
+        ds_dir = os.path.join(export_root, _dataset_dir_name(dataset))
+        os.makedirs(ds_dir, exist_ok=True)
+        results[str(dataset.id)] = ds_dir
+
+        data_items = await get_dataset_data(dataset.id)
+
+        # Normalize search types
+        if not search_types:
+            effective_search_types = [
+                SearchType.GRAPH_COMPLETION,
+                SearchType.INSIGHTS,
+                SearchType.CHUNKS,
+            ]
+        else:
+            effective_search_types = []
+            for st in search_types:
+                if isinstance(st, SearchType):
+                    effective_search_types.append(st)
+                else:
+                    try:
+                        effective_search_types.append(SearchType[str(st)])
+                    except Exception:
+                        logger.warning("Unknown search type '%s', skipping", str(st))
+
+        sem = asyncio.Semaphore(max(1, int(concurrency)))
+        used_names: set[str] = set()
+        index_entries: List[tuple[str, str]] = []
+
+        async def process_one(data_item):
+            async with sem:
+                file_label = getattr(data_item, "name", str(data_item.id))
+                original_path = getattr(data_item, "original_data_location", None)
+
+                ascii_tree = (
+                    _ascii_path_tree(original_path or file_label) if include_ascii_tree else ""
+                )
+
+                summary_text = ""
+                if include_summary:
+                    # Use SUMMARIES search scoped to dataset to derive file summary
+                    file_query = getattr(data_item, "name", str(data_item.id)) or "file"
+                    summary_text = await _get_summary_via_summaries(file_query, dataset.id, top_k)
+                    if not summary_text:
+                        summary_text = "Summary not available."
+
+                if max_questions == 0:
+                    questions = []
+                else:
+                    questions = await _generate_questions(file_label, summary_text)
+                    if max_questions is not None and max_questions >= 0:
+                        questions = questions[:max_questions]
+
+                async def searches_for_question(q: str):
+                    return await _run_searches_for_question(
+                        q, dataset.id, effective_search_types, top_k
+                    )
+
+                # Run per-question searches concurrently
+                per_q_results = await asyncio.gather(*[searches_for_question(q) for q in questions])
+
+                # Build markdown content
+                md_lines = [f"# {file_label}", ""]
+                if include_ascii_tree:
+                    md_lines.extend(["## Location", "", "```", ascii_tree, "```", ""])
+                if include_summary:
+                    md_lines.extend(["## Summary", "", summary_text, ""])
+
+                md_lines.append("## Question ideas")
+                for idx, q in enumerate(questions, start=1):
+                    md_lines.append(f"- {idx}. {q}")
+                md_lines.append("")
+
+                md_lines.append("## Searches")
+                md_lines.append("")
+                for q, per_type in zip(questions, per_q_results):
+                    md_lines.append(f"### Q: {q}")
+                    md_lines.append(_format_results_md(per_type))
+                    md_lines.append("")
+
+                # Write to file (collision-safe)
+                md_filename = _file_markdown_name(data_item, used_names)
+                export_path = os.path.join(ds_dir, md_filename)
+                tmp_path = export_path + ".tmp"
+                with open(tmp_path, "w", encoding="utf-8") as f:
+                    f.write("\n".join(md_lines))
+                os.replace(tmp_path, export_path)
+
+                index_entries.append((file_label, md_filename))
+
+        tasks = [asyncio.create_task(process_one(item)) for item in data_items]
+
+        if timeout and timeout > 0:
+            try:
+                await asyncio.wait_for(asyncio.gather(*tasks, return_exceptions=True), timeout)
+            except asyncio.TimeoutError:
+                logger.error("Save timed out for dataset %s", str(dataset.id))
+        else:
+            await asyncio.gather(*tasks, return_exceptions=True)
+
+        # Build dataset index.md with TOC and optional dataset summary via SUMMARIES
+        try:
+            index_lines = [f"# Dataset: {_dataset_dir_name(dataset)}", "", "## Files", ""]
+            for display, fname in sorted(index_entries, key=lambda x: x[0].lower()):
+                index_lines.append(f"- [{display}]({fname})")
+
+            # Dataset summary section
+            try:
+                summaries = await search(
+                    query_text="dataset overview",
+                    query_type=SearchType.SUMMARIES,
+                    dataset_ids=[dataset.id],
+                    top_k=top_k,
+                )
+            except Exception as e:
+                logger.error("Dataset summary search failed: %s", str(e))
+                summaries = []
+
+            if summaries:
+                index_lines.extend(["", "## Dataset summary (top summaries)", ""])
+                for s in summaries[:top_k]:
+                    index_lines.append(f"- {str(s)[:800]}")
+
+            with open(os.path.join(ds_dir, "index.md"), "w", encoding="utf-8") as f:
+                f.write("\n".join(index_lines))
+        except Exception as e:
+            logger.error("Failed to write dataset index for %s: %s", str(dataset.id), str(e))
+
+    return results
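For orientation, a minimal sketch of calling the new export entry point. It assumes `save` is importable from the module added above and that at least one dataset has already been added and cognified; the export path is a hypothetical example:

    import asyncio

    from cognee.api.v1.save.save import save


    async def main():
        # Export every dataset the default user can read, capping the
        # LLM-generated questions and the per-question search fan-out.
        exported = await save(
            export_root_directory="./memory_export",  # hypothetical path
            max_questions=3,
            search_types=["GRAPH_COMPLETION", "CHUNKS"],
            top_k=2,
        )
        for dataset_id, export_dir in exported.items():
            print(dataset_id, "->", export_dir)


    asyncio.run(main())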
cognee/api/v1/search/routers/get_search_router.py
CHANGED

@@ -1,12 +1,12 @@
 from uuid import UUID
-from typing import Optional
+from typing import Optional, Union, List, Any
 from datetime import datetime
 from pydantic import Field
 from fastapi import Depends, APIRouter
 from fastapi.responses import JSONResponse
 from fastapi.encoders import jsonable_encoder
 
-from cognee.modules.search.types import SearchType
+from cognee.modules.search.types import SearchType, SearchResult, CombinedSearchResult
 from cognee.api.DTO import InDTO, OutDTO
 from cognee.modules.users.exceptions.exceptions import PermissionDeniedError
 from cognee.modules.users.models import User
@@ -73,7 +73,7 @@ def get_search_router() -> APIRouter:
         except Exception as error:
             return JSONResponse(status_code=500, content={"error": str(error)})
 
-    @router.post("", response_model=
+    @router.post("", response_model=Union[List[SearchResult], CombinedSearchResult, List])
     async def search(payload: SearchPayloadDTO, user: User = Depends(get_authenticated_user)):
         """
         Search for nodes in the graph database.
cognee/api/v1/ui/ui.py
CHANGED
@@ -7,7 +7,7 @@ import webbrowser
 import zipfile
 import requests
 from pathlib import Path
-from typing import Optional, Tuple
+from typing import Callable, Optional, Tuple
 import tempfile
 import shutil
 
@@ -326,38 +326,93 @@ def prompt_user_for_download() -> bool:
 
 
 def start_ui(
+    pid_callback: Callable[[int], None],
     host: str = "localhost",
     port: int = 3000,
     open_browser: bool = True,
     auto_download: bool = False,
+    start_backend: bool = False,
+    backend_host: str = "localhost",
+    backend_port: int = 8000,
 ) -> Optional[subprocess.Popen]:
     """
-    Start the cognee frontend UI server.
+    Start the cognee frontend UI server, optionally with the backend API server.
 
     This function will:
-    1.
-    2.
-    3.
-    4.
-    5.
+    1. Optionally start the cognee backend API server
+    2. Find the cognee-frontend directory (development) or download it (pip install)
+    3. Check if Node.js and npm are available (for development mode)
+    4. Install dependencies if needed (development mode)
+    5. Start the frontend server
+    6. Optionally open the browser
 
     Args:
-
-
+        pid_callback: Callback to notify with PID of each spawned process
+        host: Host to bind the frontend server to (default: localhost)
+        port: Port to run the frontend server on (default: 3000)
         open_browser: Whether to open the browser automatically (default: True)
         auto_download: If True, download frontend without prompting (default: False)
+        start_backend: If True, also start the cognee API backend server (default: False)
+        backend_host: Host to bind the backend server to (default: localhost)
+        backend_port: Port to run the backend server on (default: 8000)
 
     Returns:
-        subprocess.Popen object representing the running server, or None if failed
+        subprocess.Popen object representing the running frontend server, or None if failed
+        Note: If backend is started, it runs in a separate process that will be cleaned up
+        when the frontend process is terminated.
 
     Example:
        >>> import cognee
+        >>> # Start just the frontend
        >>> server = cognee.start_ui()
+        >>>
+        >>> # Start both frontend and backend
+        >>> server = cognee.start_ui(start_backend=True)
        >>> # UI will be available at http://localhost:3000
-        >>> #
+        >>> # API will be available at http://localhost:8000
+        >>> # To stop both servers later:
        >>> server.terminate()
     """
     logger.info("Starting cognee UI...")
+    backend_process = None
+
+    # Start backend server if requested
+    if start_backend:
+        logger.info("Starting cognee backend API server...")
+        try:
+            import sys
+
+            backend_process = subprocess.Popen(
+                [
+                    sys.executable,
+                    "-m",
+                    "uvicorn",
+                    "cognee.api.client:app",
+                    "--host",
+                    backend_host,
+                    "--port",
+                    str(backend_port),
+                ],
+                # Inherit stdout/stderr from parent process to show logs
+                stdout=None,
+                stderr=None,
+                preexec_fn=os.setsid if hasattr(os, "setsid") else None,
+            )
+
+            pid_callback(backend_process.pid)
+
+            # Give the backend a moment to start
+            time.sleep(2)
+
+            if backend_process.poll() is not None:
+                logger.error("Backend server failed to start - process exited early")
+                return None
+
+            logger.info(f"✓ Backend API started at http://{backend_host}:{backend_port}")
+
+        except Exception as e:
+            logger.error(f"Failed to start backend server: {str(e)}")
+            return None
 
     # Find frontend directory
     frontend_path = find_frontend_path()
@@ -406,7 +461,7 @@ def start_ui(
     logger.info("This may take a moment to compile and start...")
 
     try:
-        #
+        # Create frontend in its own process group for clean termination
        process = subprocess.Popen(
            ["npm", "run", "dev"],
            cwd=frontend_path,
@@ -414,11 +469,11 @@ def start_ui(
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
-            preexec_fn=os.setsid
-            if hasattr(os, "setsid")
-            else None,  # Create new process group on Unix
+            preexec_fn=os.setsid if hasattr(os, "setsid") else None,
        )
 
+        pid_callback(process.pid)
+
        # Give it a moment to start up
        time.sleep(3)
 
@@ -447,16 +502,32 @@ def start_ui(
        logger.info(f"✓ Open your browser to: http://{host}:{port}")
        logger.info("✓ The UI will be available once Next.js finishes compiling")
 
+        # Store backend process reference in the frontend process for cleanup
+        if backend_process:
+            process._cognee_backend_process = backend_process
+
        return process
 
    except Exception as e:
        logger.error(f"Failed to start frontend server: {str(e)}")
+        # Clean up backend process if it was started
+        if backend_process:
+            logger.info("Cleaning up backend process due to frontend failure...")
+            try:
+                backend_process.terminate()
+                backend_process.wait(timeout=5)
+            except (subprocess.TimeoutExpired, OSError, ProcessLookupError):
+                try:
+                    backend_process.kill()
+                    backend_process.wait()
+                except (OSError, ProcessLookupError):
+                    pass
        return None
 
 
 def stop_ui(process: subprocess.Popen) -> bool:
    """
-    Stop a running UI server process and all
+    Stop a running UI server process and backend process (if started), along with all their children.
 
    Args:
        process: The subprocess.Popen object returned by start_ui()
@@ -467,7 +538,29 @@ def stop_ui(process: subprocess.Popen) -> bool:
    if not process:
        return False
 
+    success = True
+
    try:
+        # First, stop the backend process if it exists
+        backend_process = getattr(process, "_cognee_backend_process", None)
+        if backend_process:
+            logger.info("Stopping backend server...")
+            try:
+                backend_process.terminate()
+                try:
+                    backend_process.wait(timeout=5)
+                    logger.info("Backend server stopped gracefully")
+                except subprocess.TimeoutExpired:
+                    logger.warning("Backend didn't terminate gracefully, forcing kill")
+                    backend_process.kill()
+                    backend_process.wait()
+                    logger.info("Backend server stopped")
+            except Exception as e:
+                logger.error(f"Error stopping backend server: {str(e)}")
+                success = False
+
+        # Now stop the frontend process
+        logger.info("Stopping frontend server...")
        # Try to terminate the process group (includes child processes like Next.js)
        if hasattr(os, "killpg"):
            try:
@@ -484,9 +577,9 @@ def stop_ui(process: subprocess.Popen) -> bool:
 
        try:
            process.wait(timeout=10)
-            logger.info("
+            logger.info("Frontend server stopped gracefully")
        except subprocess.TimeoutExpired:
-            logger.warning("
+            logger.warning("Frontend didn't terminate gracefully, forcing kill")
 
            # Force kill the process group
            if hasattr(os, "killpg"):
@@ -502,11 +595,13 @@ def stop_ui(process: subprocess.Popen) -> bool:
 
        process.wait()
 
-
-
+        if success:
+            logger.info("UI servers stopped successfully")
+
+        return success
 
    except Exception as e:
-        logger.error(f"Error stopping UI
+        logger.error(f"Error stopping UI servers: {str(e)}")
        return False
 
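A hedged sketch of driving the new signature: `pid_callback` is now the first (required) parameter, so callers collect PIDs themselves. It assumes `start_ui` remains exported on the `cognee` package as the docstring shows, and that `stop_ui` is importable from the same module:

    import cognee
    from cognee.api.v1.ui.ui import stop_ui

    spawned_pids = []

    server = cognee.start_ui(
        pid_callback=spawned_pids.append,  # record the PID of each spawned process
        start_backend=True,                # also launch the API at http://localhost:8000
    )

    if server:
        print("frontend PID:", server.pid, "all PIDs:", spawned_pids)
        # stop_ui() now tears down the backend (if started) before the frontend.
        stop_ui(server)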
cognee/cli/_cognee.py
CHANGED
@@ -174,30 +174,23 @@ def main() -> int:
 
     # Handle UI flag
     if hasattr(args, "start_ui") and args.start_ui:
-
+        spawned_pids = []
 
         def signal_handler(signum, frame):
             """Handle Ctrl+C and other termination signals"""
-            nonlocal
+            nonlocal spawned_pids
             fmt.echo("\nShutting down UI server...")
-
+
+            for pid in spawned_pids:
                 try:
-
-
-
-
-
-
-                    # If graceful termination fails, force kill
-                    fmt.echo("Force stopping UI server...")
-                    server_process.kill()
-                    server_process.wait()
-                    fmt.success("UI server stopped.")
-                except Exception as e:
-                    fmt.warning(f"Error stopping server: {e}")
+                    pgid = os.getpgid(pid)
+                    os.killpg(pgid, signal.SIGTERM)
+                    fmt.success(f"✓ Process group {pgid} (PID {pid}) terminated.")
+                except (OSError, ProcessLookupError) as e:
+                    fmt.warning(f"Could not terminate process {pid}: {e}")
 
             sys.exit(0)
 
-        # Set up signal handlers
         signal.signal(signal.SIGINT, signal_handler)  # Ctrl+C
         signal.signal(signal.SIGTERM, signal_handler)  # Termination request
 
@@ -205,11 +198,25 @@ def main() -> int:
             from cognee import start_ui
 
             fmt.echo("Starting cognee UI...")
-
+
+            # Callback to capture PIDs of all spawned processes
+            def pid_callback(pid):
+                nonlocal spawned_pids
+                spawned_pids.append(pid)
+
+            server_process = start_ui(
+                host="localhost",
+                port=3000,
+                open_browser=True,
+                start_backend=True,
+                auto_download=True,
+                pid_callback=pid_callback,
+            )
 
             if server_process:
                 fmt.success("UI server started successfully!")
                 fmt.echo("The interface is available at: http://localhost:3000")
+                fmt.echo("The API backend is available at: http://localhost:8000")
                 fmt.note("Press Ctrl+C to stop the server...")
 
                 try:
@@ -225,10 +232,12 @@ def main() -> int:
                     return 0
             else:
                 fmt.error("Failed to start UI server. Check the logs above for details.")
+                signal_handler(signal.SIGTERM, None)
                 return 1
 
         except Exception as ex:
             fmt.error(f"Error starting UI: {str(ex)}")
+            signal_handler(signal.SIGTERM, None)
             if debug.is_debug_enabled():
                 raise ex
             return 1
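The shutdown path above reduces to one reusable pattern; a hypothetical helper mirroring it (not part of the package):

    import os
    import signal


    def terminate_all(spawned_pids):
        # Signal each recorded process group so children (npm, uvicorn workers)
        # go down together with the process that spawned them.
        for pid in spawned_pids:
            try:
                os.killpg(os.getpgid(pid), signal.SIGTERM)
            except (OSError, ProcessLookupError):
                pass  # the process already exited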
cognee/modules/retrieval/graph_completion_retriever.py
CHANGED

@@ -171,7 +171,7 @@ class GraphCompletionRetriever(BaseGraphRetriever):
             question=query, answer=completion, context=context_text, triplets=triplets
         )
 
-        return completion
+        return [completion]
 
     async def save_qa(self, question: str, answer: str, context: str, triplets: List) -> None:
         """
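Downstream code must now index into the result; a sketch of the adjusted caller (variable names are illustrative, and the same change is listed for the CoT and context-extension retrievers):

    answers = await retriever.get_completion("Who works at Canva?")

    # get_completion now returns a single-element list instead of a bare string
    assert isinstance(answers, list)
    completion_text = answers[0]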
cognee/modules/retrieval/insights_retriever.py
CHANGED

@@ -96,17 +96,18 @@ class InsightsRetriever(BaseGraphRetriever):
                 unique_node_connections_map[unique_id] = True
                 unique_node_connections.append(node_connection)
 
-        return
-
-
-
-
-
-
-
-
-
-
+        return unique_node_connections
+        # return [
+        #     Edge(
+        #         node1=Node(node_id=connection[0]["id"], attributes=connection[0]),
+        #         node2=Node(node_id=connection[2]["id"], attributes=connection[2]),
+        #         attributes={
+        #             **connection[1],
+        #             "relationship_type": connection[1]["relationship_name"],
+        #         },
+        #     )
+        #     for connection in unique_node_connections
+        # ]
 
     async def get_completion(self, query: str, context: Optional[Any] = None) -> Any:
         """
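Callers now receive the raw node-edge-node connections rather than wrapped Edge objects; a sketch of the access pattern the updated unit tests below rely on:

    context = await retriever.get_context("Mike")

    # Each connection is a (source_node, edge, target_node) triple of plain
    # dicts, so node attributes are reached by position.
    source_node = context[0][0]
    print(source_node["name"])  # e.g. "Mike Broski" in the test fixtures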
cognee/modules/search/methods/search.py
CHANGED

@@ -132,14 +132,37 @@ async def search(
             ],
         )
     else:
-
-
-
-
-
-
-
-
+        # This is for maintaining backwards compatibility
+        if os.getenv("ENABLE_BACKEND_ACCESS_CONTROL", "false").lower() == "true":
+            return_value = []
+            for search_result in search_results:
+                result, context, datasets = search_result
+                return_value.append(
+                    {
+                        "search_result": result,
+                        "dataset_id": datasets[0].id,
+                        "dataset_name": datasets[0].name,
+                    }
+                )
+            return return_value
+        else:
+            return_value = []
+            for search_result in search_results:
+                result, context, datasets = search_result
+                return_value.append(result)
+            # For maintaining backwards compatibility
+            if len(return_value) == 1 and isinstance(return_value[0], list):
+                return return_value[0]
+            else:
+                return return_value
+        # return [
+        #     SearchResult(
+        #         search_result=result,
+        #         dataset_id=datasets[min(index, len(datasets) - 1)].id if datasets else None,
+        #         dataset_name=datasets[min(index, len(datasets) - 1)].name if datasets else None,
+        #     )
+        #     for index, (result, _, datasets) in enumerate(search_results)
+        # ]
 
 
 async def authorized_search(
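A sketch of the two result shapes callers can now expect, using the public `cognee.search` wrapper the way the tests below do; it assumes data has already been added and cognified:

    import os

    import cognee
    from cognee.modules.search.types import SearchType


    async def demo():
        # Flag on: each element is a dict carrying dataset provenance.
        os.environ["ENABLE_BACKEND_ACCESS_CONTROL"] = "true"
        results = await cognee.search(
            query_type=SearchType.GRAPH_COMPLETION, query_text="Tell me about the artist AC/DC"
        )
        print(results[0]["dataset_name"])

        # Flag off (the default): plain results, and a single dataset's result
        # list is unwrapped so pre-0.3.2 callers keep seeing the old shape.
        os.environ["ENABLE_BACKEND_ACCESS_CONTROL"] = "false"
        results = await cognee.search(
            query_type=SearchType.GRAPH_COMPLETION, query_text="Tell me about the artist AC/DC"
        )
        print(results)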
cognee/tests/test_permissions.py
CHANGED
@@ -79,7 +79,7 @@ async def main():
     print("\n\nExtracted sentences are:\n")
     for result in search_results:
         print(f"{result}\n")
-    assert search_results[0]
+    assert search_results[0]["dataset_name"] == "NLP", (
         f"Dict must contain dataset name 'NLP': {search_results[0]}"
     )
 
@@ -93,7 +93,7 @@ async def main():
     print("\n\nExtracted sentences are:\n")
     for result in search_results:
         print(f"{result}\n")
-    assert search_results[0]
+    assert search_results[0]["dataset_name"] == "QUANTUM", (
         f"Dict must contain dataset name 'QUANTUM': {search_results[0]}"
     )
 
@@ -170,7 +170,7 @@ async def main():
     for result in search_results:
         print(f"{result}\n")
 
-    assert search_results[0]
+    assert search_results[0]["dataset_name"] == "QUANTUM", (
         f"Dict must contain dataset name 'QUANTUM': {search_results[0]}"
     )
 
cognee/tests/test_relational_db_migration.py
CHANGED

@@ -45,15 +45,13 @@ async def relational_db_migration():
     await migrate_relational_database(graph_engine, schema=schema)
 
     # 1. Search the graph
-    search_results
+    search_results = await cognee.search(
         query_type=SearchType.GRAPH_COMPLETION, query_text="Tell me about the artist AC/DC"
-    )
+    )
     print("Search results:", search_results)
 
     # 2. Assert that the search results contain "AC/DC"
-    assert any("AC/DC" in r
-        "AC/DC not found in search results!"
-    )
+    assert any("AC/DC" in r for r in search_results), "AC/DC not found in search results!"
 
     migration_db_provider = migration_engine.engine.dialect.name
     if migration_db_provider == "postgresql":
cognee/tests/test_save_export_path.py
ADDED

@@ -0,0 +1,116 @@
+import os
+import asyncio
+from uuid import uuid4
+
+import pytest
+
+
+@pytest.mark.asyncio
+async def test_save_uses_custom_export_path(tmp_path, monkeypatch):
+    # Import target after tmp fixtures are ready
+    from cognee.api.v1.save import save as save_mod
+
+    # Prepare two mock datasets
+    class Dataset:
+        def __init__(self, id_, name):
+            self.id = id_
+            self.name = name
+
+    ds1 = Dataset(uuid4(), "dataset_alpha")
+    ds2 = Dataset(uuid4(), "dataset_beta")
+
+    # Mock dataset discovery
+    async def mock_get_authorized_existing_datasets(datasets, permission_type, user):
+        return [ds1, ds2]
+
+    monkeypatch.setattr(
+        save_mod, "get_authorized_existing_datasets", mock_get_authorized_existing_datasets
+    )
+
+    # Mock data items (with filename collision in ds1)
+    class DataItem:
+        def __init__(self, id_, name, original_path=None):
+            self.id = id_
+            self.name = name
+            self.original_data_location = original_path
+
+    ds1_items = [
+        DataItem(uuid4(), "report.txt", "/root/a/report.txt"),
+        DataItem(uuid4(), "report.txt", "/root/b/report.txt"),  # collision
+    ]
+    ds2_items = [
+        DataItem(uuid4(), "notes.md", "/root/x/notes.md"),
+    ]
+
+    async def mock_get_dataset_data(dataset_id):
+        if dataset_id == ds1.id:
+            return ds1_items
+        if dataset_id == ds2.id:
+            return ds2_items
+        return []
+
+    monkeypatch.setattr(save_mod, "get_dataset_data", mock_get_dataset_data)
+
+    # Mock summary retrieval
+    async def mock_get_document_summaries_text(data_id: str) -> str:
+        return "This is a summary."
+
+    monkeypatch.setattr(save_mod, "_get_document_summaries_text", mock_get_document_summaries_text)
+
+    # Mock questions
+    async def mock_generate_questions(file_name: str, summary_text: str):
+        return ["Q1?", "Q2?", "Q3?"]
+
+    monkeypatch.setattr(save_mod, "_generate_questions", mock_generate_questions)
+
+    # Mock searches per question
+    async def mock_run_searches_for_question(question, dataset_id, search_types, top_k):
+        return {st.value: [f"{question} -> ok"] for st in search_types}
+
+    monkeypatch.setattr(save_mod, "_run_searches_for_question", mock_run_searches_for_question)
+
+    # Use custom export path
+    export_dir = tmp_path / "my_exports"
+    export_dir_str = str(export_dir)
+
+    # Run
+    result = await save_mod.save(
+        datasets=None,
+        export_root_directory=export_dir_str,
+        max_questions=3,
+        search_types=["GRAPH_COMPLETION", "INSIGHTS", "CHUNKS"],
+        top_k=2,
+        include_summary=True,
+        include_ascii_tree=True,
+        concurrency=2,
+        timeout=None,
+    )
+
+    # Verify returned mapping points to our custom path
+    assert str(ds1.id) in result and str(ds2.id) in result
+    assert result[str(ds1.id)].startswith(export_dir_str)
+    assert result[str(ds2.id)].startswith(export_dir_str)
+
+    # Verify directories and files exist
+    ds1_dir = result[str(ds1.id)]
+    ds2_dir = result[str(ds2.id)]
+
+    assert os.path.isdir(ds1_dir)
+    assert os.path.isdir(ds2_dir)
+
+    # index.md present
+    assert os.path.isfile(os.path.join(ds1_dir, "index.md"))
+    assert os.path.isfile(os.path.join(ds2_dir, "index.md"))
+
+    # File markdowns exist; collision handling: two files with similar base
+    ds1_files = [f for f in os.listdir(ds1_dir) if f.endswith(".md") and f != "index.md"]
+    assert len(ds1_files) == 2
+    assert any(f == "report.txt.md" for f in ds1_files)
+    assert any(f.startswith("report.txt__") and f.endswith(".md") for f in ds1_files)
+
+    # Content sanity: ensure question headers exist in one file
+    sample_md_path = os.path.join(ds1_dir, ds1_files[0])
+    with open(sample_md_path, "r", encoding="utf-8") as fh:
+        content = fh.read()
+    assert "## Question ideas" in content
+    assert "## Searches" in content
cognee/tests/test_search_db.py
CHANGED
@@ -144,13 +144,16 @@ async def main():
         ("GRAPH_COMPLETION_CONTEXT_EXTENSION", completion_ext),
         ("GRAPH_SUMMARY_COMPLETION", completion_sum),
     ]:
-
-
-
-
-
-
-
+        assert isinstance(search_results, list), f"{name}: should return a list"
+        assert len(search_results) == 1, (
+            f"{name}: expected single-element list, got {len(search_results)}"
+        )
+        text = search_results[0]
+        assert isinstance(text, str), f"{name}: element should be a string"
+        assert text.strip(), f"{name}: string should not be empty"
+        assert "netherlands" in text.lower(), (
+            f"{name}: expected 'netherlands' in result, got: {text!r}"
+        )
 
     graph_engine = await get_graph_engine()
     graph = await graph_engine.get_graph_data()
cognee/tests/unit/modules/retrieval/graph_completion_retriever_context_extension_test.py
CHANGED

@@ -59,8 +59,10 @@ class TestGraphCompletionWithContextExtensionRetriever:
 
         answer = await retriever.get_completion("Who works at Canva?")
 
-        assert isinstance(answer,
-        assert
+        assert isinstance(answer, list), f"Expected list, got {type(answer).__name__}"
+        assert all(isinstance(item, str) and item.strip() for item in answer), (
+            "Answer must contain only non-empty strings"
+        )
 
     @pytest.mark.asyncio
     async def test_graph_completion_extension_context_complex(self):
@@ -140,8 +142,10 @@ class TestGraphCompletionWithContextExtensionRetriever:
 
         answer = await retriever.get_completion("Who works at Figma?")
 
-        assert isinstance(answer,
-        assert
+        assert isinstance(answer, list), f"Expected list, got {type(answer).__name__}"
+        assert all(isinstance(item, str) and item.strip() for item in answer), (
+            "Answer must contain only non-empty strings"
+        )
 
     @pytest.mark.asyncio
     async def test_get_graph_completion_extension_context_on_empty_graph(self):
@@ -171,5 +175,7 @@ class TestGraphCompletionWithContextExtensionRetriever:
 
         answer = await retriever.get_completion("Who works at Figma?")
 
-        assert isinstance(answer,
-        assert
+        assert isinstance(answer, list), f"Expected list, got {type(answer).__name__}"
+        assert all(isinstance(item, str) and item.strip() for item in answer), (
+            "Answer must contain only non-empty strings"
+        )
cognee/tests/unit/modules/retrieval/graph_completion_retriever_cot_test.py
CHANGED

@@ -55,8 +55,10 @@ class TestGraphCompletionCoTRetriever:
 
         answer = await retriever.get_completion("Who works at Canva?")
 
-        assert isinstance(answer,
-        assert
+        assert isinstance(answer, list), f"Expected list, got {type(answer).__name__}"
+        assert all(isinstance(item, str) and item.strip() for item in answer), (
+            "Answer must contain only non-empty strings"
+        )
 
     @pytest.mark.asyncio
     async def test_graph_completion_cot_context_complex(self):
@@ -133,8 +135,10 @@ class TestGraphCompletionCoTRetriever:
 
         answer = await retriever.get_completion("Who works at Figma?")
 
-        assert isinstance(answer,
-        assert
+        assert isinstance(answer, list), f"Expected list, got {type(answer).__name__}"
+        assert all(isinstance(item, str) and item.strip() for item in answer), (
+            "Answer must contain only non-empty strings"
+        )
 
     @pytest.mark.asyncio
     async def test_get_graph_completion_cot_context_on_empty_graph(self):
@@ -164,5 +168,7 @@ class TestGraphCompletionCoTRetriever:
 
         answer = await retriever.get_completion("Who works at Figma?")
 
-        assert isinstance(answer,
-        assert
+        assert isinstance(answer, list), f"Expected list, got {type(answer).__name__}"
+        assert all(isinstance(item, str) and item.strip() for item in answer), (
+            "Answer must contain only non-empty strings"
+        )
cognee/tests/unit/modules/retrieval/insights_retriever_test.py
CHANGED

@@ -82,7 +82,7 @@ class TestInsightsRetriever:
 
         context = await retriever.get_context("Mike")
 
-        assert context[0]
+        assert context[0][0]["name"] == "Mike Broski", "Failed to get Mike Broski"
 
     @pytest.mark.asyncio
     async def test_insights_context_complex(self):
@@ -222,9 +222,7 @@ class TestInsightsRetriever:
 
         context = await retriever.get_context("Christina")
 
-        assert context[0]
-            "Failed to get Christina Mayer"
-        )
+        assert context[0][0]["name"] == "Christina Mayer", "Failed to get Christina Mayer"
 
     @pytest.mark.asyncio
     async def test_insights_context_on_empty_graph(self):
{cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognee
-Version: 0.3.1
+Version: 0.3.2
 Summary: Cognee - is a library for enriching LLM context with a semantic layer for better understanding and reasoning.
 Project-URL: Homepage, https://www.cognee.ai
 Project-URL: Repository, https://github.com/topoteretes/cognee
{cognee-0.3.1.dist-info → cognee-0.3.2.dist-info}/RECORD
CHANGED

@@ -53,10 +53,11 @@ cognee/api/v1/responses/models.py,sha256=MylzSnK-QB0kXe7nS-Mu4XRKZa-uBw8qP7Ke9On
 cognee/api/v1/responses/routers/__init__.py,sha256=X2qishwGRVFXawnvkZ5bv420PuPRLvknaFO2jdfiR10,122
 cognee/api/v1/responses/routers/default_tools.py,sha256=9qqzEZhrt3_YMKzUA06ke8P-2WeLXhYpKgVW6mLHlzw,3004
 cognee/api/v1/responses/routers/get_responses_router.py,sha256=ggbLhY9IXaInCgIs5TUuOCkFW64xmTKZQsc2ENq2Ocs,5979
+cognee/api/v1/save/save.py,sha256=xRthVNANIsrVJlLa5QKrdSiwCSckr7HBLmoeVJ_gEdE,12639
 cognee/api/v1/search/__init__.py,sha256=Sqw60DcOj4Bnvt-EWFknT31sPcvROIRKCWLr5pbkFr4,39
 cognee/api/v1/search/search.py,sha256=YQicNVi9q4FteAmt_EtY75I_EuNZ9ZjGE73wg-NcDwY,8824
 cognee/api/v1/search/routers/__init__.py,sha256=6RebeLX_2NTRxIMPH_mGuLztPxnGnMJK1y_O93CtRm8,49
-cognee/api/v1/search/routers/get_search_router.py,sha256
+cognee/api/v1/search/routers/get_search_router.py,sha256=-5GLgHipflEblYAwl3uiPAZ2i3TgrLEjDuiO_cCqcB8,6252
 cognee/api/v1/settings/routers/__init__.py,sha256=wj_UYAXNMPCkn6Mo1YB01dCBiV9DQwTIf6OWjnGRpf8,53
 cognee/api/v1/settings/routers/get_settings_router.py,sha256=EKVj2kw5MDKZcxAIAyi7ltz7wD6Hfs5feGrkd9R_vCA,3195
 cognee/api/v1/sync/__init__.py,sha256=hx2Af6GtX8soyHiYpWieWpAglLD05_7BK7PgdBqGbVE,313
@@ -64,7 +65,7 @@ cognee/api/v1/sync/sync.py,sha256=zzCVJD1AvcSXtNsgLJr1iPMRxY6vRxGdkt7sVdJ8W2c,33
 cognee/api/v1/sync/routers/__init__.py,sha256=hZArat9DDyzBll8qej0_o16QhtQRciTB37b5rc3ckGM,76
 cognee/api/v1/sync/routers/get_sync_router.py,sha256=7fD0QL0IIjyg9VBadNcLD7G7rypy_1glyWv8HVHBrao,9703
 cognee/api/v1/ui/__init__.py,sha256=SKfmAWokGT3_ZGqDkEtQihrvXCog6WTP3UdZrD20DBc,38
-cognee/api/v1/ui/ui.py,sha256=
+cognee/api/v1/ui/ui.py,sha256=CTgEmVrpeG174jTalgc8F_4hO6LsBXtTFkczMaLlNvc,23137
 cognee/api/v1/users/__init__.py,sha256=TMOZ_3puQxVqVIjWNA0yb16Tpp8yoNKAfwxIxoFpgus,37
 cognee/api/v1/users/create_user.py,sha256=PRuc7aUhOpyb-g5nUGDKSegp3cxkZy5TDeX1sxX6jjM,324
 cognee/api/v1/users/routers/__init__.py,sha256=_m3tyK2deFQCBjx6p-0t23e7qnnhAyx-2PBM7Wc6E7A,314
@@ -78,7 +79,7 @@ cognee/api/v1/visualize/__init__.py,sha256=TBk58R8cza6Qx7IP2r9RvAtE8Fmoo9vOh9VjC
 cognee/api/v1/visualize/start_visualization_server.py,sha256=3esCKYYmBx9Sb2H5JWrliT47qNyt_rGrv1OvR0LJVAg,440
 cognee/api/v1/visualize/visualize.py,sha256=xKhh1N-doIgFcnq9Tz1acwrS4fOqBFZlgif4prMBqP4,1077
 cognee/cli/__init__.py,sha256=MaKUkdFaETdbuMFoV02V8BZNuYr7tZQJKt6y25CaUhk,243
-cognee/cli/_cognee.py,sha256=
+cognee/cli/_cognee.py,sha256=v7GeAGLXaVkkkjUpbmo4Ya3ff-cycE297F41Sjx0kNA,8878
 cognee/cli/config.py,sha256=8XhUqpkmNNzCFbnIpRvNQIO2Hvw0OD44zWYM0eADozA,998
 cognee/cli/debug.py,sha256=-u3REG2xloCFLwOWQ3wVM7RpZRn06QlnfDyCRoxrrek,444
 cognee/cli/echo.py,sha256=3G4qYcYn1cShTeIKaZMPD_TgoS7LBqyUnMnTFaj5dUE,1128
@@ -549,14 +550,14 @@ cognee/modules/retrieval/code_retriever.py,sha256=cnOjgfCATzz0-XZGFrIIkuVZLc6HBh
 cognee/modules/retrieval/coding_rules_retriever.py,sha256=3GU259jTbGLqmp_A8sUdE4fyf0td06SKuxBJVW-npIQ,1134
 cognee/modules/retrieval/completion_retriever.py,sha256=Lw5sxN_UrtmWSOtcSS7Yj50Gw9p4nNBmW3dr2kV9JJ0,3754
 cognee/modules/retrieval/cypher_search_retriever.py,sha256=_3rZJ23hSZpDa8kVyOSWN3fwjMI_aLF2m5p-FtBek8k,2440
-cognee/modules/retrieval/graph_completion_context_extension_retriever.py,sha256=
-cognee/modules/retrieval/graph_completion_cot_retriever.py,sha256=
-cognee/modules/retrieval/graph_completion_retriever.py,sha256=
+cognee/modules/retrieval/graph_completion_context_extension_retriever.py,sha256=PUJRR13MZ6eAjOH3HeQRRl0rEElHEBh4IKahgrDUXPo,4526
+cognee/modules/retrieval/graph_completion_cot_retriever.py,sha256=quJYusaUNTvy7A3V_PAIbDLBMrFDuX8_wT0NnTcE5x8,6134
+cognee/modules/retrieval/graph_completion_retriever.py,sha256=XHuu1kvANGNMz-j6UkW2mpUpb1sf3hXcmMhu4TVM03c,8816
 cognee/modules/retrieval/graph_summary_completion_retriever.py,sha256=3AMisk3fObk2Vh1heY4veHkDjLsHgSSUc_ChZseJUYw,2456
-cognee/modules/retrieval/insights_retriever.py,sha256=
+cognee/modules/retrieval/insights_retriever.py,sha256=1pcYd34EfKk85MSPFQ8b-ZbSARmnauks8TxXfNOxvOw,4953
 cognee/modules/retrieval/natural_language_retriever.py,sha256=zJz35zRmBP8-pRlkoxxSxn3-jtG2lUW0xcu58bq9Ebs,5761
 cognee/modules/retrieval/summaries_retriever.py,sha256=joXYphypACm2JiCjbC8nBS61m1q2oYkzyIt9bdgALNw,3384
-cognee/modules/retrieval/temporal_retriever.py,sha256=
+cognee/modules/retrieval/temporal_retriever.py,sha256=mYik14shVjK24fNEtYzjte5ovwwTdROn5Kxy5FrOE10,5679
 cognee/modules/retrieval/user_qa_feedback.py,sha256=WSMPg6WjteR-XgK0vK9f_bkZ_o0JMPb4XZ9OAcFyz9E,3371
 cognee/modules/retrieval/context_providers/DummyContextProvider.py,sha256=9GsvINc7ekRyRWO5IefFGyytRYqsSlhpwAOw6Q691cA,419
 cognee/modules/retrieval/context_providers/SummarizedTripletSearchContextProvider.py,sha256=ypO6yWLxvmRsj_5dyYdvXTbztJmB_ioLrgyG6bF5WGA,894
@@ -578,7 +579,7 @@ cognee/modules/search/exceptions/exceptions.py,sha256=Zc5Y0M-r-UnSSlpKzHKBplfjZ-
 cognee/modules/search/methods/__init__.py,sha256=jGfRvNwM5yIzj025gaVhcx7nCupRSXbUUnFjYVjL_Js,27
 cognee/modules/search/methods/get_search_type_tools.py,sha256=wXxOZx3uEnMhRhUO2HGswQ5iVbWvjUj17UT_qdJg6Oo,6837
 cognee/modules/search/methods/no_access_control_search.py,sha256=R08aMgaB8AkD0_XVaX15qLyC9KJ3fSVFv9zeZwuyez4,1566
-cognee/modules/search/methods/search.py,sha256=
+cognee/modules/search/methods/search.py,sha256=Akqf4a913_nG56TMxTKU65kOwL0tWURDLHEXlwcgV1c,12459
 cognee/modules/search/models/Query.py,sha256=9WcF5Z1oCFtA4O-7An37eNAPX3iyygO4B5NSwhx7iIg,558
 cognee/modules/search/models/Result.py,sha256=U7QtoNzAtZnUDwGWhjVfcalHQd4daKtYYvJz2BeWQ4w,564
 cognee/modules/search/operations/__init__.py,sha256=AwJl6v9BTpocoefEZLk-flo1EtydYb46NSUoNFHkhX0,156
@@ -781,19 +782,26 @@ cognee/tests/test_neptune_analytics_graph.py,sha256=bZqPNk8ag_tilpRobK5RJVwTS473
 cognee/tests/test_neptune_analytics_hybrid.py,sha256=Q9mCGGqroLnHrRo3kHdhkMZnlNtvCshRG1BgU81voBc,6222
 cognee/tests/test_neptune_analytics_vector.py,sha256=h_Ofp4ZAdyGpCWzuQyoXmLO5lOycNLtliIFvJt7nXHg,8652
 cognee/tests/test_parallel_databases.py,sha256=Hhm4zh-luaXKmy7mjEHq3VkMppt6QaJ3IB2IRUVkwSk,1997
-cognee/tests/test_permissions.py,sha256=
+cognee/tests/test_permissions.py,sha256=h2Gyug-1DI8YycYMBhfEY0XdZbG3qt7ubiK5x7EJCVc,11509
 cognee/tests/test_pgvector.py,sha256=ZAaeWcnNBSYuyciYPBnzJSrGkuIjmKYWoNu3Jj7cPOM,9568
-cognee/tests/test_relational_db_migration.py,sha256=
+cognee/tests/test_relational_db_migration.py,sha256=QUgS40w3ZDO3fwvM0x0b1U0SxfFVJ3J6UEK5C48GVHA,8695
 cognee/tests/test_remote_kuzu.py,sha256=2GG05MtGuhOo6ST82OxjdVDetBS0GWHvKKmmmEtQO2U,7245
 cognee/tests/test_remote_kuzu_stress.py,sha256=5vgnu4Uz_NoKKqFZJeVceHwb2zNhvdTVBgpN3NjhfAE,5304
 cognee/tests/test_s3.py,sha256=rY2UDK15cdyywlyVrR8N2DRtVXWYIW5REaaz99gaQeE,2694
 cognee/tests/test_s3_file_storage.py,sha256=62tvIFyh_uTP0TFF9Ck4Y-sxWPW-cwJKYEJUJI1atPI,5654
-cognee/tests/
+cognee/tests/test_save_export_path.py,sha256=z07oQao82INzldg2mesS3ZGt7fl7rcjKx15JwoGT5tI,3898
+cognee/tests/test_search_db.py,sha256=4GpLx8ZJoMjkp-XqQ-LCrkf3NhAM4j_rMmlOFgmDO-A,13420
 cognee/tests/test_starter_pipelines.py,sha256=X1J8RDD0bFMKnRETyi5nyaF4TYdmUIu0EuD3WQwShNs,2475
 cognee/tests/test_telemetry.py,sha256=FIneuVofSKWFYqxNC88sT_P5GPzgfjVyqDCf2TYBE2E,4130
 cognee/tests/test_temporal_graph.py,sha256=G0PyzuvIYylwFT-3eZSzjtBik9O1g75sGLj3QK9RYTA,12624
-cognee/tests/
-cognee/tests/
+cognee/tests/cli_tests/cli_integration_tests/__init__.py,sha256=xYkvpZkxv_HRWmX71pGM3NUw2KKkDQIM-V6Ehxu-f0I,39
+cognee/tests/cli_tests/cli_integration_tests/test_cli_integration.py,sha256=3hdz1DoGeidJInqbCy1YQte6J0QeQG1_WKGs9utjAFg,11560
+cognee/tests/cli_tests/cli_unit_tests/__init__.py,sha256=U069aFvdwfKPd6YsR_FJML5LRphHHF5wx9mwug1hRh4,32
+cognee/tests/cli_tests/cli_unit_tests/test_cli_commands.py,sha256=5a3vPiSFmKumq6sTfdfMyeUpJGjbZ6_5zX4TUcV0ZJQ,17625
+cognee/tests/cli_tests/cli_unit_tests/test_cli_edge_cases.py,sha256=PyFCnClvbXG1GaiS16qwcuyXXDJ4sRyBCKV5WHrOUxk,23501
+cognee/tests/cli_tests/cli_unit_tests/test_cli_main.py,sha256=Gsj2zYlVL80iU9EjRj4Q4QzgsYuIngUvDbA9suV99oA,6098
+cognee/tests/cli_tests/cli_unit_tests/test_cli_runner.py,sha256=WZ8oZIlc_JintDq_cnEg9tmLEMZMGFPQGhU7Y_7sfgs,1497
+cognee/tests/cli_tests/cli_unit_tests/test_cli_utils.py,sha256=Flej8LNYRXNkWd2tq8elMm8MkqbhCUb8RtXaPzfNYm4,4323
 cognee/tests/integration/documents/AudioDocument_test.py,sha256=0mJnlWRc7gWqOxAUfdSSIxntcUrzkPXhlsd-MFsiRoM,2790
 cognee/tests/integration/documents/ImageDocument_test.py,sha256=vrb3uti0RF6a336LLI95i8fso3hOFw9AFe1NxPnOf6k,2802
 cognee/tests/integration/documents/PdfDocument_test.py,sha256=IY0Cck8J2gEyuJHPK0HODPbZPIXQ799KhWrgkjn5feM,1798
@@ -821,12 +829,6 @@ cognee/tests/test_data/text_to_speech.mp3,sha256=h0xuFwn_ddt-q2AeBu_BdLmMJUc4QtE
 cognee/tests/test_data/text_to_speech_copy.mp3,sha256=h0xuFwn_ddt-q2AeBu_BdLmMJUc4QtEKWdBQ9ydGYXI,28173
 cognee/tests/unit/api/__init__.py,sha256=tKoksC3QC3r43L7MDdEdjE2A34r8iOD1YPv8mT-iZzk,29
 cognee/tests/unit/api/test_conditional_authentication_endpoints.py,sha256=t5HX6s8D-5pFANy9IJEtY5ht_GhlJSZK_KkpqVj8ZdI,9349
-cognee/tests/unit/cli/__init__.py,sha256=U069aFvdwfKPd6YsR_FJML5LRphHHF5wx9mwug1hRh4,32
-cognee/tests/unit/cli/test_cli_commands.py,sha256=5a3vPiSFmKumq6sTfdfMyeUpJGjbZ6_5zX4TUcV0ZJQ,17625
-cognee/tests/unit/cli/test_cli_edge_cases.py,sha256=PyFCnClvbXG1GaiS16qwcuyXXDJ4sRyBCKV5WHrOUxk,23501
-cognee/tests/unit/cli/test_cli_main.py,sha256=Gsj2zYlVL80iU9EjRj4Q4QzgsYuIngUvDbA9suV99oA,6098
-cognee/tests/unit/cli/test_cli_runner.py,sha256=WZ8oZIlc_JintDq_cnEg9tmLEMZMGFPQGhU7Y_7sfgs,1497
-cognee/tests/unit/cli/test_cli_utils.py,sha256=Flej8LNYRXNkWd2tq8elMm8MkqbhCUb8RtXaPzfNYm4,4323
 cognee/tests/unit/entity_extraction/regex_entity_extraction_test.py,sha256=3zNvSI56FBltg_lda06n93l2vl702i5O1ewoQXoo50E,10234
 cognee/tests/unit/eval_framework/answer_generation_test.py,sha256=TVrAJneOiTSztq7J6poo4GGPsow3MWnBtpBwPkDHq08,1309
 cognee/tests/unit/eval_framework/benchmark_adapters_test.py,sha256=yXmr5089j1KB5lrLs4v17JXPuUk2iwXJRJGOb_wdnqk,3382
@@ -854,10 +856,10 @@ cognee/tests/unit/modules/pipelines/run_task_from_queue_test.py,sha256=X2clLQYoP
 cognee/tests/unit/modules/pipelines/run_tasks_test.py,sha256=IJ_2NBOizC-PtW4c1asYZB-SI85dQswB0Lt5e_n-5zI,1399
 cognee/tests/unit/modules/pipelines/run_tasks_with_context_test.py,sha256=Bi5XgQWfrgCgTtRu1nrUAqraDYHUzILleOka5fpTsKE,1058
 cognee/tests/unit/modules/retrieval/chunks_retriever_test.py,sha256=jHsvi1Y-bsOVdFrGIfGaTXV4UvwI4KzQF_hV0i3oy2I,6341
-cognee/tests/unit/modules/retrieval/graph_completion_retriever_context_extension_test.py,sha256=
-cognee/tests/unit/modules/retrieval/graph_completion_retriever_cot_test.py,sha256=
+cognee/tests/unit/modules/retrieval/graph_completion_retriever_context_extension_test.py,sha256=nVKITq5N0cOLCjvJvS6e9vAdbwFcNWxe11O1F7rEzok,6906
+cognee/tests/unit/modules/retrieval/graph_completion_retriever_cot_test.py,sha256=n5tMlsRzTsrMWpdqWS7nKOITwRnv3wNMMKFnXHTsdtU,6682
 cognee/tests/unit/modules/retrieval/graph_completion_retriever_test.py,sha256=T-WXkG9opjtlxY8R6CXcHazc2xouPOdUR9bumxxzFsY,8986
-cognee/tests/unit/modules/retrieval/insights_retriever_test.py,sha256=
+cognee/tests/unit/modules/retrieval/insights_retriever_test.py,sha256=xkbxlNiHY6evVbBYMncllXDNs3nNC_jZeYP47oT8vG0,8592
 cognee/tests/unit/modules/retrieval/rag_completion_retriever_test.py,sha256=RQi5EAjB5ffiIcsFmfg3Hao3AVPOkTEL72xnXIxbTKM,6551
 cognee/tests/unit/modules/retrieval/summaries_retriever_test.py,sha256=IfhDyVuKUrjCEy22-Mva9w7li2mtPZT9FlNIFvpFMKw,4950
 cognee/tests/unit/modules/retrieval/temporal_retriever_test.py,sha256=bvGvJgq9JF8nxnFvBlSo2qOBc0FKjCe006Io5HityAo,7672
@@ -888,9 +890,9 @@ distributed/tasks/queued_add_edges.py,sha256=kz1DHE05y-kNHORQJjYWHUi6Q1QWUp_v3Dl
 distributed/tasks/queued_add_nodes.py,sha256=aqK4Ij--ADwUWknxYpiwbYrpa6CcvFfqHWbUZW4Kh3A,452
 distributed/workers/data_point_saving_worker.py,sha256=jFmA0-P_0Ru2IUDrSug0wML-5goAKrGtlBm5BA5Ryw4,3229
 distributed/workers/graph_saving_worker.py,sha256=oUYl99CdhlrPAIsUOHbHnS3d4XhGoV0_OIbCO8wYzRg,3648
-cognee-0.3.
-cognee-0.3.
-cognee-0.3.
-cognee-0.3.
-cognee-0.3.
-cognee-0.3.
+cognee-0.3.2.dist-info/METADATA,sha256=psFYLZRaPVkg3zbOTWugIyH0H_3RL0hxAQHqYNXCz54,14753
+cognee-0.3.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+cognee-0.3.2.dist-info/entry_points.txt,sha256=4Fe5PRV0e3j5MFUo7kYyRFa3MhMNbOu69pGBazTxPps,51
+cognee-0.3.2.dist-info/licenses/LICENSE,sha256=pHHjSQj1DD8SDppW88MMs04TPk7eAanL1c5xj8NY7NQ,11344
+cognee-0.3.2.dist-info/licenses/NOTICE.md,sha256=6L3saP3kSpcingOxDh-SGjMS8GY79Rlh2dBNLaO0o5c,339
+cognee-0.3.2.dist-info/RECORD,,

The remaining 12 files (the relocated CLI test modules and the dist-info WHEEL, entry_points.txt, LICENSE, and NOTICE.md) contain no content changes.