cognee 0.3.2__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. cognee/api/v1/cloud/routers/get_checks_router.py +1 -1
  2. cognee/api/v1/cognify/cognify.py +44 -7
  3. cognee/api/v1/cognify/routers/get_cognify_router.py +2 -1
  4. cognee/api/v1/notebooks/routers/get_notebooks_router.py +2 -1
  5. cognee/api/v1/prune/prune.py +2 -2
  6. cognee/api/v1/search/search.py +1 -1
  7. cognee/api/v1/sync/sync.py +16 -5
  8. cognee/base_config.py +19 -1
  9. cognee/eval_framework/corpus_builder/task_getters/get_default_tasks_by_indices.py +2 -2
  10. cognee/infrastructure/databases/graph/kuzu/remote_kuzu_adapter.py +4 -1
  11. cognee/infrastructure/databases/relational/ModelBase.py +2 -1
  12. cognee/infrastructure/databases/relational/sqlalchemy/SqlAlchemyAdapter.py +2 -2
  13. cognee/infrastructure/databases/vector/chromadb/ChromaDBAdapter.py +2 -6
  14. cognee/infrastructure/databases/vector/config.py +1 -1
  15. cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py +6 -5
  16. cognee/infrastructure/files/storage/LocalFileStorage.py +50 -0
  17. cognee/infrastructure/files/storage/S3FileStorage.py +56 -9
  18. cognee/infrastructure/files/storage/StorageManager.py +18 -0
  19. cognee/infrastructure/files/utils/get_file_metadata.py +6 -1
  20. cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py +4 -2
  21. cognee/infrastructure/utils/run_async.py +9 -4
  22. cognee/infrastructure/utils/run_sync.py +4 -3
  23. cognee/modules/cloud/operations/check_api_key.py +4 -1
  24. cognee/modules/data/deletion/prune_system.py +5 -1
  25. cognee/modules/data/methods/create_authorized_dataset.py +9 -0
  26. cognee/modules/data/methods/get_authorized_dataset.py +1 -1
  27. cognee/modules/data/methods/get_authorized_dataset_by_name.py +11 -0
  28. cognee/modules/graph/utils/expand_with_nodes_and_edges.py +22 -8
  29. cognee/modules/graph/utils/retrieve_existing_edges.py +0 -2
  30. cognee/modules/notebooks/methods/create_notebook.py +34 -0
  31. cognee/modules/notebooks/methods/get_notebook.py +2 -2
  32. cognee/modules/notebooks/methods/get_notebooks.py +27 -1
  33. cognee/modules/notebooks/methods/update_notebook.py +0 -1
  34. cognee/modules/notebooks/models/Notebook.py +206 -1
  35. cognee/modules/notebooks/operations/run_in_local_sandbox.py +8 -5
  36. cognee/modules/observability/get_observe.py +14 -0
  37. cognee/modules/observability/observers.py +1 -0
  38. cognee/modules/ontology/base_ontology_resolver.py +42 -0
  39. cognee/modules/ontology/get_default_ontology_resolver.py +41 -0
  40. cognee/modules/ontology/matching_strategies.py +53 -0
  41. cognee/modules/ontology/models.py +20 -0
  42. cognee/modules/ontology/ontology_config.py +24 -0
  43. cognee/modules/ontology/ontology_env_config.py +45 -0
  44. cognee/modules/ontology/rdf_xml/{OntologyResolver.py → RDFLibOntologyResolver.py} +20 -28
  45. cognee/modules/pipelines/layers/resolve_authorized_user_dataset.py +13 -0
  46. cognee/modules/pipelines/layers/resolve_authorized_user_datasets.py +1 -1
  47. cognee/modules/pipelines/models/PipelineRunInfo.py +7 -2
  48. cognee/modules/retrieval/graph_completion_context_extension_retriever.py +1 -1
  49. cognee/modules/retrieval/graph_completion_cot_retriever.py +1 -1
  50. cognee/modules/retrieval/graph_completion_retriever.py +1 -1
  51. cognee/modules/retrieval/temporal_retriever.py +3 -3
  52. cognee/modules/retrieval/user_qa_feedback.py +1 -1
  53. cognee/modules/search/methods/get_search_type_tools.py +7 -0
  54. cognee/modules/search/methods/search.py +12 -13
  55. cognee/modules/search/utils/prepare_search_result.py +31 -9
  56. cognee/modules/search/utils/transform_context_to_graph.py +1 -1
  57. cognee/modules/search/utils/transform_insights_to_graph.py +28 -0
  58. cognee/modules/users/methods/create_user.py +4 -24
  59. cognee/modules/users/permissions/methods/authorized_give_permission_on_datasets.py +12 -0
  60. cognee/modules/users/permissions/methods/check_permission_on_dataset.py +11 -0
  61. cognee/modules/users/permissions/methods/get_all_user_permission_datasets.py +19 -2
  62. cognee/modules/users/permissions/methods/get_document_ids_for_user.py +10 -0
  63. cognee/modules/users/permissions/methods/get_principal.py +9 -0
  64. cognee/modules/users/permissions/methods/get_principal_datasets.py +11 -0
  65. cognee/modules/users/permissions/methods/get_role.py +10 -0
  66. cognee/modules/users/permissions/methods/get_specific_user_permission_datasets.py +3 -3
  67. cognee/modules/users/permissions/methods/get_tenant.py +9 -0
  68. cognee/modules/users/permissions/methods/give_default_permission_to_role.py +9 -0
  69. cognee/modules/users/permissions/methods/give_default_permission_to_tenant.py +9 -0
  70. cognee/modules/users/permissions/methods/give_default_permission_to_user.py +9 -0
  71. cognee/modules/users/permissions/methods/give_permission_on_dataset.py +10 -0
  72. cognee/modules/users/roles/methods/add_user_to_role.py +11 -0
  73. cognee/modules/users/roles/methods/create_role.py +10 -0
  74. cognee/modules/users/tenants/methods/add_user_to_tenant.py +12 -0
  75. cognee/modules/users/tenants/methods/create_tenant.py +10 -0
  76. cognee/root_dir.py +5 -0
  77. cognee/shared/cache.py +346 -0
  78. cognee/shared/utils.py +12 -0
  79. cognee/tasks/graph/extract_graph_from_data.py +53 -10
  80. cognee/tasks/graph/extract_graph_from_data_v2.py +16 -4
  81. cognee/tasks/ingestion/save_data_item_to_storage.py +1 -0
  82. cognee/tasks/temporal_graph/models.py +11 -6
  83. cognee/tests/cli_tests/cli_unit_tests/test_cli_main.py +5 -5
  84. cognee/tests/test_cognee_server_start.py +4 -4
  85. cognee/tests/test_temporal_graph.py +6 -34
  86. cognee/tests/unit/modules/ontology/test_ontology_adapter.py +330 -13
  87. cognee/tests/unit/modules/users/test_tutorial_notebook_creation.py +399 -0
  88. {cognee-0.3.2.dist-info → cognee-0.3.4.dist-info}/METADATA +11 -8
  89. {cognee-0.3.2.dist-info → cognee-0.3.4.dist-info}/RECORD +93 -86
  90. cognee-0.3.4.dist-info/entry_points.txt +2 -0
  91. cognee/api/v1/save/save.py +0 -335
  92. cognee/tests/test_save_export_path.py +0 -116
  93. cognee-0.3.2.dist-info/entry_points.txt +0 -2
  94. {cognee-0.3.2.dist-info → cognee-0.3.4.dist-info}/WHEEL +0 -0
  95. {cognee-0.3.2.dist-info → cognee-0.3.4.dist-info}/licenses/LICENSE +0 -0
  96. {cognee-0.3.2.dist-info → cognee-0.3.4.dist-info}/licenses/NOTICE.md +0 -0
cognee/api/v1/save/save.py
@@ -1,335 +0,0 @@
- import os
- import asyncio
- import json
- from typing import Optional, Union, List, Dict
- from uuid import UUID
-
- from pydantic import BaseModel
-
- from cognee.base_config import get_base_config
- from cognee.modules.users.models import User
- from cognee.modules.users.methods import get_default_user
- from cognee.modules.data.methods import get_authorized_existing_datasets, get_dataset_data
- from cognee.infrastructure.files.utils.get_data_file_path import get_data_file_path
- from cognee.infrastructure.llm.LLMGateway import LLMGateway
- from cognee.shared.logging_utils import get_logger
- from cognee.api.v1.search import search
- from cognee.modules.search.types import SearchType
-
-
- logger = get_logger("save")
-
-
- class QuestionsModel(BaseModel):
-     questions: List[str]
-
-
- def _sanitize_filename(name: str) -> str:
-     safe = "".join(c if c.isalnum() or c in ("-", "_", ".", " ") else "_" for c in name)
-     return safe.strip().replace(" ", "_")
-
-
- def _dataset_dir_name(dataset) -> str:
-     # Prefer readable dataset name when available, fallback to id
-     if getattr(dataset, "name", None):
-         return _sanitize_filename(str(dataset.name))
-     return str(dataset.id)
-
-
- def _file_markdown_name(data_item, used_names: set[str]) -> str:
-     # Use original file name if present, else data.name
-     name = getattr(data_item, "name", None) or "file"
-     base = _sanitize_filename(str(name))
-     filename = f"{base}.md"
-     if filename in used_names:
-         short_id = str(getattr(data_item, "id", ""))[:8]
-         filename = f"{base}__{short_id}.md"
-     used_names.add(filename)
-     return filename
-
-
- def _ascii_path_tree(path_str: str) -> str:
-     if not path_str:
-         return "(no path)"
-
-     # Normalize special schemes but keep segments readable
-     try:
-         normalized = get_data_file_path(path_str)
-     except Exception:
-         normalized = path_str
-
-     # Keep the path compact – show last 5 segments
-     parts = [p for p in normalized.replace("\\", "/").split("/") if p]
-     if len(parts) > 6:
-         display = ["…"] + parts[-5:]
-     else:
-         display = parts
-
-     # Render a single-branch tree
-     lines = []
-     for idx, seg in enumerate(display):
-         prefix = "└── " if idx == 0 else (" " * idx + "└── ")
-         lines.append(f"{prefix}{seg}")
-     return "\n".join(lines)
-
-
- async def _get_summary_via_summaries(query_text: str, dataset_id: UUID, top_k: int) -> str:
-     try:
-         results = await search(
-             query_text=query_text,
-             query_type=SearchType.SUMMARIES,
-             dataset_ids=[dataset_id],
-             top_k=top_k,
-         )
-         if not results:
-             return ""
-         texts: List[str] = []
-         for r in results[:top_k]:
-             texts.append(str(r))
-         return "\n\n".join(texts)
-     except Exception as e:
-         logger.error(
-             "SUMMARIES search failed for '%s' in dataset %s: %s",
-             query_text,
-             str(dataset_id),
-             str(e),
-         )
-         return ""
-
-
- async def _generate_questions(file_name: str, summary_text: str) -> List[str]:
-     prompt = (
-         "You are an expert analyst. Given a file and its summary, propose 10 diverse, high-signal "
-         "questions to further explore the file's content, implications, relationships, and gaps. "
-         "Avoid duplicates; vary depth and angle (overview, details, cross-references, temporal, quality).\n\n"
-         f"File: {file_name}\n\nSummary:\n{summary_text[:4000]}"
-     )
-
-     model = await LLMGateway.acreate_structured_output(
-         text_input=prompt,
-         system_prompt="Return strictly a JSON with key 'questions' and value as an array of 10 concise strings.",
-         response_model=QuestionsModel,
-     )
-
-     # model can be either pydantic model or dict-like, normalize
-     try:
-         questions = list(getattr(model, "questions", []))
-     except Exception:
-         questions = []
-
-     # Fallback if the tool returned a dict-like
-     if not questions and isinstance(model, dict):
-         questions = list(model.get("questions", []) or [])
-
-     # Enforce 10 max
-     return questions[:10]
-
-
- async def _run_searches_for_question(
-     question: str, dataset_id: UUID, search_types: List[SearchType], top_k: int
- ) -> Dict[str, Union[str, List[dict], List[str]]]:
-     async def run_one(st: SearchType):
-         try:
-             result = await search(
-                 query_text=question,
-                 query_type=st,
-                 dataset_ids=[dataset_id],
-                 top_k=top_k,
-             )
-             return st.value, result
-         except Exception as e:
-             logger.error("Search failed for type %s: %s", st.value, str(e))
-             return st.value, [f"Error: {str(e)}"]
-
-     pairs = await asyncio.gather(*[run_one(st) for st in search_types])
-     return {k: v for k, v in pairs}
-
-
- def _format_results_md(results: Dict[str, Union[str, List[dict], List[str]]]) -> str:
-     lines: List[str] = []
-     for st, payload in results.items():
-         lines.append(f"#### {st}")
-         if isinstance(payload, list):
-             # Printed as bullet items; stringify dicts
-             for item in payload[:5]:
-                 if isinstance(item, dict):
-                     # compact representation
-                     snippet = json.dumps(item, ensure_ascii=False)[:800]
-                     lines.append(f"- {snippet}")
-                 else:
-                     text = str(item)
-                     lines.append(f"- {text[:800]}")
-         else:
-             lines.append(str(payload))
-         lines.append("")
-     return "\n".join(lines)
-
-
- async def save(
-     datasets: Optional[Union[List[str], List[UUID]]] = None,
-     export_root_directory: Optional[str] = None,
-     user: Optional[User] = None,
-     # Configurable knobs
-     max_questions: int = 10,
-     search_types: Optional[List[Union[str, SearchType]]] = None,
-     top_k: int = 5,
-     include_summary: bool = True,
-     include_ascii_tree: bool = True,
-     concurrency: int = 4,
-     timeout: Optional[float] = None,
- ) -> Dict[str, str]:
-     """
-     Export per-dataset markdown summaries and search insights for each ingested file.
-
-     For every dataset the user can read:
-     - Create a folder under export_root_directory (or data_root_directory/exports)
-     - For each data item (file), create a .md containing:
-       - Summary of the file (from existing TextSummary nodes)
-       - A small ASCII path tree showing its folder position
-       - Up to N LLM-generated question ideas (configurable)
-       - Results of configured Cognee searches per question
-     Also creates an index.md per dataset with links to files and an optional dataset summary.
-
-     Returns a mapping of dataset_id -> export_directory path.
-     """
-     base_config = get_base_config()
-     export_root = export_root_directory or os.path.join(
-         base_config.data_root_directory, "memory_export"
-     )
-     os.makedirs(export_root, exist_ok=True)
-
-     if user is None:
-         user = await get_default_user()
-
-     datasets_list = await get_authorized_existing_datasets(datasets, "read", user)
-     results: Dict[str, str] = {}
-
-     for dataset in datasets_list:
-         ds_dir = os.path.join(export_root, _dataset_dir_name(dataset))
-         os.makedirs(ds_dir, exist_ok=True)
-         results[str(dataset.id)] = ds_dir
-
-         data_items = await get_dataset_data(dataset.id)
-
-         # Normalize search types
-         if not search_types:
-             effective_search_types = [
-                 SearchType.GRAPH_COMPLETION,
-                 SearchType.INSIGHTS,
-                 SearchType.CHUNKS,
-             ]
-         else:
-             effective_search_types = []
-             for st in search_types:
-                 if isinstance(st, SearchType):
-                     effective_search_types.append(st)
-                 else:
-                     try:
-                         effective_search_types.append(SearchType[str(st)])
-                     except Exception:
-                         logger.warning("Unknown search type '%s', skipping", str(st))
-
-         sem = asyncio.Semaphore(max(1, int(concurrency)))
-         used_names: set[str] = set()
-         index_entries: List[tuple[str, str]] = []
-
-         async def process_one(data_item):
-             async with sem:
-                 file_label = getattr(data_item, "name", str(data_item.id))
-                 original_path = getattr(data_item, "original_data_location", None)
-
-                 ascii_tree = (
-                     _ascii_path_tree(original_path or file_label) if include_ascii_tree else ""
-                 )
-
-                 summary_text = ""
-                 if include_summary:
-                     # Use SUMMARIES search scoped to dataset to derive file summary
-                     file_query = getattr(data_item, "name", str(data_item.id)) or "file"
-                     summary_text = await _get_summary_via_summaries(file_query, dataset.id, top_k)
-                     if not summary_text:
-                         summary_text = "Summary not available."
-
-                 if max_questions == 0:
-                     questions = []
-                 else:
-                     questions = await _generate_questions(file_label, summary_text)
-                     if max_questions is not None and max_questions >= 0:
-                         questions = questions[:max_questions]
-
-                 async def searches_for_question(q: str):
-                     return await _run_searches_for_question(
-                         q, dataset.id, effective_search_types, top_k
-                     )
-
-                 # Run per-question searches concurrently
-                 per_q_results = await asyncio.gather(*[searches_for_question(q) for q in questions])
-
-                 # Build markdown content
-                 md_lines = [f"# {file_label}", ""]
-                 if include_ascii_tree:
-                     md_lines.extend(["## Location", "", "```", ascii_tree, "```", ""])
-                 if include_summary:
-                     md_lines.extend(["## Summary", "", summary_text, ""])
-
-                 md_lines.append("## Question ideas")
-                 for idx, q in enumerate(questions, start=1):
-                     md_lines.append(f"- {idx}. {q}")
-                 md_lines.append("")
-
-                 md_lines.append("## Searches")
-                 md_lines.append("")
-                 for q, per_type in zip(questions, per_q_results):
-                     md_lines.append(f"### Q: {q}")
-                     md_lines.append(_format_results_md(per_type))
-                     md_lines.append("")
-
-                 # Write to file (collision-safe)
-                 md_filename = _file_markdown_name(data_item, used_names)
-                 export_path = os.path.join(ds_dir, md_filename)
-                 tmp_path = export_path + ".tmp"
-                 with open(tmp_path, "w", encoding="utf-8") as f:
-                     f.write("\n".join(md_lines))
-                 os.replace(tmp_path, export_path)
-
-                 index_entries.append((file_label, md_filename))
-
-         tasks = [asyncio.create_task(process_one(item)) for item in data_items]
-
-         if timeout and timeout > 0:
-             try:
-                 await asyncio.wait_for(asyncio.gather(*tasks, return_exceptions=True), timeout)
-             except asyncio.TimeoutError:
-                 logger.error("Save timed out for dataset %s", str(dataset.id))
-         else:
-             await asyncio.gather(*tasks, return_exceptions=True)
-
-         # Build dataset index.md with TOC and optional dataset summary via SUMMARIES
-         try:
-             index_lines = [f"# Dataset: {_dataset_dir_name(dataset)}", "", "## Files", ""]
-             for display, fname in sorted(index_entries, key=lambda x: x[0].lower()):
-                 index_lines.append(f"- [{display}]({fname})")
-
-             # Dataset summary section
-             try:
-                 summaries = await search(
-                     query_text="dataset overview",
-                     query_type=SearchType.SUMMARIES,
-                     dataset_ids=[dataset.id],
-                     top_k=top_k,
-                 )
-             except Exception as e:
-                 logger.error("Dataset summary search failed: %s", str(e))
-                 summaries = []
-
-             if summaries:
-                 index_lines.extend(["", "## Dataset summary (top summaries)", ""])
-                 for s in summaries[:top_k]:
-                     index_lines.append(f"- {str(s)[:800]}")
-
-             with open(os.path.join(ds_dir, "index.md"), "w", encoding="utf-8") as f:
-                 f.write("\n".join(index_lines))
-         except Exception as e:
-             logger.error("Failed to write dataset index for %s: %s", str(dataset.id), str(e))
-
-     return results
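
The removed module above exposed a single public coroutine, save(). As a point of reference, here is a minimal, hypothetical usage sketch of how a 0.3.2 caller could have invoked it, based only on the signature and docstring shown in the removed file; the dataset name and export path are illustrative values, not taken from the source.

import asyncio

from cognee.api.v1.save.save import save  # module present in 0.3.2, removed in 0.3.4


async def main():
    # Export markdown summaries and per-question search results for datasets
    # the default user can read. "my_dataset" and the export directory below
    # are illustrative placeholders.
    exported = await save(
        datasets=["my_dataset"],
        export_root_directory="/tmp/cognee_exports",
        max_questions=5,
        search_types=["GRAPH_COMPLETION", "CHUNKS"],
        top_k=3,
    )
    # Per the docstring, the result maps dataset_id -> export directory.
    print(exported)


asyncio.run(main())
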
cognee/tests/test_save_export_path.py
@@ -1,116 +0,0 @@
- import os
- import asyncio
- from uuid import uuid4
-
- import pytest
-
-
- @pytest.mark.asyncio
- async def test_save_uses_custom_export_path(tmp_path, monkeypatch):
-     # Import target after tmp fixtures are ready
-     from cognee.api.v1.save import save as save_mod
-
-     # Prepare two mock datasets
-     class Dataset:
-         def __init__(self, id_, name):
-             self.id = id_
-             self.name = name
-
-     ds1 = Dataset(uuid4(), "dataset_alpha")
-     ds2 = Dataset(uuid4(), "dataset_beta")
-
-     # Mock dataset discovery
-     async def mock_get_authorized_existing_datasets(datasets, permission_type, user):
-         return [ds1, ds2]
-
-     monkeypatch.setattr(
-         save_mod, "get_authorized_existing_datasets", mock_get_authorized_existing_datasets
-     )
-
-     # Mock data items (with filename collision in ds1)
-     class DataItem:
-         def __init__(self, id_, name, original_path=None):
-             self.id = id_
-             self.name = name
-             self.original_data_location = original_path
-
-     ds1_items = [
-         DataItem(uuid4(), "report.txt", "/root/a/report.txt"),
-         DataItem(uuid4(), "report.txt", "/root/b/report.txt"),  # collision
-     ]
-     ds2_items = [
-         DataItem(uuid4(), "notes.md", "/root/x/notes.md"),
-     ]
-
-     async def mock_get_dataset_data(dataset_id):
-         if dataset_id == ds1.id:
-             return ds1_items
-         if dataset_id == ds2.id:
-             return ds2_items
-         return []
-
-     monkeypatch.setattr(save_mod, "get_dataset_data", mock_get_dataset_data)
-
-     # Mock summary retrieval
-     async def mock_get_document_summaries_text(data_id: str) -> str:
-         return "This is a summary."
-
-     monkeypatch.setattr(save_mod, "_get_document_summaries_text", mock_get_document_summaries_text)
-
-     # Mock questions
-     async def mock_generate_questions(file_name: str, summary_text: str):
-         return ["Q1?", "Q2?", "Q3?"]
-
-     monkeypatch.setattr(save_mod, "_generate_questions", mock_generate_questions)
-
-     # Mock searches per question
-     async def mock_run_searches_for_question(question, dataset_id, search_types, top_k):
-         return {st.value: [f"{question} -> ok"] for st in search_types}
-
-     monkeypatch.setattr(save_mod, "_run_searches_for_question", mock_run_searches_for_question)
-
-     # Use custom export path
-     export_dir = tmp_path / "my_exports"
-     export_dir_str = str(export_dir)
-
-     # Run
-     result = await save_mod.save(
-         datasets=None,
-         export_root_directory=export_dir_str,
-         max_questions=3,
-         search_types=["GRAPH_COMPLETION", "INSIGHTS", "CHUNKS"],
-         top_k=2,
-         include_summary=True,
-         include_ascii_tree=True,
-         concurrency=2,
-         timeout=None,
-     )
-
-     # Verify returned mapping points to our custom path
-     assert str(ds1.id) in result and str(ds2.id) in result
-     assert result[str(ds1.id)].startswith(export_dir_str)
-     assert result[str(ds2.id)].startswith(export_dir_str)
-
-     # Verify directories and files exist
-     ds1_dir = result[str(ds1.id)]
-     ds2_dir = result[str(ds2.id)]
-
-     assert os.path.isdir(ds1_dir)
-     assert os.path.isdir(ds2_dir)
-
-     # index.md present
-     assert os.path.isfile(os.path.join(ds1_dir, "index.md"))
-     assert os.path.isfile(os.path.join(ds2_dir, "index.md"))
-
-     # File markdowns exist; collision handling: two files with similar base
-     ds1_files = [f for f in os.listdir(ds1_dir) if f.endswith(".md") and f != "index.md"]
-     assert len(ds1_files) == 2
-     assert any(f == "report.txt.md" for f in ds1_files)
-     assert any(f.startswith("report.txt__") and f.endswith(".md") for f in ds1_files)
-
-     # Content sanity: ensure question headers exist in one file
-     sample_md_path = os.path.join(ds1_dir, ds1_files[0])
-     with open(sample_md_path, "r", encoding="utf-8") as fh:
-         content = fh.read()
-     assert "## Question ideas" in content
-     assert "## Searches" in content
cognee-0.3.2.dist-info/entry_points.txt
@@ -1,2 +0,0 @@
- [console_scripts]
- cognee = cognee.cli._cognee:main