cognee 0.5.0.dev0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognee/api/client.py +1 -5
- cognee/api/v1/add/add.py +2 -1
- cognee/api/v1/cognify/cognify.py +24 -16
- cognee/api/v1/cognify/routers/__init__.py +0 -1
- cognee/api/v1/cognify/routers/get_cognify_router.py +3 -1
- cognee/api/v1/datasets/routers/get_datasets_router.py +3 -3
- cognee/api/v1/ontologies/ontologies.py +12 -37
- cognee/api/v1/ontologies/routers/get_ontology_router.py +27 -25
- cognee/api/v1/search/search.py +8 -0
- cognee/api/v1/ui/node_setup.py +360 -0
- cognee/api/v1/ui/npm_utils.py +50 -0
- cognee/api/v1/ui/ui.py +38 -68
- cognee/context_global_variables.py +61 -16
- cognee/eval_framework/Dockerfile +29 -0
- cognee/eval_framework/answer_generation/answer_generation_executor.py +10 -0
- cognee/eval_framework/answer_generation/run_question_answering_module.py +1 -1
- cognee/eval_framework/corpus_builder/task_getters/get_cascade_graph_tasks.py +0 -2
- cognee/eval_framework/corpus_builder/task_getters/get_default_tasks_by_indices.py +4 -4
- cognee/eval_framework/eval_config.py +2 -2
- cognee/eval_framework/modal_run_eval.py +16 -28
- cognee/infrastructure/databases/dataset_database_handler/__init__.py +3 -0
- cognee/infrastructure/databases/dataset_database_handler/dataset_database_handler_interface.py +80 -0
- cognee/infrastructure/databases/dataset_database_handler/supported_dataset_database_handlers.py +18 -0
- cognee/infrastructure/databases/dataset_database_handler/use_dataset_database_handler.py +10 -0
- cognee/infrastructure/databases/graph/config.py +3 -0
- cognee/infrastructure/databases/graph/get_graph_engine.py +1 -0
- cognee/infrastructure/databases/graph/graph_db_interface.py +15 -0
- cognee/infrastructure/databases/graph/kuzu/KuzuDatasetDatabaseHandler.py +81 -0
- cognee/infrastructure/databases/graph/kuzu/adapter.py +228 -0
- cognee/infrastructure/databases/graph/neo4j_driver/Neo4jAuraDevDatasetDatabaseHandler.py +168 -0
- cognee/infrastructure/databases/graph/neo4j_driver/adapter.py +80 -1
- cognee/infrastructure/databases/utils/__init__.py +3 -0
- cognee/infrastructure/databases/utils/get_graph_dataset_database_handler.py +10 -0
- cognee/infrastructure/databases/utils/get_or_create_dataset_database.py +62 -48
- cognee/infrastructure/databases/utils/get_vector_dataset_database_handler.py +10 -0
- cognee/infrastructure/databases/utils/resolve_dataset_database_connection_info.py +30 -0
- cognee/infrastructure/databases/vector/config.py +2 -0
- cognee/infrastructure/databases/vector/create_vector_engine.py +1 -0
- cognee/infrastructure/databases/vector/embeddings/FastembedEmbeddingEngine.py +8 -6
- cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py +9 -7
- cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py +11 -10
- cognee/infrastructure/databases/vector/lancedb/LanceDBAdapter.py +2 -0
- cognee/infrastructure/databases/vector/lancedb/LanceDBDatasetDatabaseHandler.py +50 -0
- cognee/infrastructure/databases/vector/vector_db_interface.py +35 -0
- cognee/infrastructure/files/storage/s3_config.py +2 -0
- cognee/infrastructure/llm/LLMGateway.py +5 -2
- cognee/infrastructure/llm/config.py +35 -0
- cognee/infrastructure/llm/extraction/knowledge_graph/extract_content_graph.py +2 -2
- cognee/infrastructure/llm/structured_output_framework/baml/baml_src/extraction/acreate_structured_output.py +23 -8
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py +17 -16
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/bedrock/__init__.py +5 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/bedrock/adapter.py +153 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py +40 -37
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py +39 -36
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py +19 -1
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/adapter.py +11 -9
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py +23 -21
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py +42 -34
- cognee/memify_pipelines/create_triplet_embeddings.py +53 -0
- cognee/modules/cognify/config.py +2 -0
- cognee/modules/data/deletion/prune_system.py +52 -2
- cognee/modules/data/methods/delete_dataset.py +26 -0
- cognee/modules/engine/models/Triplet.py +9 -0
- cognee/modules/engine/models/__init__.py +1 -0
- cognee/modules/graph/cognee_graph/CogneeGraph.py +85 -37
- cognee/modules/graph/cognee_graph/CogneeGraphElements.py +8 -3
- cognee/modules/memify/memify.py +1 -7
- cognee/modules/pipelines/operations/pipeline.py +18 -2
- cognee/modules/retrieval/__init__.py +1 -1
- cognee/modules/retrieval/graph_completion_context_extension_retriever.py +4 -0
- cognee/modules/retrieval/graph_completion_cot_retriever.py +4 -0
- cognee/modules/retrieval/graph_completion_retriever.py +10 -0
- cognee/modules/retrieval/graph_summary_completion_retriever.py +4 -0
- cognee/modules/retrieval/register_retriever.py +10 -0
- cognee/modules/retrieval/registered_community_retrievers.py +1 -0
- cognee/modules/retrieval/temporal_retriever.py +4 -0
- cognee/modules/retrieval/triplet_retriever.py +182 -0
- cognee/modules/retrieval/utils/brute_force_triplet_search.py +42 -10
- cognee/modules/run_custom_pipeline/run_custom_pipeline.py +8 -1
- cognee/modules/search/methods/get_search_type_tools.py +54 -8
- cognee/modules/search/methods/no_access_control_search.py +4 -0
- cognee/modules/search/methods/search.py +46 -18
- cognee/modules/search/types/SearchType.py +1 -1
- cognee/modules/settings/get_settings.py +19 -0
- cognee/modules/users/methods/get_authenticated_user.py +2 -2
- cognee/modules/users/models/DatasetDatabase.py +15 -3
- cognee/shared/logging_utils.py +4 -0
- cognee/shared/rate_limiting.py +30 -0
- cognee/tasks/documents/__init__.py +0 -1
- cognee/tasks/graph/extract_graph_from_data.py +9 -10
- cognee/tasks/memify/get_triplet_datapoints.py +289 -0
- cognee/tasks/storage/add_data_points.py +142 -2
- cognee/tests/integration/retrieval/test_triplet_retriever.py +84 -0
- cognee/tests/integration/tasks/test_add_data_points.py +139 -0
- cognee/tests/integration/tasks/test_get_triplet_datapoints.py +69 -0
- cognee/tests/test_cognee_server_start.py +2 -4
- cognee/tests/test_conversation_history.py +23 -1
- cognee/tests/test_dataset_database_handler.py +137 -0
- cognee/tests/test_dataset_delete.py +76 -0
- cognee/tests/test_edge_centered_payload.py +170 -0
- cognee/tests/test_pipeline_cache.py +164 -0
- cognee/tests/test_search_db.py +37 -1
- cognee/tests/unit/api/test_ontology_endpoint.py +77 -89
- cognee/tests/unit/infrastructure/llm/test_llm_config.py +46 -0
- cognee/tests/unit/infrastructure/mock_embedding_engine.py +3 -7
- cognee/tests/unit/infrastructure/test_embedding_rate_limiting_realistic.py +0 -5
- cognee/tests/unit/modules/graph/cognee_graph_elements_test.py +2 -2
- cognee/tests/unit/modules/graph/cognee_graph_test.py +406 -0
- cognee/tests/unit/modules/memify_tasks/test_get_triplet_datapoints.py +214 -0
- cognee/tests/unit/modules/retrieval/test_brute_force_triplet_search.py +608 -0
- cognee/tests/unit/modules/retrieval/triplet_retriever_test.py +83 -0
- cognee/tests/unit/modules/search/test_search.py +100 -0
- cognee/tests/unit/tasks/storage/test_add_data_points.py +288 -0
- {cognee-0.5.0.dev0.dist-info → cognee-0.5.1.dist-info}/METADATA +76 -89
- {cognee-0.5.0.dev0.dist-info → cognee-0.5.1.dist-info}/RECORD +119 -97
- {cognee-0.5.0.dev0.dist-info → cognee-0.5.1.dist-info}/WHEEL +1 -1
- cognee/api/v1/cognify/code_graph_pipeline.py +0 -119
- cognee/api/v1/cognify/routers/get_code_pipeline_router.py +0 -90
- cognee/infrastructure/databases/vector/embeddings/embedding_rate_limiter.py +0 -544
- cognee/modules/retrieval/code_retriever.py +0 -232
- cognee/tasks/code/enrich_dependency_graph_checker.py +0 -35
- cognee/tasks/code/get_local_dependencies_checker.py +0 -20
- cognee/tasks/code/get_repo_dependency_graph_checker.py +0 -35
- cognee/tasks/documents/check_permissions_on_dataset.py +0 -26
- cognee/tasks/repo_processor/__init__.py +0 -2
- cognee/tasks/repo_processor/get_local_dependencies.py +0 -335
- cognee/tasks/repo_processor/get_non_code_files.py +0 -158
- cognee/tasks/repo_processor/get_repo_file_dependencies.py +0 -243
- cognee/tests/test_delete_bmw_example.py +0 -60
- {cognee-0.5.0.dev0.dist-info → cognee-0.5.1.dist-info}/entry_points.txt +0 -0
- {cognee-0.5.0.dev0.dist-info → cognee-0.5.1.dist-info}/licenses/LICENSE +0 -0
- {cognee-0.5.0.dev0.dist-info → cognee-0.5.1.dist-info}/licenses/NOTICE.md +0 -0
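The most visible addition in this release is triplet embeddings: graph edges are embedded into a Triplet_text vector collection and queried through the new SearchType.TRIPLET_COMPLETION. Below is a minimal sketch of that flow, assembled from the test files shown further down — every import and parameter appears verbatim in cognee/tests/test_search_db.py, while the dataset name, sample text, and query here are illustrative:

import asyncio

import cognee
from cognee.memify_pipelines.create_triplet_embeddings import create_triplet_embeddings
from cognee.modules.search.types import SearchType
from cognee.modules.users.methods import get_default_user


async def main():
    dataset_name = "demo"  # illustrative dataset name

    # Ingest and build the knowledge graph as before.
    await cognee.add(data="Apple produces the iPhone.", dataset_name=dataset_name)
    await cognee.cognify([dataset_name])

    # New memify pipeline: embed every graph edge as a triplet.
    user = await get_default_user()
    await create_triplet_embeddings(user=user, dataset=dataset_name, triplets_batch_size=5)

    # New search type that completes answers from the triplet embeddings.
    results = await cognee.search(
        query_type=SearchType.TRIPLET_COMPLETION,
        query_text="What products does Apple make?",
    )
    print(results)


if __name__ == "__main__":
    asyncio.run(main())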
cognee/tests/test_edge_centered_payload.py
ADDED
@@ -0,0 +1,170 @@
+"""
+End-to-end integration test for edge-centered payload and triplet embeddings.
+
+"""
+
+import os
+import pathlib
+import cognee
+from cognee.infrastructure.databases.graph import get_graph_engine
+from cognee.infrastructure.databases.vector import get_vector_engine
+from cognee.modules.search.types import SearchType
+from cognee.shared.logging_utils import get_logger
+from cognee.modules.ontology.rdf_xml.RDFLibOntologyResolver import RDFLibOntologyResolver
+from cognee.modules.ontology.ontology_config import Config
+
+logger = get_logger()
+
+text_data = """
+Apple is a technology company that produces the iPhone, iPad, and Mac computers.
+The company is known for its innovative products and ecosystem integration.
+
+Microsoft develops the Windows operating system and Office productivity suite.
+They are also major players in cloud computing with Azure.
+
+Google created the Android operating system and provides search engine services.
+The company is a leader in artificial intelligence and machine learning.
+"""
+
+ontology_content = """<?xml version="1.0"?>
+<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+         xmlns:owl="http://www.w3.org/2002/07/owl#"
+         xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
+         xmlns="http://example.org/tech#"
+         xml:base="http://example.org/tech">
+
+    <owl:Ontology rdf:about="http://example.org/tech"/>
+
+    <!-- Classes -->
+    <owl:Class rdf:ID="Company"/>
+    <owl:Class rdf:ID="TechnologyCompany"/>
+    <owl:Class rdf:ID="Product"/>
+    <owl:Class rdf:ID="Software"/>
+    <owl:Class rdf:ID="Hardware"/>
+    <owl:Class rdf:ID="Service"/>
+
+    <rdf:Description rdf:about="#TechnologyCompany">
+        <rdfs:subClassOf rdf:resource="#Company"/>
+        <rdfs:comment>A company operating in the technology sector.</rdfs:comment>
+    </rdf:Description>
+
+    <rdf:Description rdf:about="#Software">
+        <rdfs:subClassOf rdf:resource="#Product"/>
+        <rdfs:comment>Software products and applications.</rdfs:comment>
+    </rdf:Description>
+
+    <rdf:Description rdf:about="#Hardware">
+        <rdfs:subClassOf rdf:resource="#Product"/>
+        <rdfs:comment>Physical hardware products.</rdfs:comment>
+    </rdf:Description>
+
+    <!-- Individuals -->
+    <TechnologyCompany rdf:ID="apple">
+        <rdfs:label>Apple</rdfs:label>
+    </TechnologyCompany>
+
+    <TechnologyCompany rdf:ID="microsoft">
+        <rdfs:label>Microsoft</rdfs:label>
+    </TechnologyCompany>
+
+    <TechnologyCompany rdf:ID="google">
+        <rdfs:label>Google</rdfs:label>
+    </TechnologyCompany>
+
+    <Hardware rdf:ID="iphone">
+        <rdfs:label>iPhone</rdfs:label>
+    </Hardware>
+
+    <Software rdf:ID="windows">
+        <rdfs:label>Windows</rdfs:label>
+    </Software>
+
+    <Software rdf:ID="android">
+        <rdfs:label>Android</rdfs:label>
+    </Software>
+
+</rdf:RDF>"""
+
+
+async def main():
+    data_directory_path = str(
+        pathlib.Path(
+            os.path.join(
+                pathlib.Path(__file__).parent,
+                ".data_storage/test_edge_centered_payload",
+            )
+        ).resolve()
+    )
+    cognee_directory_path = str(
+        pathlib.Path(
+            os.path.join(
+                pathlib.Path(__file__).parent,
+                ".cognee_system/test_edge_centered_payload",
+            )
+        ).resolve()
+    )
+
+    cognee.config.data_root_directory(data_directory_path)
+    cognee.config.system_root_directory(cognee_directory_path)
+
+    dataset_name = "tech_companies"
+
+    await cognee.prune.prune_data()
+    await cognee.prune.prune_system(metadata=True)
+
+    await cognee.add(data=text_data, dataset_name=dataset_name)
+
+    import tempfile
+
+    with tempfile.NamedTemporaryFile(mode="w", suffix=".owl", delete=False) as f:
+        f.write(ontology_content)
+        ontology_file_path = f.name
+
+    try:
+        logger.info(f"Loading ontology from: {ontology_file_path}")
+        config: Config = {
+            "ontology_config": {
+                "ontology_resolver": RDFLibOntologyResolver(ontology_file=ontology_file_path)
+            }
+        }
+
+        await cognee.cognify(datasets=[dataset_name], config=config)
+        graph_engine = await get_graph_engine()
+        nodes_phase2, edges_phase2 = await graph_engine.get_graph_data()
+
+        vector_engine = get_vector_engine()
+        triplets_phase2 = await vector_engine.search(
+            query_text="technology", limit=None, collection_name="Triplet_text"
+        )
+
+        assert len(triplets_phase2) == len(edges_phase2), (
+            f"Triplet embeddings and number of edges do not match. Vector db contains {len(triplets_phase2)} edge triplets while graph db contains {len(edges_phase2)} edges."
+        )
+
+        search_results_phase2 = await cognee.search(
+            query_type=SearchType.TRIPLET_COMPLETION,
+            query_text="What products does Apple make?",
+        )
+
+        assert search_results_phase2 is not None, (
+            "Search should return results for triplet embeddings in simple ontology use case."
+        )
+
+    finally:
+        if os.path.exists(ontology_file_path):
+            os.unlink(ontology_file_path)
+
+
+if __name__ == "__main__":
+    import asyncio
+    from cognee.shared.logging_utils import setup_logging
+
+    setup_logging()
+
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    try:
+        loop.run_until_complete(main())
+    finally:
+        loop.run_until_complete(loop.shutdown_asyncgens())
+        loop.close()
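The test above also shows the ontology hook in its 0.5.1 form: cognify takes a Config dict whose ontology_config carries an RDFLibOntologyResolver. Distilled to the essentials as a sketch — the "tech.owl" path is hypothetical; the imports and dict shape are taken verbatim from the test:

import cognee
from cognee.modules.ontology.ontology_config import Config
from cognee.modules.ontology.rdf_xml.RDFLibOntologyResolver import RDFLibOntologyResolver


async def cognify_with_ontology():
    # "tech.owl" is a hypothetical path; any RDF/XML ontology file works the same way.
    config: Config = {
        "ontology_config": {
            "ontology_resolver": RDFLibOntologyResolver(ontology_file="tech.owl")
        }
    }
    # The resolver guides class and entity resolution during graph extraction.
    await cognee.cognify(datasets=["tech_companies"], config=config)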
cognee/tests/test_pipeline_cache.py
ADDED
@@ -0,0 +1,164 @@
+"""
+Test suite for the pipeline_cache feature in Cognee pipelines.
+
+This module tests the behavior of the `pipeline_cache` parameter which controls
+whether a pipeline should skip re-execution when it has already been completed
+for the same dataset.
+
+Architecture Overview:
+---------------------
+The pipeline_cache mechanism works at the dataset level:
+1. When a pipeline runs, it logs its status (INITIATED -> STARTED -> COMPLETED)
+2. Before each run, `check_pipeline_run_qualification()` checks the pipeline status
+3. If `use_pipeline_cache=True` and status is COMPLETED/STARTED, the pipeline skips
+4. If `use_pipeline_cache=False`, the pipeline always re-executes regardless of status
+"""
+
+import pytest
+
+import cognee
+from cognee.modules.pipelines.tasks.task import Task
+from cognee.modules.pipelines import run_pipeline
+from cognee.modules.users.methods import get_default_user
+
+from cognee.modules.pipelines.layers.reset_dataset_pipeline_run_status import (
+    reset_dataset_pipeline_run_status,
+)
+from cognee.infrastructure.databases.relational import create_db_and_tables
+
+
+class ExecutionCounter:
+    """Helper class to track task execution counts."""
+
+    def __init__(self):
+        self.count = 0
+
+
+async def create_counting_task(data, counter: ExecutionCounter):
+    """Create a task that increments a counter from the ExecutionCounter instance when executed."""
+    counter.count += 1
+    return counter
+
+
+class TestPipelineCache:
+    """Tests for basic pipeline_cache on/off behavior."""
+
+    @pytest.mark.asyncio
+    async def test_pipeline_cache_off_allows_reexecution(self):
+        """
+        Test that with use_pipeline_cache=False, the pipeline re-executes
+        even when it has already completed for the dataset.
+
+        Expected behavior:
+        - First run: Pipeline executes fully, task runs once
+        - Second run: Pipeline executes again, task runs again (total: 2 times)
+        """
+        await cognee.prune.prune_data()
+        await cognee.prune.prune_system(metadata=True)
+        await create_db_and_tables()
+
+        counter = ExecutionCounter()
+        user = await get_default_user()
+
+        tasks = [Task(create_counting_task, counter=counter)]
+
+        # First run
+        pipeline_results_1 = []
+        async for result in run_pipeline(
+            tasks=tasks,
+            datasets="test_dataset_cache_off",
+            data=["sample data"],  # Data is necessary to trigger processing
+            user=user,
+            pipeline_name="test_cache_off_pipeline",
+            use_pipeline_cache=False,
+        ):
+            pipeline_results_1.append(result)
+
+        first_run_count = counter.count
+        assert first_run_count >= 1, "Task should have executed at least once on first run"
+
+        # Second run with pipeline_cache=False
+        pipeline_results_2 = []
+        async for result in run_pipeline(
+            tasks=tasks,
+            datasets="test_dataset_cache_off",
+            data=["sample data"],  # Data is necessary to trigger processing
+            user=user,
+            pipeline_name="test_cache_off_pipeline",
+            use_pipeline_cache=False,
+        ):
+            pipeline_results_2.append(result)
+
+        second_run_count = counter.count
+        assert second_run_count > first_run_count, (
+            f"With pipeline_cache=False, task should re-execute. "
+            f"First run: {first_run_count}, After second run: {second_run_count}"
+        )
+
+    @pytest.mark.asyncio
+    async def test_reset_pipeline_status_allows_reexecution_with_cache(self):
+        """
+        Test that resetting pipeline status allows re-execution even with
+        pipeline_cache=True.
+        """
+        await cognee.prune.prune_data()
+        await cognee.prune.prune_system(metadata=True)
+        await create_db_and_tables()
+
+        counter = ExecutionCounter()
+        user = await get_default_user()
+        dataset_name = "reset_status_test"
+        pipeline_name = "test_reset_pipeline"
+
+        tasks = [Task(create_counting_task, counter=counter)]
+
+        # First run
+        pipeline_result = []
+        async for result in run_pipeline(
+            tasks=tasks,
+            datasets=dataset_name,
+            user=user,
+            data=["sample data"],  # Data is necessary to trigger processing
+            pipeline_name=pipeline_name,
+            use_pipeline_cache=True,
+        ):
+            pipeline_result.append(result)
+
+        first_run_count = counter.count
+        assert first_run_count >= 1
+
+        # Second run without reset - should skip
+        async for _ in run_pipeline(
+            tasks=tasks,
+            datasets=dataset_name,
+            user=user,
+            data=["sample data"],  # Data is necessary to trigger processing
+            pipeline_name=pipeline_name,
+            use_pipeline_cache=True,
+        ):
+            pass
+
+        after_second_run = counter.count
+        assert after_second_run == first_run_count, "Should have skipped due to cache"
+
+        # Reset the pipeline status
+        await reset_dataset_pipeline_run_status(
+            pipeline_result[0].dataset_id, user, pipeline_names=[pipeline_name]
+        )
+
+        # Third run after reset - should execute
+        async for _ in run_pipeline(
+            tasks=tasks,
+            datasets=dataset_name,
+            user=user,
+            data=["sample data"],  # Data is necessary to trigger processing
+            pipeline_name=pipeline_name,
+            use_pipeline_cache=True,
+        ):
+            pass
+
+        after_reset_run = counter.count
+        assert after_reset_run > after_second_run, (
+            f"After reset, pipeline should re-execute. "
+            f"Before reset: {after_second_run}, After reset run: {after_reset_run}"
+        )
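The docstring above spells out the caching rule; in code it is a single flag on run_pipeline. A condensed sketch of the call pattern these tests exercise — imports and keyword arguments are taken from the test file, while the dataset and pipeline names are illustrative:

from cognee.modules.pipelines import run_pipeline
from cognee.modules.pipelines.tasks.task import Task


async def run_cached(tasks: list[Task], user):
    # With use_pipeline_cache=True, a dataset whose pipeline status is already
    # COMPLETED (or STARTED) is skipped; pass False to force re-execution.
    run_results = []
    async for run_info in run_pipeline(
        tasks=tasks,
        datasets="my_dataset",
        data=["sample data"],
        user=user,
        pipeline_name="my_pipeline",
        use_pipeline_cache=True,
    ):
        run_results.append(run_info)
    return run_results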
cognee/tests/test_search_db.py
CHANGED
@@ -2,6 +2,7 @@ import pathlib
 import os
 import cognee
 from cognee.infrastructure.databases.graph import get_graph_engine
+from cognee.infrastructure.databases.vector import get_vector_engine
 from cognee.modules.graph.cognee_graph.CogneeGraphElements import Edge
 from cognee.modules.graph.utils import resolve_edges_to_text
 from cognee.modules.retrieval.graph_completion_retriever import GraphCompletionRetriever
@@ -12,8 +13,10 @@ from cognee.modules.retrieval.graph_completion_cot_retriever import GraphComplet
 from cognee.modules.retrieval.graph_summary_completion_retriever import (
     GraphSummaryCompletionRetriever,
 )
+from cognee.modules.retrieval.triplet_retriever import TripletRetriever
 from cognee.shared.logging_utils import get_logger
 from cognee.modules.search.types import SearchType
+from cognee.modules.users.methods import get_default_user
 from collections import Counter
 
 logger = get_logger()
@@ -37,6 +40,23 @@ async def main():
 
     await cognee.cognify([dataset_name])
 
+    user = await get_default_user()
+    from cognee.memify_pipelines.create_triplet_embeddings import create_triplet_embeddings
+
+    await create_triplet_embeddings(user=user, dataset=dataset_name, triplets_batch_size=5)
+
+    graph_engine = await get_graph_engine()
+    nodes, edges = await graph_engine.get_graph_data()
+
+    vector_engine = get_vector_engine()
+    collection = await vector_engine.search(
+        query_text="Test", limit=None, collection_name="Triplet_text"
+    )
+
+    assert len(edges) == len(collection), (
+        f"Expected {len(edges)} edges but got {len(collection)} in Triplet_text collection"
+    )
+
     context_gk = await GraphCompletionRetriever().get_context(
         query="Next to which country is Germany located?"
     )
@@ -49,6 +69,9 @@
     context_gk_sum = await GraphSummaryCompletionRetriever().get_context(
         query="Next to which country is Germany located?"
     )
+    context_triplet = await TripletRetriever().get_context(
+        query="Next to which country is Germany located?"
+    )
 
     for name, context in [
         ("GraphCompletionRetriever", context_gk),
@@ -65,6 +88,13 @@
             f"{name}: Context did not contain 'germany' or 'netherlands'; got: {context!r}"
         )
 
+    assert isinstance(context_triplet, str), "TripletRetriever: Context should be a string"
+    assert len(context_triplet) > 0, "TripletRetriever: Context should not be empty"
+    lower_triplet = context_triplet.lower()
+    assert "germany" in lower_triplet or "netherlands" in lower_triplet, (
+        f"TripletRetriever: Context did not contain 'germany' or 'netherlands'; got: {context_triplet!r}"
+    )
+
     triplets_gk = await GraphCompletionRetriever().get_triplets(
         query="Next to which country is Germany located?"
     )
@@ -129,6 +159,11 @@
         query_text="Next to which country is Germany located?",
         save_interaction=True,
     )
+    completion_triplet = await cognee.search(
+        query_type=SearchType.TRIPLET_COMPLETION,
+        query_text="Next to which country is Germany located?",
+        save_interaction=True,
+    )
 
     await cognee.search(
         query_type=SearchType.FEEDBACK,
@@ -141,6 +176,7 @@
         ("GRAPH_COMPLETION_COT", completion_cot),
         ("GRAPH_COMPLETION_CONTEXT_EXTENSION", completion_ext),
         ("GRAPH_SUMMARY_COMPLETION", completion_sum),
+        ("TRIPLET_COMPLETION", completion_triplet),
     ]:
         assert isinstance(search_results, list), f"{name}: should return a list"
         assert len(search_results) == 1, (
@@ -168,7 +204,7 @@
 
     # Assert there are exactly 4 CogneeUserInteraction nodes.
     assert type_counts.get("CogneeUserInteraction", 0) == 4, (
-        f"Expected exactly four
+        f"Expected exactly four CogneeUserInteraction nodes, but found {type_counts.get('CogneeUserInteraction', 0)}"
     )
 
     # Assert there is exactly two CogneeUserFeedback nodes.