cognee 0.3.6__py3-none-any.whl → 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognee/__init__.py +1 -0
- cognee/api/health.py +2 -12
- cognee/api/v1/add/add.py +46 -6
- cognee/api/v1/add/routers/get_add_router.py +5 -1
- cognee/api/v1/cognify/cognify.py +29 -9
- cognee/api/v1/datasets/datasets.py +11 -0
- cognee/api/v1/responses/default_tools.py +0 -1
- cognee/api/v1/responses/dispatch_function.py +1 -1
- cognee/api/v1/responses/routers/default_tools.py +0 -1
- cognee/api/v1/search/search.py +11 -9
- cognee/api/v1/settings/routers/get_settings_router.py +7 -1
- cognee/api/v1/ui/ui.py +47 -16
- cognee/api/v1/update/routers/get_update_router.py +1 -1
- cognee/api/v1/update/update.py +3 -3
- cognee/cli/_cognee.py +61 -10
- cognee/cli/commands/add_command.py +3 -3
- cognee/cli/commands/cognify_command.py +3 -3
- cognee/cli/commands/config_command.py +9 -7
- cognee/cli/commands/delete_command.py +3 -3
- cognee/cli/commands/search_command.py +3 -7
- cognee/cli/config.py +0 -1
- cognee/context_global_variables.py +5 -0
- cognee/exceptions/exceptions.py +1 -1
- cognee/infrastructure/databases/cache/__init__.py +2 -0
- cognee/infrastructure/databases/cache/cache_db_interface.py +79 -0
- cognee/infrastructure/databases/cache/config.py +44 -0
- cognee/infrastructure/databases/cache/get_cache_engine.py +67 -0
- cognee/infrastructure/databases/cache/redis/RedisAdapter.py +243 -0
- cognee/infrastructure/databases/exceptions/__init__.py +1 -0
- cognee/infrastructure/databases/exceptions/exceptions.py +18 -2
- cognee/infrastructure/databases/graph/get_graph_engine.py +1 -1
- cognee/infrastructure/databases/graph/graph_db_interface.py +5 -0
- cognee/infrastructure/databases/graph/kuzu/adapter.py +67 -44
- cognee/infrastructure/databases/graph/neo4j_driver/adapter.py +13 -3
- cognee/infrastructure/databases/graph/neo4j_driver/deadlock_retry.py +1 -1
- cognee/infrastructure/databases/graph/neptune_driver/neptune_utils.py +1 -1
- cognee/infrastructure/databases/relational/sqlalchemy/SqlAlchemyAdapter.py +1 -1
- cognee/infrastructure/databases/vector/embeddings/FastembedEmbeddingEngine.py +21 -3
- cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py +17 -10
- cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py +17 -4
- cognee/infrastructure/databases/vector/embeddings/config.py +2 -3
- cognee/infrastructure/databases/vector/exceptions/exceptions.py +1 -1
- cognee/infrastructure/databases/vector/lancedb/LanceDBAdapter.py +0 -1
- cognee/infrastructure/files/exceptions.py +1 -1
- cognee/infrastructure/files/storage/LocalFileStorage.py +9 -9
- cognee/infrastructure/files/storage/S3FileStorage.py +11 -11
- cognee/infrastructure/files/utils/guess_file_type.py +6 -0
- cognee/infrastructure/llm/prompts/search_type_selector_prompt.txt +0 -5
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py +19 -9
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py +17 -5
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py +17 -5
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py +32 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/__init__.py +0 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/adapter.py +109 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py +33 -8
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py +40 -18
- cognee/infrastructure/loaders/LoaderEngine.py +27 -7
- cognee/infrastructure/loaders/external/__init__.py +7 -0
- cognee/infrastructure/loaders/external/advanced_pdf_loader.py +2 -8
- cognee/infrastructure/loaders/external/beautiful_soup_loader.py +310 -0
- cognee/infrastructure/loaders/supported_loaders.py +7 -0
- cognee/modules/data/exceptions/exceptions.py +1 -1
- cognee/modules/data/methods/__init__.py +3 -0
- cognee/modules/data/methods/get_dataset_data.py +4 -1
- cognee/modules/data/methods/has_dataset_data.py +21 -0
- cognee/modules/engine/models/TableRow.py +0 -1
- cognee/modules/ingestion/save_data_to_file.py +9 -2
- cognee/modules/pipelines/exceptions/exceptions.py +1 -1
- cognee/modules/pipelines/operations/pipeline.py +12 -1
- cognee/modules/pipelines/operations/run_tasks.py +25 -197
- cognee/modules/pipelines/operations/run_tasks_data_item.py +260 -0
- cognee/modules/pipelines/operations/run_tasks_distributed.py +121 -38
- cognee/modules/retrieval/EntityCompletionRetriever.py +48 -8
- cognee/modules/retrieval/base_graph_retriever.py +3 -1
- cognee/modules/retrieval/base_retriever.py +3 -1
- cognee/modules/retrieval/chunks_retriever.py +5 -1
- cognee/modules/retrieval/code_retriever.py +20 -2
- cognee/modules/retrieval/completion_retriever.py +50 -9
- cognee/modules/retrieval/cypher_search_retriever.py +11 -1
- cognee/modules/retrieval/graph_completion_context_extension_retriever.py +47 -8
- cognee/modules/retrieval/graph_completion_cot_retriever.py +32 -1
- cognee/modules/retrieval/graph_completion_retriever.py +54 -10
- cognee/modules/retrieval/lexical_retriever.py +20 -2
- cognee/modules/retrieval/natural_language_retriever.py +10 -1
- cognee/modules/retrieval/summaries_retriever.py +5 -1
- cognee/modules/retrieval/temporal_retriever.py +62 -10
- cognee/modules/retrieval/user_qa_feedback.py +3 -2
- cognee/modules/retrieval/utils/completion.py +5 -0
- cognee/modules/retrieval/utils/description_to_codepart_search.py +1 -1
- cognee/modules/retrieval/utils/session_cache.py +156 -0
- cognee/modules/search/methods/get_search_type_tools.py +0 -5
- cognee/modules/search/methods/no_access_control_search.py +12 -1
- cognee/modules/search/methods/search.py +34 -2
- cognee/modules/search/types/SearchType.py +0 -1
- cognee/modules/settings/get_settings.py +23 -0
- cognee/modules/users/methods/get_authenticated_user.py +3 -1
- cognee/modules/users/methods/get_default_user.py +1 -6
- cognee/modules/users/roles/methods/create_role.py +2 -2
- cognee/modules/users/tenants/methods/create_tenant.py +2 -2
- cognee/shared/exceptions/exceptions.py +1 -1
- cognee/tasks/codingagents/coding_rule_associations.py +1 -2
- cognee/tasks/documents/exceptions/exceptions.py +1 -1
- cognee/tasks/graph/extract_graph_from_data.py +2 -0
- cognee/tasks/ingestion/data_item_to_text_file.py +3 -3
- cognee/tasks/ingestion/ingest_data.py +11 -5
- cognee/tasks/ingestion/save_data_item_to_storage.py +12 -1
- cognee/tasks/storage/add_data_points.py +3 -10
- cognee/tasks/storage/index_data_points.py +19 -14
- cognee/tasks/storage/index_graph_edges.py +25 -11
- cognee/tasks/web_scraper/__init__.py +34 -0
- cognee/tasks/web_scraper/config.py +26 -0
- cognee/tasks/web_scraper/default_url_crawler.py +446 -0
- cognee/tasks/web_scraper/models.py +46 -0
- cognee/tasks/web_scraper/types.py +4 -0
- cognee/tasks/web_scraper/utils.py +142 -0
- cognee/tasks/web_scraper/web_scraper_task.py +396 -0
- cognee/tests/cli_tests/cli_unit_tests/test_cli_utils.py +0 -1
- cognee/tests/integration/web_url_crawler/test_default_url_crawler.py +13 -0
- cognee/tests/integration/web_url_crawler/test_tavily_crawler.py +19 -0
- cognee/tests/integration/web_url_crawler/test_url_adding_e2e.py +344 -0
- cognee/tests/subprocesses/reader.py +25 -0
- cognee/tests/subprocesses/simple_cognify_1.py +31 -0
- cognee/tests/subprocesses/simple_cognify_2.py +31 -0
- cognee/tests/subprocesses/writer.py +32 -0
- cognee/tests/tasks/descriptive_metrics/metrics_test_utils.py +0 -2
- cognee/tests/tasks/descriptive_metrics/neo4j_metrics_test.py +8 -3
- cognee/tests/tasks/entity_extraction/entity_extraction_test.py +89 -0
- cognee/tests/tasks/web_scraping/web_scraping_test.py +172 -0
- cognee/tests/test_add_docling_document.py +56 -0
- cognee/tests/test_chromadb.py +7 -11
- cognee/tests/test_concurrent_subprocess_access.py +76 -0
- cognee/tests/test_conversation_history.py +240 -0
- cognee/tests/test_kuzu.py +27 -15
- cognee/tests/test_lancedb.py +7 -11
- cognee/tests/test_library.py +32 -2
- cognee/tests/test_neo4j.py +24 -16
- cognee/tests/test_neptune_analytics_vector.py +7 -11
- cognee/tests/test_permissions.py +9 -13
- cognee/tests/test_pgvector.py +4 -4
- cognee/tests/test_remote_kuzu.py +8 -11
- cognee/tests/test_s3_file_storage.py +1 -1
- cognee/tests/test_search_db.py +6 -8
- cognee/tests/unit/infrastructure/databases/cache/test_cache_config.py +89 -0
- cognee/tests/unit/modules/retrieval/conversation_history_test.py +154 -0
- {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/METADATA +21 -6
- {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/RECORD +155 -126
- {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/entry_points.txt +1 -0
- distributed/Dockerfile +0 -3
- distributed/entrypoint.py +21 -9
- distributed/signal.py +5 -0
- distributed/workers/data_point_saving_worker.py +64 -34
- distributed/workers/graph_saving_worker.py +71 -47
- cognee/infrastructure/databases/graph/memgraph/memgraph_adapter.py +0 -1116
- cognee/modules/retrieval/insights_retriever.py +0 -133
- cognee/tests/test_memgraph.py +0 -109
- cognee/tests/unit/modules/retrieval/insights_retriever_test.py +0 -251
- {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/WHEEL +0 -0
- {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/licenses/LICENSE +0 -0
- {cognee-0.3.6.dist-info → cognee-0.3.7.dist-info}/licenses/NOTICE.md +0 -0
cognee/tests/test_lancedb.py
CHANGED

@@ -131,20 +131,16 @@ async def main():
  dataset_name_1 = "natural_language"
  dataset_name_2 = "quantum"

-
+ explanation_file_path_nlp = os.path.join(
  pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
  )
- await cognee.add([
+ await cognee.add([explanation_file_path_nlp], dataset_name_1)

-
-
-
- The basic unit of information in quantum computing is the qubit, similar to the bit in traditional digital electronics. Unlike a classical bit, a qubit can exist in a superposition of its two "basis" states. When measuring a qubit, the result is a probabilistic output of a classical bit, therefore making quantum computers nondeterministic in general. If a quantum computer manipulates the qubit in a particular way, wave interference effects can amplify the desired measurement results. The design of quantum algorithms involves creating procedures that allow a quantum computer to perform calculations efficiently and quickly.
- Physically engineering high-quality qubits has proven challenging. If a physical qubit is not sufficiently isolated from its environment, it suffers from quantum decoherence, introducing noise into calculations. Paradoxically, perfectly isolating qubits is also undesirable because quantum computations typically need to initialize qubits, perform controlled qubit interactions, and measure the resulting quantum states. Each of those operations introduces errors and suffers from noise, and such inaccuracies accumulate.
- In principle, a non-quantum (classical) computer can solve the same computational problems as a quantum computer, given enough time. Quantum advantage comes in the form of time complexity rather than computability, and quantum complexity theory shows that some quantum algorithms for carefully selected tasks require exponentially fewer computational steps than the best known non-quantum algorithms. Such tasks can in theory be solved on a large-scale quantum computer whereas classical computers would not finish computations in any reasonable amount of time. However, quantum speedup is not universal or even typical across computational tasks, since basic tasks such as sorting are proven to not allow any asymptotic quantum speedup. Claims of quantum supremacy have drawn significant attention to the discipline, but are demonstrated on contrived tasks, while near-term practical use cases remain limited.
- """
+ explanation_file_path_quantum = os.path.join(
+ pathlib.Path(__file__).parent, "test_data/Quantum_computers.txt"
+ )

- await cognee.add([
+ await cognee.add([explanation_file_path_quantum], dataset_name_2)

  await cognee.cognify([dataset_name_2, dataset_name_1])

@@ -157,7 +153,7 @@ async def main():
  random_node_name = random_node.payload["text"]

  search_results = await cognee.search(
- query_type=SearchType.
+ query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
  )
  assert len(search_results) != 0, "The search results list is empty."
  print("\n\nExtracted sentences are:\n")
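Across these integration tests, inline multi-paragraph strings are replaced with the checked-in test_data fixture files, and the search calls now use SearchType.GRAPH_COMPLETION. A minimal sketch of the flow the updated tests exercise, assuming the test_data files are present and an LLM, vector, and graph backend are configured (the query text is illustrative):

import asyncio
import os
import pathlib

import cognee
from cognee.modules.search.types import SearchType


async def main():
    # Ingest a fixture file into a named dataset instead of an inline string.
    explanation_file_path_nlp = os.path.join(
        pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
    )
    await cognee.add([explanation_file_path_nlp], "natural_language")

    # Build the knowledge graph for the dataset, then query it.
    await cognee.cognify(["natural_language"])
    search_results = await cognee.search(
        query_type=SearchType.GRAPH_COMPLETION, query_text="What is natural language processing?"
    )
    print(search_results)


if __name__ == "__main__":
    asyncio.run(main())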
cognee/tests/test_library.py
CHANGED

@@ -6,6 +6,7 @@ from cognee.modules.search.operations import get_history
  from cognee.modules.users.methods import get_default_user
  from cognee.shared.logging_utils import get_logger
  from cognee.modules.search.types import SearchType
+ from cognee import update

  logger = get_logger()

@@ -42,7 +43,7 @@ async def main():

  await cognee.add([text], dataset_name)

- await cognee.cognify([dataset_name])
+ cognify_run_info = await cognee.cognify([dataset_name])

  from cognee.infrastructure.databases.vector import get_vector_engine

@@ -51,7 +52,7 @@ async def main():
  random_node_name = random_node.payload["text"]

  search_results = await cognee.search(
- query_type=SearchType.
+ query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
  )
  assert len(search_results) != 0, "The search results list is empty."
  print("\n\nExtracted sentences are:\n")

@@ -77,6 +78,35 @@ async def main():

  assert len(history) == 6, "Search history is not correct."

+ # Test updating of documents
+ # Get Pipeline Run object
+ pipeline_run_obj = list(cognify_run_info.values())[0]
+ for data_item in pipeline_run_obj.data_ingestion_info:
+ # Update all documents in dataset to only contain Mark and Cindy information
+ await update(
+ dataset_id=pipeline_run_obj.dataset_id,
+ data_id=data_item["data_id"],
+ data="Mark met with Cindy at a cafe.",
+ )
+
+ search_results = await cognee.search(
+ query_type=SearchType.GRAPH_COMPLETION, query_text="What information do you contain?"
+ )
+ assert "Mark" in search_results[0], (
+ "Failed to update document, no mention of Mark in search results"
+ )
+ assert "Cindy" in search_results[0], (
+ "Failed to update document, no mention of Cindy in search results"
+ )
+ assert "Artificial intelligence" not in search_results[0], (
+ "Failed to update document, Artificial intelligence still mentioned in search results"
+ )
+
+ # Test visualization
+ from cognee import visualize_graph
+
+ await visualize_graph()
+
  # Assert local data files are cleaned properly
  await cognee.prune.prune_data()
  data_root_directory = get_storage_config()["data_root_directory"]
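The library test also covers the new update flow: cognee.cognify() now returns pipeline run information, and each ingested data item can be rewritten through the top-level update() call (see cognee/api/v1/update/update.py and run_tasks_data_item.py above). A sketch of that step under the same assumptions, with the dataset name and replacement text purely illustrative:

import cognee
from cognee import update
from cognee.modules.search.types import SearchType


async def rewrite_dataset(dataset_name: str, new_text: str):
    # cognify() returns a mapping whose values describe each pipeline run.
    cognify_run_info = await cognee.cognify([dataset_name])
    pipeline_run_obj = list(cognify_run_info.values())[0]

    # data_ingestion_info lists the ingested items; update() replaces one by its data_id.
    for data_item in pipeline_run_obj.data_ingestion_info:
        await update(
            dataset_id=pipeline_run_obj.dataset_id,
            data_id=data_item["data_id"],
            data=new_text,
        )

    # Query the refreshed graph afterwards.
    return await cognee.search(
        query_type=SearchType.GRAPH_COMPLETION,
        query_text="What information do you contain?",
    )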
cognee/tests/test_neo4j.py
CHANGED

@@ -32,23 +32,34 @@ async def main():

  dataset_name = "cs_explanations"

-
+ explanation_file_path_nlp = os.path.join(
  pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
  )
-
+ from cognee.infrastructure.databases.graph import get_graph_engine
+
+ graph_engine = await get_graph_engine()
+
+ is_empty = await graph_engine.is_empty()

-
-
-
-
-
-
-
+ assert is_empty, "Graph has to be empty"
+
+ await cognee.add([explanation_file_path_nlp], dataset_name)
+
+ explanation_file_path_quantum = os.path.join(
+ pathlib.Path(__file__).parent, "test_data/Quantum_computers.txt"
+ )

- await cognee.add([
+ await cognee.add([explanation_file_path_quantum], dataset_name)
+ is_empty = await graph_engine.is_empty()
+
+ assert is_empty, "Graph has to be empty before cognify"

  await cognee.cognify([dataset_name])

+ is_empty = await graph_engine.is_empty()
+
+ assert not is_empty, "Graph shouldn't be empty"
+
  from cognee.infrastructure.databases.vector import get_vector_engine

  vector_engine = get_vector_engine()

@@ -56,7 +67,7 @@ async def main():
  random_node_name = random_node.payload["text"]

  search_results = await cognee.search(
- query_type=SearchType.
+ query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
  )
  assert len(search_results) != 0, "The search results list is empty."
  print("\n\nExtracted sentences are:\n")

@@ -121,11 +132,8 @@ async def main():
  assert not os.path.isdir(data_root_directory), "Local data files are not deleted"

  await cognee.prune.prune_system(metadata=True)
-
-
- graph_engine = await get_graph_engine()
- nodes, edges = await graph_engine.get_graph_data()
- assert len(nodes) == 0 and len(edges) == 0, "Neo4j graph database is not empty"
+ is_empty = await graph_engine.is_empty()
+ assert is_empty, "Neo4j graph database is not empty"


  if __name__ == "__main__":
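The Neo4j test now relies on the is_empty() method added to the graph engine interface (graph_db_interface.py +5 above) instead of counting nodes and edges via get_graph_data(). A short sketch of the emptiness checks it performs before add, before cognify, and after pruning, assuming a configured graph backend:

from cognee.infrastructure.databases.graph import get_graph_engine


async def assert_graph_empty(expected: bool):
    graph_engine = await get_graph_engine()

    # is_empty() avoids pulling the whole graph just to count nodes and edges.
    is_empty = await graph_engine.is_empty()
    assert is_empty == expected, "Unexpected graph state"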
cognee/tests/test_neptune_analytics_vector.py
CHANGED

@@ -38,20 +38,16 @@ async def main():

  dataset_name = "cs_explanations"

-
+ explanation_file_path_nlp = os.path.join(
  pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
  )
- await cognee.add([
+ await cognee.add([explanation_file_path_nlp], dataset_name)

-
-
-
- The basic unit of information in quantum computing is the qubit, similar to the bit in traditional digital electronics. Unlike a classical bit, a qubit can exist in a superposition of its two "basis" states. When measuring a qubit, the result is a probabilistic output of a classical bit, therefore making quantum computers nondeterministic in general. If a quantum computer manipulates the qubit in a particular way, wave interference effects can amplify the desired measurement results. The design of quantum algorithms involves creating procedures that allow a quantum computer to perform calculations efficiently and quickly.
- Physically engineering high-quality qubits has proven challenging. If a physical qubit is not sufficiently isolated from its environment, it suffers from quantum decoherence, introducing noise into calculations. Paradoxically, perfectly isolating qubits is also undesirable because quantum computations typically need to initialize qubits, perform controlled qubit interactions, and measure the resulting quantum states. Each of those operations introduces errors and suffers from noise, and such inaccuracies accumulate.
- In principle, a non-quantum (classical) computer can solve the same computational problems as a quantum computer, given enough time. Quantum advantage comes in the form of time complexity rather than computability, and quantum complexity theory shows that some quantum algorithms for carefully selected tasks require exponentially fewer computational steps than the best known non-quantum algorithms. Such tasks can in theory be solved on a large-scale quantum computer whereas classical computers would not finish computations in any reasonable amount of time. However, quantum speedup is not universal or even typical across computational tasks, since basic tasks such as sorting are proven to not allow any asymptotic quantum speedup. Claims of quantum supremacy have drawn significant attention to the discipline, but are demonstrated on contrived tasks, while near-term practical use cases remain limited.
- """
+ explanation_file_path_quantum = os.path.join(
+ pathlib.Path(__file__).parent, "test_data/Quantum_computers.txt"
+ )

- await cognee.add([
+ await cognee.add([explanation_file_path_quantum], dataset_name)

  await cognee.cognify([dataset_name])

@@ -60,7 +56,7 @@ async def main():
  random_node_name = random_node.payload["text"]

  search_results = await cognee.search(
- query_type=SearchType.
+ query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
  )
  assert len(search_results) != 0, "The search results list is empty."
  print("\n\nExtracted sentences are:\n")
cognee/tests/test_permissions.py
CHANGED

@@ -34,25 +34,21 @@ async def main():
  await cognee.prune.prune_data()
  await cognee.prune.prune_system(metadata=True)

-
+ explanation_file_path_nlp = os.path.join(
  pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
  )

  # Add document for default user
- await cognee.add([
+ await cognee.add([explanation_file_path_nlp], dataset_name="NLP")
  default_user = await get_default_user()

-
-
-
- The basic unit of information in quantum computing is the qubit, similar to the bit in traditional digital electronics. Unlike a classical bit, a qubit can exist in a superposition of its two "basis" states. When measuring a qubit, the result is a probabilistic output of a classical bit, therefore making quantum computers nondeterministic in general. If a quantum computer manipulates the qubit in a particular way, wave interference effects can amplify the desired measurement results. The design of quantum algorithms involves creating procedures that allow a quantum computer to perform calculations efficiently and quickly.
- Physically engineering high-quality qubits has proven challenging. If a physical qubit is not sufficiently isolated from its environment, it suffers from quantum decoherence, introducing noise into calculations. Paradoxically, perfectly isolating qubits is also undesirable because quantum computations typically need to initialize qubits, perform controlled qubit interactions, and measure the resulting quantum states. Each of those operations introduces errors and suffers from noise, and such inaccuracies accumulate.
- In principle, a non-quantum (classical) computer can solve the same computational problems as a quantum computer, given enough time. Quantum advantage comes in the form of time complexity rather than computability, and quantum complexity theory shows that some quantum algorithms for carefully selected tasks require exponentially fewer computational steps than the best known non-quantum algorithms. Such tasks can in theory be solved on a large-scale quantum computer whereas classical computers would not finish computations in any reasonable amount of time. However, quantum speedup is not universal or even typical across computational tasks, since basic tasks such as sorting are proven to not allow any asymptotic quantum speedup. Claims of quantum supremacy have drawn significant attention to the discipline, but are demonstrated on contrived tasks, while near-term practical use cases remain limited.
- """
+ explanation_file_path_quantum = os.path.join(
+ pathlib.Path(__file__).parent, "test_data/Quantum_computers.txt"
+ )

  # Add document for test user
  test_user = await create_user("user@example.com", "example")
- await cognee.add([
+ await cognee.add([explanation_file_path_quantum], dataset_name="QUANTUM", user=test_user)

  nlp_cognify_result = await cognee.cognify(["NLP"], user=default_user)
  quantum_cognify_result = await cognee.cognify(["QUANTUM"], user=test_user)

@@ -101,7 +97,7 @@ async def main():
  add_error = False
  try:
  await cognee.add(
- [
+ [explanation_file_path_nlp],
  dataset_name="QUANTUM",
  dataset_id=test_user_dataset_id,
  user=default_user,

@@ -143,7 +139,7 @@ async def main():

  # Add new data to test_users dataset from default_user
  await cognee.add(
- [
+ [explanation_file_path_nlp],
  dataset_name="QUANTUM",
  dataset_id=test_user_dataset_id,
  user=default_user,

@@ -216,7 +212,7 @@ async def main():
  )

  # Try deleting data from test_user dataset with default_user after getting delete permission
- # Get the dataset data to find the ID of the remaining data item (
+ # Get the dataset data to find the ID of the remaining data item (explanation_file_path_nlp)
  test_user_dataset_data = await get_dataset_data(test_user_dataset_id)
  explanation_file_data_id = test_user_dataset_data[0].id

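The permissions test keeps the same multi-user scenario but feeds it from the fixture files. A condensed sketch of the per-user add/cognify calls it makes; the create_user import path is an assumption based on the cognee.modules.users.methods module used elsewhere in these tests:

import cognee
from cognee.modules.users.methods import create_user, get_default_user


async def add_documents_per_user(nlp_path: str, quantum_path: str):
    # The default user owns the "NLP" dataset.
    default_user = await get_default_user()
    await cognee.add([nlp_path], dataset_name="NLP")

    # A second user owns the "QUANTUM" dataset.
    test_user = await create_user("user@example.com", "example")
    await cognee.add([quantum_path], dataset_name="QUANTUM", user=test_user)

    # Each dataset is cognified on behalf of its owner.
    await cognee.cognify(["NLP"], user=default_user)
    await cognee.cognify(["QUANTUM"], user=test_user)
    return default_user, test_user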
cognee/tests/test_pgvector.py
CHANGED

@@ -141,10 +141,10 @@ async def main():
  dataset_name_1 = "natural_language"
  dataset_name_2 = "quantum"

-
+ explanation_file_path_nlp = os.path.join(
  pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
  )
- await cognee.add([
+ await cognee.add([explanation_file_path_nlp], dataset_name_1)

  text = """A quantum computer is a computer that takes advantage of quantum mechanical phenomena.
  At small scales, physical matter exhibits properties of both particles and waves, and quantum computing leverages this behavior, specifically quantum superposition and entanglement, using specialized hardware that supports the preparation and manipulation of quantum states.

@@ -167,7 +167,7 @@ async def main():
  random_node_name = random_node.payload["text"]

  search_results = await cognee.search(
- query_type=SearchType.
+ query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
  )
  assert len(search_results) != 0, "The search results list is empty."
  print("\n\nExtracted sentences are:\n")

@@ -202,7 +202,7 @@ async def main():
  history = await get_history(user.id)
  assert len(history) == 8, "Search history is not correct."

- await test_local_file_deletion(text,
+ await test_local_file_deletion(text, explanation_file_path_nlp)

  await cognee.prune.prune_data()
  data_root_directory = get_storage_config()["data_root_directory"]
cognee/tests/test_remote_kuzu.py
CHANGED

@@ -42,19 +42,16 @@ async def main():

  dataset_name = "cs_explanations"

-
+ explanation_file_path_nlp = os.path.join(
  pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
  )
- await cognee.add([
+ await cognee.add([explanation_file_path_nlp], dataset_name)

-
-
-
-
-
- In principle, a non-quantum (classical) computer can solve the same computational problems as a quantum computer, given enough time. Quantum advantage comes in the form of time complexity rather than computability, and quantum complexity theory shows that some quantum algorithms for carefully selected tasks require exponentially fewer computational steps than the best known non-quantum algorithms. Such tasks can in theory be solved on a large-scale quantum computer whereas classical computers would not finish computations in any reasonable amount of time. However, quantum speedup is not universal or even typical across computational tasks, since basic tasks such as sorting are proven to not allow any asymptotic quantum speedup. Claims of quantum supremacy have drawn significant attention to the discipline, but are demonstrated on contrived tasks, while near-term practical use cases remain limited.
- """
- await cognee.add([text], dataset_name)
+ explanation_file_path_quantum = os.path.join(
+ pathlib.Path(__file__).parent, "test_data/Quantum_computers.txt"
+ )
+
+ await cognee.add([explanation_file_path_quantum], dataset_name)

  await cognee.cognify([dataset_name])

@@ -65,7 +62,7 @@ async def main():
  random_node_name = random_node.payload["text"]

  search_results = await cognee.search(
- query_type=SearchType.
+ query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
  )
  assert len(search_results) != 0, "The search results list is empty."
  print("\n\nExtracted sentences are:\n")
cognee/tests/test_s3_file_storage.py
CHANGED

@@ -47,7 +47,7 @@ async def main():
  random_node_name = random_node.payload["text"]

  search_results = await cognee.search(
- query_type=SearchType.
+ query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
  )
  assert len(search_results) != 0, "The search results list is empty."
  print("\n\nExtracted sentences are:\n")
cognee/tests/test_search_db.py
CHANGED

@@ -1,3 +1,5 @@
+ import pathlib
+ import os
  import cognee
  from cognee.infrastructure.databases.graph import get_graph_engine
  from cognee.modules.graph.cognee_graph.CogneeGraphElements import Edge

@@ -27,15 +29,11 @@ async def main():
  text_1 = """Germany is located in europe right next to the Netherlands"""
  await cognee.add(text_1, dataset_name)

-
-
-
- The basic unit of information in quantum computing is the qubit, similar to the bit in traditional digital electronics. Unlike a classical bit, a qubit can exist in a superposition of its two "basis" states. When measuring a qubit, the result is a probabilistic output of a classical bit, therefore making quantum computers nondeterministic in general. If a quantum computer manipulates the qubit in a particular way, wave interference effects can amplify the desired measurement results. The design of quantum algorithms involves creating procedures that allow a quantum computer to perform calculations efficiently and quickly.
- Physically engineering high-quality qubits has proven challenging. If a physical qubit is not sufficiently isolated from its environment, it suffers from quantum decoherence, introducing noise into calculations. Paradoxically, perfectly isolating qubits is also undesirable because quantum computations typically need to initialize qubits, perform controlled qubit interactions, and measure the resulting quantum states. Each of those operations introduces errors and suffers from noise, and such inaccuracies accumulate.
- In principle, a non-quantum (classical) computer can solve the same computational problems as a quantum computer, given enough time. Quantum advantage comes in the form of time complexity rather than computability, and quantum complexity theory shows that some quantum algorithms for carefully selected tasks require exponentially fewer computational steps than the best known non-quantum algorithms. Such tasks can in theory be solved on a large-scale quantum computer whereas classical computers would not finish computations in any reasonable amount of time. However, quantum speedup is not universal or even typical across computational tasks, since basic tasks such as sorting are proven to not allow any asymptotic quantum speedup. Claims of quantum supremacy have drawn significant attention to the discipline, but are demonstrated on contrived tasks, while near-term practical use cases remain limited.
- """
+ explanation_file_path_quantum = os.path.join(
+ pathlib.Path(__file__).parent, "test_data/Quantum_computers.txt"
+ )

- await cognee.add([
+ await cognee.add([explanation_file_path_quantum], dataset_name)

  await cognee.cognify([dataset_name])

cognee/tests/unit/infrastructure/databases/cache/test_cache_config.py
ADDED

@@ -0,0 +1,89 @@
+ """Tests for cache configuration."""
+
+ import pytest
+ from cognee.infrastructure.databases.cache.config import CacheConfig, get_cache_config
+
+
+ def test_cache_config_defaults():
+ """Test that CacheConfig has the correct default values."""
+ config = CacheConfig()
+
+ assert config.caching is False
+ assert config.shared_kuzu_lock is False
+ assert config.cache_host == "localhost"
+ assert config.cache_port == 6379
+ assert config.agentic_lock_expire == 240
+ assert config.agentic_lock_timeout == 300
+
+
+ def test_cache_config_custom_values():
+ """Test that CacheConfig accepts custom values."""
+ config = CacheConfig(
+ caching=True,
+ shared_kuzu_lock=True,
+ cache_host="redis.example.com",
+ cache_port=6380,
+ agentic_lock_expire=120,
+ agentic_lock_timeout=180,
+ )
+
+ assert config.caching is True
+ assert config.shared_kuzu_lock is True
+ assert config.cache_host == "redis.example.com"
+ assert config.cache_port == 6380
+ assert config.agentic_lock_expire == 120
+ assert config.agentic_lock_timeout == 180
+
+
+ def test_cache_config_to_dict():
+ """Test the to_dict method returns all configuration values."""
+ config = CacheConfig(
+ caching=True,
+ shared_kuzu_lock=True,
+ cache_host="test-host",
+ cache_port=7000,
+ agentic_lock_expire=100,
+ agentic_lock_timeout=200,
+ )
+
+ config_dict = config.to_dict()
+
+ assert config_dict == {
+ "caching": True,
+ "shared_kuzu_lock": True,
+ "cache_host": "test-host",
+ "cache_port": 7000,
+ "cache_username": None,
+ "cache_password": None,
+ "agentic_lock_expire": 100,
+ "agentic_lock_timeout": 200,
+ }
+
+
+ def test_get_cache_config_singleton():
+ """Test that get_cache_config returns the same instance."""
+ config1 = get_cache_config()
+ config2 = get_cache_config()
+
+ assert config1 is config2
+
+
+ def test_cache_config_extra_fields_allowed():
+ """Test that CacheConfig allows extra fields due to extra='allow'."""
+ config = CacheConfig(extra_field="extra_value", another_field=123)
+
+ assert hasattr(config, "extra_field")
+ assert config.extra_field == "extra_value"
+ assert hasattr(config, "another_field")
+ assert config.another_field == 123
+
+
+ def test_cache_config_boolean_type_validation():
+ """Test that boolean fields accept various truthy/falsy values."""
+ config1 = CacheConfig(caching="true", shared_kuzu_lock="yes")
+ assert config1.caching is True
+ assert config1.shared_kuzu_lock is True
+
+ config2 = CacheConfig(caching="false", shared_kuzu_lock="no")
+ assert config2.caching is False
+ assert config2.shared_kuzu_lock is False
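These unit tests pin down the new cache configuration object that backs the Redis-based cache layer (cache/config.py, get_cache_engine.py, and RedisAdapter.py above). A sketch of reading it, assuming no cache-related environment variables override the defaults:

from cognee.infrastructure.databases.cache.config import CacheConfig, get_cache_config

# Defaults per the tests above: caching off, local Redis endpoint, no credentials.
config = CacheConfig()
print(config.to_dict())
# {"caching": False, "shared_kuzu_lock": False, "cache_host": "localhost",
#  "cache_port": 6379, "cache_username": None, "cache_password": None,
#  "agentic_lock_expire": 240, "agentic_lock_timeout": 300}

# get_cache_config() returns a process-wide singleton, so repeated calls share state.
assert get_cache_config() is get_cache_config()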
cognee/tests/unit/modules/retrieval/conversation_history_test.py
ADDED

@@ -0,0 +1,154 @@
+ import pytest
+ from unittest.mock import AsyncMock, patch, MagicMock
+ from cognee.context_global_variables import session_user
+ import importlib
+
+
+ def create_mock_cache_engine(qa_history=None):
+ mock_cache = AsyncMock()
+ if qa_history is None:
+ qa_history = []
+ mock_cache.get_latest_qa = AsyncMock(return_value=qa_history)
+ mock_cache.add_qa = AsyncMock(return_value=None)
+ return mock_cache
+
+
+ def create_mock_user():
+ mock_user = MagicMock()
+ mock_user.id = "test-user-id-123"
+ return mock_user
+
+
+ class TestConversationHistoryUtils:
+ @pytest.mark.asyncio
+ async def test_get_conversation_history_returns_empty_when_no_history(self):
+ user = create_mock_user()
+ session_user.set(user)
+ mock_cache = create_mock_cache_engine([])
+
+ cache_module = importlib.import_module(
+ "cognee.infrastructure.databases.cache.get_cache_engine"
+ )
+
+ with patch.object(cache_module, "get_cache_engine", return_value=mock_cache):
+ from cognee.modules.retrieval.utils.session_cache import get_conversation_history
+
+ result = await get_conversation_history(session_id="test_session")
+
+ assert result == ""
+
+ @pytest.mark.asyncio
+ async def test_get_conversation_history_formats_history_correctly(self):
+ """Test get_conversation_history formats Q&A history with correct structure."""
+ user = create_mock_user()
+ session_user.set(user)
+
+ mock_history = [
+ {
+ "time": "2024-01-15 10:30:45",
+ "question": "What is AI?",
+ "context": "AI is artificial intelligence",
+ "answer": "AI stands for Artificial Intelligence",
+ }
+ ]
+ mock_cache = create_mock_cache_engine(mock_history)
+
+ # Import the real module to patch safely
+ cache_module = importlib.import_module(
+ "cognee.infrastructure.databases.cache.get_cache_engine"
+ )
+
+ with patch.object(cache_module, "get_cache_engine", return_value=mock_cache):
+ with patch(
+ "cognee.modules.retrieval.utils.session_cache.CacheConfig"
+ ) as MockCacheConfig:
+ mock_config = MagicMock()
+ mock_config.caching = True
+ MockCacheConfig.return_value = mock_config
+
+ from cognee.modules.retrieval.utils.session_cache import (
+ get_conversation_history,
+ )
+
+ result = await get_conversation_history(session_id="test_session")
+
+ assert "Previous conversation:" in result
+ assert "[2024-01-15 10:30:45]" in result
+ assert "QUESTION: What is AI?" in result
+ assert "CONTEXT: AI is artificial intelligence" in result
+ assert "ANSWER: AI stands for Artificial Intelligence" in result
+
+ @pytest.mark.asyncio
+ async def test_save_to_session_cache_saves_correctly(self):
+ """Test save_conversation_history calls add_qa with correct parameters."""
+ user = create_mock_user()
+ session_user.set(user)
+
+ mock_cache = create_mock_cache_engine([])
+
+ cache_module = importlib.import_module(
+ "cognee.infrastructure.databases.cache.get_cache_engine"
+ )
+
+ with patch.object(cache_module, "get_cache_engine", return_value=mock_cache):
+ with patch(
+ "cognee.modules.retrieval.utils.session_cache.CacheConfig"
+ ) as MockCacheConfig:
+ mock_config = MagicMock()
+ mock_config.caching = True
+ MockCacheConfig.return_value = mock_config
+
+ from cognee.modules.retrieval.utils.session_cache import (
+ save_conversation_history,
+ )
+
+ result = await save_conversation_history(
+ query="What is Python?",
+ context_summary="Python is a programming language",
+ answer="Python is a high-level programming language",
+ session_id="my_session",
+ )
+
+ assert result is True
+ mock_cache.add_qa.assert_called_once()
+
+ call_kwargs = mock_cache.add_qa.call_args.kwargs
+ assert call_kwargs["question"] == "What is Python?"
+ assert call_kwargs["context"] == "Python is a programming language"
+ assert call_kwargs["answer"] == "Python is a high-level programming language"
+ assert call_kwargs["session_id"] == "my_session"
+
+ @pytest.mark.asyncio
+ async def test_save_to_session_cache_uses_default_session_when_none(self):
+ """Test save_conversation_history uses 'default_session' when session_id is None."""
+ user = create_mock_user()
+ session_user.set(user)
+
+ mock_cache = create_mock_cache_engine([])
+
+ cache_module = importlib.import_module(
+ "cognee.infrastructure.databases.cache.get_cache_engine"
+ )
+
+ with patch.object(cache_module, "get_cache_engine", return_value=mock_cache):
+ with patch(
+ "cognee.modules.retrieval.utils.session_cache.CacheConfig"
+ ) as MockCacheConfig:
+ mock_config = MagicMock()
+ mock_config.caching = True
+ MockCacheConfig.return_value = mock_config
+
+ from cognee.modules.retrieval.utils.session_cache import (
+ save_conversation_history,
+ )
+
+ result = await save_conversation_history(
+ query="Test question",
+ context_summary="Test context",
+ answer="Test answer",
+ session_id=None,
+ )
+
+ assert result is True
+ call_kwargs = mock_cache.add_qa.call_args.kwargs
+ assert call_kwargs["session_id"] == "default_session"