cognee 0.2.3.dev1__py3-none-any.whl → 0.3.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cognee/__init__.py +1 -0
- cognee/__main__.py +4 -0
- cognee/api/client.py +28 -3
- cognee/api/health.py +10 -13
- cognee/api/v1/add/add.py +20 -6
- cognee/api/v1/add/routers/get_add_router.py +12 -37
- cognee/api/v1/cloud/routers/__init__.py +1 -0
- cognee/api/v1/cloud/routers/get_checks_router.py +23 -0
- cognee/api/v1/cognify/code_graph_pipeline.py +14 -3
- cognee/api/v1/cognify/cognify.py +67 -105
- cognee/api/v1/cognify/routers/get_cognify_router.py +11 -3
- cognee/api/v1/datasets/routers/get_datasets_router.py +16 -5
- cognee/api/v1/memify/routers/__init__.py +1 -0
- cognee/api/v1/memify/routers/get_memify_router.py +100 -0
- cognee/api/v1/notebooks/routers/__init__.py +1 -0
- cognee/api/v1/notebooks/routers/get_notebooks_router.py +96 -0
- cognee/api/v1/responses/default_tools.py +4 -0
- cognee/api/v1/responses/dispatch_function.py +6 -1
- cognee/api/v1/responses/models.py +1 -1
- cognee/api/v1/search/routers/get_search_router.py +20 -1
- cognee/api/v1/search/search.py +17 -4
- cognee/api/v1/sync/__init__.py +17 -0
- cognee/api/v1/sync/routers/__init__.py +3 -0
- cognee/api/v1/sync/routers/get_sync_router.py +241 -0
- cognee/api/v1/sync/sync.py +877 -0
- cognee/api/v1/users/routers/get_auth_router.py +13 -1
- cognee/base_config.py +10 -1
- cognee/cli/__init__.py +10 -0
- cognee/cli/_cognee.py +180 -0
- cognee/cli/commands/__init__.py +1 -0
- cognee/cli/commands/add_command.py +80 -0
- cognee/cli/commands/cognify_command.py +128 -0
- cognee/cli/commands/config_command.py +225 -0
- cognee/cli/commands/delete_command.py +80 -0
- cognee/cli/commands/search_command.py +149 -0
- cognee/cli/config.py +33 -0
- cognee/cli/debug.py +21 -0
- cognee/cli/echo.py +45 -0
- cognee/cli/exceptions.py +23 -0
- cognee/cli/minimal_cli.py +97 -0
- cognee/cli/reference.py +26 -0
- cognee/cli/suppress_logging.py +12 -0
- cognee/eval_framework/corpus_builder/corpus_builder_executor.py +2 -2
- cognee/eval_framework/eval_config.py +1 -1
- cognee/infrastructure/databases/graph/config.py +10 -4
- cognee/infrastructure/databases/graph/get_graph_engine.py +4 -9
- cognee/infrastructure/databases/graph/kuzu/adapter.py +199 -2
- cognee/infrastructure/databases/graph/neo4j_driver/adapter.py +138 -0
- cognee/infrastructure/databases/relational/__init__.py +2 -0
- cognee/infrastructure/databases/relational/get_async_session.py +15 -0
- cognee/infrastructure/databases/relational/sqlalchemy/SqlAlchemyAdapter.py +6 -1
- cognee/infrastructure/databases/relational/with_async_session.py +25 -0
- cognee/infrastructure/databases/vector/chromadb/ChromaDBAdapter.py +1 -1
- cognee/infrastructure/databases/vector/config.py +13 -6
- cognee/infrastructure/databases/vector/embeddings/FastembedEmbeddingEngine.py +6 -4
- cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py +16 -7
- cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py +5 -5
- cognee/infrastructure/databases/vector/embeddings/config.py +2 -2
- cognee/infrastructure/databases/vector/embeddings/embedding_rate_limiter.py +2 -6
- cognee/infrastructure/databases/vector/embeddings/get_embedding_engine.py +10 -7
- cognee/infrastructure/files/storage/LocalFileStorage.py +9 -0
- cognee/infrastructure/files/storage/S3FileStorage.py +5 -0
- cognee/infrastructure/files/storage/StorageManager.py +7 -1
- cognee/infrastructure/files/storage/storage.py +16 -0
- cognee/infrastructure/files/utils/get_data_file_path.py +14 -9
- cognee/infrastructure/files/utils/get_file_metadata.py +2 -1
- cognee/infrastructure/llm/LLMGateway.py +32 -5
- cognee/infrastructure/llm/config.py +6 -4
- cognee/infrastructure/llm/prompts/extract_query_time.txt +15 -0
- cognee/infrastructure/llm/prompts/generate_event_entity_prompt.txt +25 -0
- cognee/infrastructure/llm/prompts/generate_event_graph_prompt.txt +30 -0
- cognee/infrastructure/llm/structured_output_framework/baml/baml_src/extraction/knowledge_graph/extract_content_graph.py +16 -5
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/extraction/__init__.py +2 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/extraction/extract_event_entities.py +44 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/extraction/knowledge_graph/__init__.py +1 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/extraction/knowledge_graph/extract_content_graph.py +19 -15
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/extraction/knowledge_graph/extract_event_graph.py +46 -0
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py +3 -3
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py +3 -3
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py +2 -2
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py +14 -8
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py +6 -4
- cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py +28 -4
- cognee/infrastructure/llm/tokenizer/Gemini/adapter.py +2 -2
- cognee/infrastructure/llm/tokenizer/HuggingFace/adapter.py +3 -3
- cognee/infrastructure/llm/tokenizer/Mistral/adapter.py +3 -3
- cognee/infrastructure/llm/tokenizer/TikToken/adapter.py +6 -6
- cognee/infrastructure/llm/utils.py +7 -7
- cognee/infrastructure/utils/run_sync.py +8 -1
- cognee/modules/chunking/models/DocumentChunk.py +4 -3
- cognee/modules/cloud/exceptions/CloudApiKeyMissingError.py +15 -0
- cognee/modules/cloud/exceptions/CloudConnectionError.py +15 -0
- cognee/modules/cloud/exceptions/__init__.py +2 -0
- cognee/modules/cloud/operations/__init__.py +1 -0
- cognee/modules/cloud/operations/check_api_key.py +25 -0
- cognee/modules/data/deletion/prune_system.py +1 -1
- cognee/modules/data/methods/__init__.py +2 -0
- cognee/modules/data/methods/check_dataset_name.py +1 -1
- cognee/modules/data/methods/create_authorized_dataset.py +19 -0
- cognee/modules/data/methods/get_authorized_dataset.py +11 -5
- cognee/modules/data/methods/get_authorized_dataset_by_name.py +16 -0
- cognee/modules/data/methods/get_dataset_data.py +1 -1
- cognee/modules/data/methods/load_or_create_datasets.py +2 -20
- cognee/modules/engine/models/Event.py +16 -0
- cognee/modules/engine/models/Interval.py +8 -0
- cognee/modules/engine/models/Timestamp.py +13 -0
- cognee/modules/engine/models/__init__.py +3 -0
- cognee/modules/engine/utils/__init__.py +2 -0
- cognee/modules/engine/utils/generate_event_datapoint.py +46 -0
- cognee/modules/engine/utils/generate_timestamp_datapoint.py +51 -0
- cognee/modules/graph/cognee_graph/CogneeGraph.py +2 -2
- cognee/modules/graph/methods/get_formatted_graph_data.py +3 -2
- cognee/modules/graph/utils/__init__.py +1 -0
- cognee/modules/graph/utils/resolve_edges_to_text.py +71 -0
- cognee/modules/memify/__init__.py +1 -0
- cognee/modules/memify/memify.py +118 -0
- cognee/modules/notebooks/methods/__init__.py +5 -0
- cognee/modules/notebooks/methods/create_notebook.py +26 -0
- cognee/modules/notebooks/methods/delete_notebook.py +13 -0
- cognee/modules/notebooks/methods/get_notebook.py +21 -0
- cognee/modules/notebooks/methods/get_notebooks.py +18 -0
- cognee/modules/notebooks/methods/update_notebook.py +17 -0
- cognee/modules/notebooks/models/Notebook.py +53 -0
- cognee/modules/notebooks/models/__init__.py +1 -0
- cognee/modules/notebooks/operations/__init__.py +1 -0
- cognee/modules/notebooks/operations/run_in_local_sandbox.py +55 -0
- cognee/modules/pipelines/__init__.py +1 -1
- cognee/modules/pipelines/exceptions/tasks.py +18 -0
- cognee/modules/pipelines/layers/__init__.py +1 -0
- cognee/modules/pipelines/layers/check_pipeline_run_qualification.py +59 -0
- cognee/modules/pipelines/layers/pipeline_execution_mode.py +127 -0
- cognee/modules/pipelines/layers/reset_dataset_pipeline_run_status.py +28 -0
- cognee/modules/pipelines/layers/resolve_authorized_user_dataset.py +34 -0
- cognee/modules/pipelines/layers/resolve_authorized_user_datasets.py +55 -0
- cognee/modules/pipelines/layers/setup_and_check_environment.py +41 -0
- cognee/modules/pipelines/layers/validate_pipeline_tasks.py +20 -0
- cognee/modules/pipelines/methods/__init__.py +2 -0
- cognee/modules/pipelines/methods/get_pipeline_runs_by_dataset.py +34 -0
- cognee/modules/pipelines/methods/reset_pipeline_run_status.py +16 -0
- cognee/modules/pipelines/operations/__init__.py +0 -1
- cognee/modules/pipelines/operations/log_pipeline_run_initiated.py +1 -1
- cognee/modules/pipelines/operations/pipeline.py +24 -138
- cognee/modules/pipelines/operations/run_tasks.py +17 -41
- cognee/modules/retrieval/base_feedback.py +11 -0
- cognee/modules/retrieval/base_graph_retriever.py +18 -0
- cognee/modules/retrieval/base_retriever.py +1 -1
- cognee/modules/retrieval/code_retriever.py +8 -0
- cognee/modules/retrieval/coding_rules_retriever.py +31 -0
- cognee/modules/retrieval/completion_retriever.py +9 -3
- cognee/modules/retrieval/context_providers/TripletSearchContextProvider.py +1 -0
- cognee/modules/retrieval/cypher_search_retriever.py +1 -9
- cognee/modules/retrieval/graph_completion_context_extension_retriever.py +29 -13
- cognee/modules/retrieval/graph_completion_cot_retriever.py +30 -13
- cognee/modules/retrieval/graph_completion_retriever.py +107 -56
- cognee/modules/retrieval/graph_summary_completion_retriever.py +5 -1
- cognee/modules/retrieval/insights_retriever.py +14 -3
- cognee/modules/retrieval/natural_language_retriever.py +0 -4
- cognee/modules/retrieval/summaries_retriever.py +1 -1
- cognee/modules/retrieval/temporal_retriever.py +152 -0
- cognee/modules/retrieval/user_qa_feedback.py +83 -0
- cognee/modules/retrieval/utils/brute_force_triplet_search.py +7 -32
- cognee/modules/retrieval/utils/completion.py +10 -3
- cognee/modules/retrieval/utils/extract_uuid_from_node.py +18 -0
- cognee/modules/retrieval/utils/models.py +40 -0
- cognee/modules/search/methods/get_search_type_tools.py +168 -0
- cognee/modules/search/methods/no_access_control_search.py +47 -0
- cognee/modules/search/methods/search.py +239 -118
- cognee/modules/search/types/SearchResult.py +21 -0
- cognee/modules/search/types/SearchType.py +3 -0
- cognee/modules/search/types/__init__.py +1 -0
- cognee/modules/search/utils/__init__.py +2 -0
- cognee/modules/search/utils/prepare_search_result.py +41 -0
- cognee/modules/search/utils/transform_context_to_graph.py +38 -0
- cognee/modules/settings/get_settings.py +2 -2
- cognee/modules/sync/__init__.py +1 -0
- cognee/modules/sync/methods/__init__.py +23 -0
- cognee/modules/sync/methods/create_sync_operation.py +53 -0
- cognee/modules/sync/methods/get_sync_operation.py +107 -0
- cognee/modules/sync/methods/update_sync_operation.py +248 -0
- cognee/modules/sync/models/SyncOperation.py +142 -0
- cognee/modules/sync/models/__init__.py +3 -0
- cognee/modules/users/__init__.py +0 -1
- cognee/modules/users/methods/__init__.py +4 -1
- cognee/modules/users/methods/create_user.py +26 -1
- cognee/modules/users/methods/get_authenticated_user.py +36 -42
- cognee/modules/users/methods/get_default_user.py +3 -1
- cognee/modules/users/permissions/methods/get_specific_user_permission_datasets.py +2 -1
- cognee/root_dir.py +19 -0
- cognee/shared/CodeGraphEntities.py +1 -0
- cognee/shared/logging_utils.py +143 -32
- cognee/shared/utils.py +0 -1
- cognee/tasks/codingagents/coding_rule_associations.py +127 -0
- cognee/tasks/graph/extract_graph_from_data.py +6 -2
- cognee/tasks/ingestion/save_data_item_to_storage.py +23 -0
- cognee/tasks/memify/__init__.py +2 -0
- cognee/tasks/memify/extract_subgraph.py +7 -0
- cognee/tasks/memify/extract_subgraph_chunks.py +11 -0
- cognee/tasks/repo_processor/get_local_dependencies.py +2 -0
- cognee/tasks/repo_processor/get_repo_file_dependencies.py +144 -47
- cognee/tasks/storage/add_data_points.py +33 -3
- cognee/tasks/temporal_graph/__init__.py +1 -0
- cognee/tasks/temporal_graph/add_entities_to_event.py +85 -0
- cognee/tasks/temporal_graph/enrich_events.py +34 -0
- cognee/tasks/temporal_graph/extract_events_and_entities.py +32 -0
- cognee/tasks/temporal_graph/extract_knowledge_graph_from_events.py +41 -0
- cognee/tasks/temporal_graph/models.py +49 -0
- cognee/tests/integration/cli/__init__.py +3 -0
- cognee/tests/integration/cli/test_cli_integration.py +331 -0
- cognee/tests/integration/documents/PdfDocument_test.py +2 -2
- cognee/tests/integration/documents/TextDocument_test.py +2 -4
- cognee/tests/integration/documents/UnstructuredDocument_test.py +5 -8
- cognee/tests/{test_deletion.py → test_delete_hard.py} +0 -37
- cognee/tests/test_delete_soft.py +85 -0
- cognee/tests/test_kuzu.py +2 -2
- cognee/tests/test_neo4j.py +2 -2
- cognee/tests/test_permissions.py +3 -3
- cognee/tests/test_relational_db_migration.py +7 -5
- cognee/tests/test_search_db.py +136 -23
- cognee/tests/test_temporal_graph.py +167 -0
- cognee/tests/unit/api/__init__.py +1 -0
- cognee/tests/unit/api/test_conditional_authentication_endpoints.py +246 -0
- cognee/tests/unit/cli/__init__.py +3 -0
- cognee/tests/unit/cli/test_cli_commands.py +483 -0
- cognee/tests/unit/cli/test_cli_edge_cases.py +625 -0
- cognee/tests/unit/cli/test_cli_main.py +173 -0
- cognee/tests/unit/cli/test_cli_runner.py +62 -0
- cognee/tests/unit/cli/test_cli_utils.py +127 -0
- cognee/tests/unit/modules/retrieval/chunks_retriever_test.py +18 -2
- cognee/tests/unit/modules/retrieval/graph_completion_retriever_context_extension_test.py +12 -15
- cognee/tests/unit/modules/retrieval/graph_completion_retriever_cot_test.py +10 -15
- cognee/tests/unit/modules/retrieval/graph_completion_retriever_test.py +4 -3
- cognee/tests/unit/modules/retrieval/insights_retriever_test.py +4 -2
- cognee/tests/unit/modules/retrieval/rag_completion_retriever_test.py +18 -2
- cognee/tests/unit/modules/retrieval/temporal_retriever_test.py +225 -0
- cognee/tests/unit/modules/users/__init__.py +1 -0
- cognee/tests/unit/modules/users/test_conditional_authentication.py +277 -0
- cognee/tests/unit/processing/utils/utils_test.py +20 -1
- {cognee-0.2.3.dev1.dist-info → cognee-0.3.0.dev0.dist-info}/METADATA +13 -9
- {cognee-0.2.3.dev1.dist-info → cognee-0.3.0.dev0.dist-info}/RECORD +245 -135
- cognee-0.3.0.dev0.dist-info/entry_points.txt +2 -0
- cognee/infrastructure/databases/graph/networkx/adapter.py +0 -1017
- cognee/infrastructure/pipeline/models/Operation.py +0 -60
- cognee/notebooks/github_analysis_step_by_step.ipynb +0 -37
- cognee/tests/tasks/descriptive_metrics/networkx_metrics_test.py +0 -7
- cognee/tests/unit/modules/search/search_methods_test.py +0 -223
- /cognee/{infrastructure/databases/graph/networkx → api/v1/memify}/__init__.py +0 -0
- /cognee/{infrastructure/pipeline/models → tasks/codingagents}/__init__.py +0 -0
- {cognee-0.2.3.dev1.dist-info → cognee-0.3.0.dev0.dist-info}/WHEEL +0 -0
- {cognee-0.2.3.dev1.dist-info → cognee-0.3.0.dev0.dist-info}/licenses/LICENSE +0 -0
- {cognee-0.2.3.dev1.dist-info → cognee-0.3.0.dev0.dist-info}/licenses/NOTICE.md +0 -0
|
@@ -1,7 +1,6 @@
|
|
|
1
1
|
from typing import Any, Optional
|
|
2
2
|
from cognee.shared.logging_utils import get_logger
|
|
3
3
|
from cognee.infrastructure.databases.graph import get_graph_engine
|
|
4
|
-
from cognee.infrastructure.databases.graph.networkx.adapter import NetworkXAdapter
|
|
5
4
|
from cognee.infrastructure.llm.LLMGateway import LLMGateway
|
|
6
5
|
from cognee.modules.retrieval.base_retriever import BaseRetriever
|
|
7
6
|
from cognee.modules.retrieval.exceptions import SearchTypeNotSupported
|
|
@@ -123,9 +122,6 @@ class NaturalLanguageRetriever(BaseRetriever):
|
|
|
123
122
|
"""
|
|
124
123
|
graph_engine = await get_graph_engine()
|
|
125
124
|
|
|
126
|
-
if isinstance(graph_engine, (NetworkXAdapter)):
|
|
127
|
-
raise SearchTypeNotSupported("Natural language search type not supported.")
|
|
128
|
-
|
|
129
125
|
return await self._execute_cypher_query(query, graph_engine)
|
|
130
126
|
|
|
131
127
|
async def get_completion(self, query: str, context: Optional[Any] = None) -> Any:
|
|
@@ -62,7 +62,7 @@ class SummariesRetriever(BaseRetriever):
|
|
|
62
62
|
logger.info(f"Returning {len(summary_payloads)} summary payloads")
|
|
63
63
|
return summary_payloads
|
|
64
64
|
|
|
65
|
-
async def get_completion(self, query: str, context: Optional[Any] = None) -> Any:
|
|
65
|
+
async def get_completion(self, query: str, context: Optional[Any] = None, **kwargs) -> Any:
|
|
66
66
|
"""
|
|
67
67
|
Generates a completion using summaries context.
|
|
68
68
|
|
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from typing import Any, Optional, List, Type
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
from operator import itemgetter
|
|
6
|
+
from cognee.infrastructure.databases.vector import get_vector_engine
|
|
7
|
+
from cognee.modules.retrieval.utils.completion import generate_completion
|
|
8
|
+
from cognee.infrastructure.databases.graph import get_graph_engine
|
|
9
|
+
from cognee.infrastructure.llm import LLMGateway
|
|
10
|
+
from cognee.modules.retrieval.graph_completion_retriever import GraphCompletionRetriever
|
|
11
|
+
from cognee.shared.logging_utils import get_logger
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
from cognee.tasks.temporal_graph.models import QueryInterval
|
|
15
|
+
|
|
16
|
+
logger = get_logger()
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class TemporalRetriever(GraphCompletionRetriever):
|
|
20
|
+
"""
|
|
21
|
+
Handles graph completion by generating responses based on a series of interactions with
|
|
22
|
+
a language model. This class extends from GraphCompletionRetriever and is designed to
|
|
23
|
+
manage the retrieval and validation process for user queries, integrating follow-up
|
|
24
|
+
questions based on reasoning. The public methods are:
|
|
25
|
+
|
|
26
|
+
- get_completion
|
|
27
|
+
|
|
28
|
+
Instance variables include:
|
|
29
|
+
- validation_system_prompt_path
|
|
30
|
+
- validation_user_prompt_path
|
|
31
|
+
- followup_system_prompt_path
|
|
32
|
+
- followup_user_prompt_path
|
|
33
|
+
"""
|
|
34
|
+
|
|
35
|
+
def __init__(
|
|
36
|
+
self,
|
|
37
|
+
user_prompt_path: str = "graph_context_for_question.txt",
|
|
38
|
+
system_prompt_path: str = "answer_simple_question.txt",
|
|
39
|
+
time_extraction_prompt_path: str = "extract_query_time.txt",
|
|
40
|
+
top_k: Optional[int] = 5,
|
|
41
|
+
node_type: Optional[Type] = None,
|
|
42
|
+
node_name: Optional[List[str]] = None,
|
|
43
|
+
):
|
|
44
|
+
super().__init__(
|
|
45
|
+
user_prompt_path=user_prompt_path,
|
|
46
|
+
system_prompt_path=system_prompt_path,
|
|
47
|
+
top_k=top_k,
|
|
48
|
+
node_type=node_type,
|
|
49
|
+
node_name=node_name,
|
|
50
|
+
)
|
|
51
|
+
self.user_prompt_path = user_prompt_path
|
|
52
|
+
self.system_prompt_path = system_prompt_path
|
|
53
|
+
self.time_extraction_prompt_path = time_extraction_prompt_path
|
|
54
|
+
self.top_k = top_k if top_k is not None else 5
|
|
55
|
+
self.node_type = node_type
|
|
56
|
+
self.node_name = node_name
|
|
57
|
+
|
|
58
|
+
def descriptions_to_string(self, results):
|
|
59
|
+
descs = []
|
|
60
|
+
for entry in results:
|
|
61
|
+
d = entry.get("description")
|
|
62
|
+
if d:
|
|
63
|
+
descs.append(d.strip())
|
|
64
|
+
return "\n#####################\n".join(descs)
|
|
65
|
+
|
|
66
|
+
async def extract_time_from_query(self, query: str):
|
|
67
|
+
prompt_path = self.time_extraction_prompt_path
|
|
68
|
+
|
|
69
|
+
if os.path.isabs(prompt_path):
|
|
70
|
+
base_directory = os.path.dirname(prompt_path)
|
|
71
|
+
prompt_path = os.path.basename(prompt_path)
|
|
72
|
+
else:
|
|
73
|
+
base_directory = None
|
|
74
|
+
|
|
75
|
+
system_prompt = LLMGateway.render_prompt(prompt_path, {}, base_directory=base_directory)
|
|
76
|
+
|
|
77
|
+
interval = await LLMGateway.acreate_structured_output(query, system_prompt, QueryInterval)
|
|
78
|
+
|
|
79
|
+
time_from = interval.starts_at
|
|
80
|
+
time_to = interval.ends_at
|
|
81
|
+
|
|
82
|
+
return time_from, time_to
|
|
83
|
+
|
|
84
|
+
async def filter_top_k_events(self, relevant_events, scored_results):
|
|
85
|
+
# Build a score lookup from vector search results
|
|
86
|
+
score_lookup = {res.payload["id"]: res.score for res in scored_results}
|
|
87
|
+
|
|
88
|
+
events_with_scores = []
|
|
89
|
+
for event in relevant_events[0]["events"]:
|
|
90
|
+
score = score_lookup.get(event["id"], float("inf"))
|
|
91
|
+
events_with_scores.append({**event, "score": score})
|
|
92
|
+
|
|
93
|
+
events_with_scores.sort(key=itemgetter("score"))
|
|
94
|
+
|
|
95
|
+
return events_with_scores[: self.top_k]
|
|
96
|
+
|
|
97
|
+
async def get_context(self, query: str) -> Any:
|
|
98
|
+
"""Retrieves context based on the query."""
|
|
99
|
+
|
|
100
|
+
time_from, time_to = await self.extract_time_from_query(query)
|
|
101
|
+
|
|
102
|
+
graph_engine = await get_graph_engine()
|
|
103
|
+
|
|
104
|
+
triplets = []
|
|
105
|
+
|
|
106
|
+
if time_from and time_to:
|
|
107
|
+
ids = await graph_engine.collect_time_ids(time_from=time_from, time_to=time_to)
|
|
108
|
+
elif time_from:
|
|
109
|
+
ids = await graph_engine.collect_time_ids(time_from=time_from)
|
|
110
|
+
elif time_to:
|
|
111
|
+
ids = await graph_engine.collect_time_ids(time_to=time_to)
|
|
112
|
+
else:
|
|
113
|
+
logger.info(
|
|
114
|
+
"No timestamps identified based on the query, performing retrieval using triplet search on events and entities."
|
|
115
|
+
)
|
|
116
|
+
triplets = await self.get_context(query)
|
|
117
|
+
return await self.resolve_edges_to_text(triplets)
|
|
118
|
+
|
|
119
|
+
if ids:
|
|
120
|
+
relevant_events = await graph_engine.collect_events(ids=ids)
|
|
121
|
+
else:
|
|
122
|
+
logger.info(
|
|
123
|
+
"No events identified based on timestamp filtering, performing retrieval using triplet search on events and entities."
|
|
124
|
+
)
|
|
125
|
+
triplets = await self.get_context(query)
|
|
126
|
+
return await self.resolve_edges_to_text(triplets)
|
|
127
|
+
|
|
128
|
+
vector_engine = get_vector_engine()
|
|
129
|
+
query_vector = (await vector_engine.embedding_engine.embed_text([query]))[0]
|
|
130
|
+
|
|
131
|
+
vector_search_results = await vector_engine.search(
|
|
132
|
+
collection_name="Event_name", query_vector=query_vector, limit=0
|
|
133
|
+
)
|
|
134
|
+
|
|
135
|
+
top_k_events = await self.filter_top_k_events(relevant_events, vector_search_results)
|
|
136
|
+
|
|
137
|
+
return self.descriptions_to_string(top_k_events)
|
|
138
|
+
|
|
139
|
+
async def get_completion(self, query: str, context: Optional[str] = None) -> str:
|
|
140
|
+
"""Generates a response using the query and optional context."""
|
|
141
|
+
if not context:
|
|
142
|
+
context = await self.get_context(query=query)
|
|
143
|
+
|
|
144
|
+
if context:
|
|
145
|
+
completion = await generate_completion(
|
|
146
|
+
query=query,
|
|
147
|
+
context=context,
|
|
148
|
+
user_prompt_path=self.user_prompt_path,
|
|
149
|
+
system_prompt_path=self.system_prompt_path,
|
|
150
|
+
)
|
|
151
|
+
|
|
152
|
+
return completion
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
from typing import Any, Optional, List
|
|
2
|
+
|
|
3
|
+
from uuid import NAMESPACE_OID, uuid5, UUID
|
|
4
|
+
from cognee.infrastructure.databases.graph import get_graph_engine
|
|
5
|
+
from cognee.infrastructure.llm import LLMGateway
|
|
6
|
+
from cognee.modules.engine.models import NodeSet
|
|
7
|
+
from cognee.shared.logging_utils import get_logger
|
|
8
|
+
from cognee.modules.retrieval.base_feedback import BaseFeedback
|
|
9
|
+
from cognee.modules.retrieval.utils.models import CogneeUserFeedback
|
|
10
|
+
from cognee.modules.retrieval.utils.models import UserFeedbackEvaluation
|
|
11
|
+
from cognee.tasks.storage import add_data_points
|
|
12
|
+
|
|
13
|
+
logger = get_logger("CompletionRetriever")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class UserQAFeedback(BaseFeedback):
|
|
17
|
+
"""
|
|
18
|
+
Interface for handling user feedback queries.
|
|
19
|
+
Public methods:
|
|
20
|
+
- get_context(query: str) -> str
|
|
21
|
+
- get_completion(query: str, context: Optional[Any] = None) -> Any
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
def __init__(self, last_k: Optional[int] = 1) -> None:
|
|
25
|
+
"""Initialize retriever with optional custom prompt paths."""
|
|
26
|
+
self.last_k = last_k
|
|
27
|
+
|
|
28
|
+
async def add_feedback(self, feedback_text: str) -> List[str]:
|
|
29
|
+
feedback_sentiment = await LLMGateway.acreate_structured_output(
|
|
30
|
+
text_input=feedback_text,
|
|
31
|
+
system_prompt="You are a sentiment analysis assistant. For each piece of user feedback you receive, return exactly one of: Positive, Negative, or Neutral classification and a corresponding score from -5 (worst negative) to 5 (best positive)",
|
|
32
|
+
response_model=UserFeedbackEvaluation,
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
graph_engine = await get_graph_engine()
|
|
36
|
+
last_interaction_ids = await graph_engine.get_last_user_interaction_ids(limit=self.last_k)
|
|
37
|
+
|
|
38
|
+
nodeset_name = "UserQAFeedbacks"
|
|
39
|
+
feedbacks_node_set = NodeSet(id=uuid5(NAMESPACE_OID, name=nodeset_name), name=nodeset_name)
|
|
40
|
+
feedback_id = uuid5(NAMESPACE_OID, name=feedback_text)
|
|
41
|
+
|
|
42
|
+
cognee_user_feedback = CogneeUserFeedback(
|
|
43
|
+
id=feedback_id,
|
|
44
|
+
feedback=feedback_text,
|
|
45
|
+
sentiment=feedback_sentiment.evaluation.value,
|
|
46
|
+
score=feedback_sentiment.score,
|
|
47
|
+
belongs_to_set=feedbacks_node_set,
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
await add_data_points(data_points=[cognee_user_feedback], update_edge_collection=False)
|
|
51
|
+
|
|
52
|
+
relationships = []
|
|
53
|
+
relationship_name = "gives_feedback_to"
|
|
54
|
+
to_node_ids = []
|
|
55
|
+
|
|
56
|
+
for interaction_id in last_interaction_ids:
|
|
57
|
+
target_id_1 = feedback_id
|
|
58
|
+
target_id_2 = UUID(interaction_id)
|
|
59
|
+
|
|
60
|
+
if target_id_1 and target_id_2:
|
|
61
|
+
relationships.append(
|
|
62
|
+
(
|
|
63
|
+
target_id_1,
|
|
64
|
+
target_id_2,
|
|
65
|
+
relationship_name,
|
|
66
|
+
{
|
|
67
|
+
"relationship_name": relationship_name,
|
|
68
|
+
"source_node_id": target_id_1,
|
|
69
|
+
"target_node_id": target_id_2,
|
|
70
|
+
"ontology_valid": False,
|
|
71
|
+
},
|
|
72
|
+
)
|
|
73
|
+
)
|
|
74
|
+
to_node_ids.append(str(target_id_2))
|
|
75
|
+
|
|
76
|
+
if len(relationships) > 0:
|
|
77
|
+
graph_engine = await get_graph_engine()
|
|
78
|
+
await graph_engine.add_edges(relationships)
|
|
79
|
+
await graph_engine.apply_feedback_weight(
|
|
80
|
+
node_ids=to_node_ids, weight=feedback_sentiment.score
|
|
81
|
+
)
|
|
82
|
+
|
|
83
|
+
return [feedback_text]
|
|
@@ -8,7 +8,7 @@ from cognee.infrastructure.databases.vector.exceptions import CollectionNotFound
|
|
|
8
8
|
from cognee.infrastructure.databases.graph import get_graph_engine
|
|
9
9
|
from cognee.infrastructure.databases.vector import get_vector_engine
|
|
10
10
|
from cognee.modules.graph.cognee_graph.CogneeGraph import CogneeGraph
|
|
11
|
-
from cognee.modules.
|
|
11
|
+
from cognee.modules.graph.cognee_graph.CogneeGraphElements import Edge
|
|
12
12
|
from cognee.modules.users.models import User
|
|
13
13
|
from cognee.shared.utils import send_telemetry
|
|
14
14
|
|
|
@@ -63,9 +63,10 @@ async def get_memory_fragment(
|
|
|
63
63
|
if properties_to_project is None:
|
|
64
64
|
properties_to_project = ["id", "description", "name", "type", "text"]
|
|
65
65
|
|
|
66
|
+
memory_fragment = CogneeGraph()
|
|
67
|
+
|
|
66
68
|
try:
|
|
67
69
|
graph_engine = await get_graph_engine()
|
|
68
|
-
memory_fragment = CogneeGraph()
|
|
69
70
|
|
|
70
71
|
await memory_fragment.project_graph_from_db(
|
|
71
72
|
graph_engine,
|
|
@@ -87,41 +88,15 @@ async def get_memory_fragment(
|
|
|
87
88
|
|
|
88
89
|
|
|
89
90
|
async def brute_force_triplet_search(
|
|
90
|
-
query: str,
|
|
91
|
-
user: User = None,
|
|
92
|
-
top_k: int = 5,
|
|
93
|
-
collections: List[str] = None,
|
|
94
|
-
properties_to_project: List[str] = None,
|
|
95
|
-
memory_fragment: Optional[CogneeGraph] = None,
|
|
96
|
-
node_type: Optional[Type] = None,
|
|
97
|
-
node_name: Optional[List[str]] = None,
|
|
98
|
-
) -> list:
|
|
99
|
-
if user is None:
|
|
100
|
-
user = await get_default_user()
|
|
101
|
-
|
|
102
|
-
retrieved_results = await brute_force_search(
|
|
103
|
-
query,
|
|
104
|
-
user,
|
|
105
|
-
top_k,
|
|
106
|
-
collections=collections,
|
|
107
|
-
properties_to_project=properties_to_project,
|
|
108
|
-
memory_fragment=memory_fragment,
|
|
109
|
-
node_type=node_type,
|
|
110
|
-
node_name=node_name,
|
|
111
|
-
)
|
|
112
|
-
return retrieved_results
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
async def brute_force_search(
|
|
116
91
|
query: str,
|
|
117
92
|
user: User,
|
|
118
|
-
top_k: int,
|
|
119
|
-
collections: List[str] = None,
|
|
120
|
-
properties_to_project: List[str] = None,
|
|
93
|
+
top_k: int = 5,
|
|
94
|
+
collections: Optional[List[str]] = None,
|
|
95
|
+
properties_to_project: Optional[List[str]] = None,
|
|
121
96
|
memory_fragment: Optional[CogneeGraph] = None,
|
|
122
97
|
node_type: Optional[Type] = None,
|
|
123
98
|
node_name: Optional[List[str]] = None,
|
|
124
|
-
) ->
|
|
99
|
+
) -> List[Edge]:
|
|
125
100
|
"""
|
|
126
101
|
Performs a brute force search to retrieve the top triplets from the graph.
|
|
127
102
|
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
from typing import Optional
|
|
1
2
|
from cognee.infrastructure.llm.LLMGateway import LLMGateway
|
|
2
3
|
|
|
3
4
|
|
|
@@ -6,11 +7,14 @@ async def generate_completion(
|
|
|
6
7
|
context: str,
|
|
7
8
|
user_prompt_path: str,
|
|
8
9
|
system_prompt_path: str,
|
|
10
|
+
system_prompt: Optional[str] = None,
|
|
9
11
|
) -> str:
|
|
10
12
|
"""Generates a completion using LLM with given context and prompts."""
|
|
11
13
|
args = {"question": query, "context": context}
|
|
12
14
|
user_prompt = LLMGateway.render_prompt(user_prompt_path, args)
|
|
13
|
-
system_prompt =
|
|
15
|
+
system_prompt = (
|
|
16
|
+
system_prompt if system_prompt else LLMGateway.read_query_prompt(system_prompt_path)
|
|
17
|
+
)
|
|
14
18
|
|
|
15
19
|
return await LLMGateway.acreate_structured_output(
|
|
16
20
|
text_input=user_prompt,
|
|
@@ -21,10 +25,13 @@ async def generate_completion(
|
|
|
21
25
|
|
|
22
26
|
async def summarize_text(
|
|
23
27
|
text: str,
|
|
24
|
-
|
|
28
|
+
system_prompt_path: str = "summarize_search_results.txt",
|
|
29
|
+
system_prompt: str = None,
|
|
25
30
|
) -> str:
|
|
26
31
|
"""Summarizes text using LLM with the specified prompt."""
|
|
27
|
-
system_prompt =
|
|
32
|
+
system_prompt = (
|
|
33
|
+
system_prompt if system_prompt else LLMGateway.read_query_prompt(system_prompt_path)
|
|
34
|
+
)
|
|
28
35
|
|
|
29
36
|
return await LLMGateway.acreate_structured_output(
|
|
30
37
|
text_input=text,
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
from typing import Any, Optional
|
|
2
|
+
from uuid import UUID
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def extract_uuid_from_node(node: Any) -> Optional[UUID]:
|
|
6
|
+
"""
|
|
7
|
+
Try to pull a UUID string out of node.id or node.properties['id'],
|
|
8
|
+
then return a UUID instance (or None if neither exists).
|
|
9
|
+
"""
|
|
10
|
+
id_str = None
|
|
11
|
+
if not id_str:
|
|
12
|
+
id_str = getattr(node, "id", None)
|
|
13
|
+
|
|
14
|
+
if hasattr(node, "attributes") and not id_str:
|
|
15
|
+
id_str = node.attributes.get("id", None)
|
|
16
|
+
|
|
17
|
+
id = UUID(id_str) if isinstance(id_str, str) else None
|
|
18
|
+
return id
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
from cognee.infrastructure.engine.models.DataPoint import DataPoint
|
|
3
|
+
from cognee.modules.engine.models.node_set import NodeSet
|
|
4
|
+
from enum import Enum
|
|
5
|
+
from pydantic import BaseModel, Field, confloat
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class CogneeUserInteraction(DataPoint):
    """A single question/answer exchange between a user and Cognee.

    Stored as a :class:`DataPoint` so past interactions can be persisted
    and optionally linked to a :class:`NodeSet`.
    """

    question: str  # the user's query text
    answer: str  # the answer Cognee produced for the question
    context: str  # context string stored alongside the Q/A pair — presumably retrieval output (confirm with writer)
    belongs_to_set: Optional[NodeSet] = None  # optional node set this interaction is grouped under
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class CogneeUserFeedback(DataPoint):
    """User-provided feedback on a Cognee answer, persisted as a DataPoint."""

    feedback: str  # free-text feedback from the user
    sentiment: str  # sentiment label — presumably "positive"/"negative"/"neutral" (confirm with writer)
    score: float  # numeric sentiment score — presumably the -5..+5 evaluation score (confirm with writer)
    belongs_to_set: Optional[NodeSet] = None  # optional node set this feedback is grouped under
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class UserFeedbackSentiment(str, Enum):
    """Closed set of sentiment labels for user feedback.

    Subclasses ``str`` so members compare and serialize as plain strings.
    """

    positive = "positive"
    negative = "negative"
    neutral = "neutral"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class UserFeedbackEvaluation(BaseModel):
    """Evaluation of a piece of user feedback: a bounded numeric sentiment
    score plus a categorical sentiment label."""

    # confloat enforces the [-5, 5] range at validation time.
    score: confloat(ge=-5, le=5) = Field(
        ..., description="Sentiment score from -5 (negative) to +5 (positive)"
    )
    evaluation: UserFeedbackSentiment
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
from typing import Callable, List, Optional, Type
|
|
2
|
+
|
|
3
|
+
from cognee.modules.engine.models.node_set import NodeSet
|
|
4
|
+
from cognee.modules.search.types import SearchType
|
|
5
|
+
from cognee.modules.search.operations import select_search_type
|
|
6
|
+
from cognee.modules.search.exceptions import UnsupportedSearchTypeError
|
|
7
|
+
|
|
8
|
+
# Retrievers
|
|
9
|
+
from cognee.modules.retrieval.user_qa_feedback import UserQAFeedback
|
|
10
|
+
from cognee.modules.retrieval.chunks_retriever import ChunksRetriever
|
|
11
|
+
from cognee.modules.retrieval.insights_retriever import InsightsRetriever
|
|
12
|
+
from cognee.modules.retrieval.summaries_retriever import SummariesRetriever
|
|
13
|
+
from cognee.modules.retrieval.completion_retriever import CompletionRetriever
|
|
14
|
+
from cognee.modules.retrieval.graph_completion_retriever import GraphCompletionRetriever
|
|
15
|
+
from cognee.modules.retrieval.temporal_retriever import TemporalRetriever
|
|
16
|
+
from cognee.modules.retrieval.coding_rules_retriever import CodingRulesRetriever
|
|
17
|
+
from cognee.modules.retrieval.graph_summary_completion_retriever import (
|
|
18
|
+
GraphSummaryCompletionRetriever,
|
|
19
|
+
)
|
|
20
|
+
from cognee.modules.retrieval.graph_completion_cot_retriever import GraphCompletionCotRetriever
|
|
21
|
+
from cognee.modules.retrieval.graph_completion_context_extension_retriever import (
|
|
22
|
+
GraphCompletionContextExtensionRetriever,
|
|
23
|
+
)
|
|
24
|
+
from cognee.modules.retrieval.code_retriever import CodeRetriever
|
|
25
|
+
from cognee.modules.retrieval.cypher_search_retriever import CypherSearchRetriever
|
|
26
|
+
from cognee.modules.retrieval.natural_language_retriever import NaturalLanguageRetriever
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
async def get_search_type_tools(
    query_type: SearchType,
    query_text: str,
    system_prompt_path: str = "answer_simple_question.txt",
    system_prompt: Optional[str] = None,
    top_k: int = 10,
    node_type: Optional[Type] = NodeSet,
    node_name: Optional[List[str]] = None,
    save_interaction: bool = False,
    last_k: Optional[int] = None,
) -> list:
    """
    Resolve a SearchType to the list of callables implementing it.

    Most search types resolve to a two-element list
    ``[get_completion, get_context]``; FEEDBACK and CODING_RULES resolve
    to a single callable.

    Parameters:
        query_type: The requested search type; FEELING_LUCKY is resolved
            to a concrete type via ``select_search_type``.
        query_text: The user's query (used only for FEELING_LUCKY
            resolution here).
        system_prompt_path / system_prompt / top_k / node_type /
        node_name / save_interaction / last_k: Forwarded to the retriever
            constructors that accept them.

    Raises:
        UnsupportedSearchTypeError: If the (resolved) query type has no
            registered tools.
    """

    def _graph_kwargs() -> dict:
        # Shared constructor arguments for all graph-based retrievers.
        return dict(
            system_prompt_path=system_prompt_path,
            top_k=top_k,
            node_type=node_type,
            node_name=node_name,
            save_interaction=save_interaction,
            system_prompt=system_prompt,
        )

    def _pair(make_retriever: Callable) -> List[Callable]:
        # Completion and context are deliberately bound to two separately
        # constructed retriever instances (matches prior behavior).
        return [
            make_retriever().get_completion,
            make_retriever().get_context,
        ]

    # Factories are lazy: only the selected search type's retrievers are
    # ever constructed (previously every retriever for all types was
    # built eagerly on each call).
    search_tasks: dict[SearchType, Callable[[], List[Callable]]] = {
        SearchType.SUMMARIES: lambda: _pair(lambda: SummariesRetriever(top_k=top_k)),
        SearchType.INSIGHTS: lambda: _pair(lambda: InsightsRetriever(top_k=top_k)),
        SearchType.CHUNKS: lambda: _pair(lambda: ChunksRetriever(top_k=top_k)),
        SearchType.RAG_COMPLETION: lambda: _pair(
            lambda: CompletionRetriever(
                system_prompt_path=system_prompt_path,
                top_k=top_k,
                system_prompt=system_prompt,
            )
        ),
        SearchType.GRAPH_COMPLETION: lambda: _pair(
            lambda: GraphCompletionRetriever(**_graph_kwargs())
        ),
        SearchType.GRAPH_COMPLETION_COT: lambda: _pair(
            lambda: GraphCompletionCotRetriever(**_graph_kwargs())
        ),
        SearchType.GRAPH_COMPLETION_CONTEXT_EXTENSION: lambda: _pair(
            lambda: GraphCompletionContextExtensionRetriever(**_graph_kwargs())
        ),
        SearchType.GRAPH_SUMMARY_COMPLETION: lambda: _pair(
            lambda: GraphSummaryCompletionRetriever(**_graph_kwargs())
        ),
        SearchType.CODE: lambda: _pair(lambda: CodeRetriever(top_k=top_k)),
        SearchType.CYPHER: lambda: _pair(lambda: CypherSearchRetriever()),
        SearchType.NATURAL_LANGUAGE: lambda: _pair(lambda: NaturalLanguageRetriever()),
        SearchType.FEEDBACK: lambda: [UserQAFeedback(last_k=last_k).add_feedback],
        SearchType.TEMPORAL: lambda: _pair(lambda: TemporalRetriever(top_k=top_k)),
        SearchType.CODING_RULES: lambda: [
            CodingRulesRetriever(rules_nodeset_name=node_name).get_existing_rules,
        ],
    }

    # If the query type is FEELING_LUCKY, select the search type intelligently
    if query_type is SearchType.FEELING_LUCKY:
        query_type = await select_search_type(query_text)

    build_tools = search_tasks.get(query_type)

    if not build_tools:
        raise UnsupportedSearchTypeError(str(query_type))

    return build_tools()
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
from typing import Any, List, Optional, Tuple, Type, Union
|
|
2
|
+
|
|
3
|
+
from cognee.modules.data.models.Dataset import Dataset
|
|
4
|
+
from cognee.modules.engine.models.node_set import NodeSet
|
|
5
|
+
from cognee.modules.graph.cognee_graph.CogneeGraphElements import Edge
|
|
6
|
+
from cognee.modules.search.types import SearchType
|
|
7
|
+
|
|
8
|
+
from .get_search_type_tools import get_search_type_tools
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
async def no_access_control_search(
    query_type: SearchType,
    query_text: str,
    system_prompt_path: str = "answer_simple_question.txt",
    system_prompt: Optional[str] = None,
    top_k: int = 10,
    node_type: Optional[Type] = NodeSet,
    node_name: Optional[List[str]] = None,
    save_interaction: bool = False,
    last_k: Optional[int] = None,
    only_context: bool = False,
) -> Union[Tuple[Any, Union[str, List[Edge]], List[Dataset]], Any]:
    """
    Run a search without dataset-level access control.

    Resolves the tools for *query_type* via ``get_search_type_tools`` and
    executes them:

    - Two tools (``[get_completion, get_context]``): fetch the context,
      then generate the completion from it. When ``only_context`` is
      True, return just the context.
    - One tool (e.g. FEEDBACK / CODING_RULES): call it with the query
      text alone; the returned context is an empty string.

    Returns:
        ``(result, context, datasets)`` where ``datasets`` is always an
        empty list here (no access-control filtering applies) — except
        when ``only_context`` is True, in which case the bare context is
        returned instead of the tuple.
    """
    search_tools = await get_search_type_tools(
        query_type=query_type,
        query_text=query_text,
        system_prompt_path=system_prompt_path,
        system_prompt=system_prompt,
        top_k=top_k,
        node_type=node_type,
        node_name=node_name,
        save_interaction=save_interaction,
        last_k=last_k,
    )

    if len(search_tools) == 2:
        get_completion, get_context = search_tools

        context = await get_context(query_text)
        if only_context:
            return context

        result = await get_completion(query_text, context)
    else:
        # Single-tool search types take only the query text.
        single_tool = search_tools[0]
        result = await single_tool(query_text)
        context = ""

    return result, context, []
|