gllm-datastore-binary 0.5.50__cp312-cp312-macosx_13_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gllm_datastore/__init__.pyi +0 -0
- gllm_datastore/cache/__init__.pyi +4 -0
- gllm_datastore/cache/base.pyi +84 -0
- gllm_datastore/cache/cache.pyi +137 -0
- gllm_datastore/cache/hybrid_cache/__init__.pyi +5 -0
- gllm_datastore/cache/hybrid_cache/file_system_hybrid_cache.pyi +50 -0
- gllm_datastore/cache/hybrid_cache/hybrid_cache.pyi +115 -0
- gllm_datastore/cache/hybrid_cache/in_memory_hybrid_cache.pyi +29 -0
- gllm_datastore/cache/hybrid_cache/key_matcher/__init__.pyi +5 -0
- gllm_datastore/cache/hybrid_cache/key_matcher/exact_key_matcher.pyi +44 -0
- gllm_datastore/cache/hybrid_cache/key_matcher/fuzzy_key_matcher.pyi +70 -0
- gllm_datastore/cache/hybrid_cache/key_matcher/key_matcher.pyi +60 -0
- gllm_datastore/cache/hybrid_cache/key_matcher/semantic_key_matcher.pyi +93 -0
- gllm_datastore/cache/hybrid_cache/redis_hybrid_cache.pyi +34 -0
- gllm_datastore/cache/hybrid_cache/utils.pyi +36 -0
- gllm_datastore/cache/utils.pyi +34 -0
- gllm_datastore/cache/vector_cache/__init__.pyi +0 -0
- gllm_datastore/cache/vector_cache/eviction_manager/__init__.pyi +0 -0
- gllm_datastore/cache/vector_cache/eviction_manager/asyncio_eviction_manager.pyi +48 -0
- gllm_datastore/cache/vector_cache/eviction_manager/eviction_manager.pyi +38 -0
- gllm_datastore/cache/vector_cache/eviction_strategy/__init__.pyi +0 -0
- gllm_datastore/cache/vector_cache/eviction_strategy/eviction_strategy.pyi +34 -0
- gllm_datastore/cache/vector_cache/eviction_strategy/ttl_eviction_strategy.pyi +34 -0
- gllm_datastore/cache/vector_cache/vector_cache.pyi +99 -0
- gllm_datastore/constants.pyi +66 -0
- gllm_datastore/core/__init__.pyi +7 -0
- gllm_datastore/core/capabilities/__init__.pyi +7 -0
- gllm_datastore/core/capabilities/encryption_capability.pyi +21 -0
- gllm_datastore/core/capabilities/fulltext_capability.pyi +73 -0
- gllm_datastore/core/capabilities/graph_capability.pyi +70 -0
- gllm_datastore/core/capabilities/hybrid_capability.pyi +184 -0
- gllm_datastore/core/capabilities/vector_capability.pyi +90 -0
- gllm_datastore/core/filters/__init__.pyi +4 -0
- gllm_datastore/core/filters/filter.pyi +340 -0
- gllm_datastore/core/filters/schema.pyi +149 -0
- gllm_datastore/data_store/__init__.pyi +8 -0
- gllm_datastore/data_store/_elastic_core/__init__.pyi +0 -0
- gllm_datastore/data_store/_elastic_core/client_factory.pyi +66 -0
- gllm_datastore/data_store/_elastic_core/constants.pyi +27 -0
- gllm_datastore/data_store/_elastic_core/elastic_like_core.pyi +115 -0
- gllm_datastore/data_store/_elastic_core/index_manager.pyi +37 -0
- gllm_datastore/data_store/_elastic_core/query_translator.pyi +89 -0
- gllm_datastore/data_store/base.pyi +176 -0
- gllm_datastore/data_store/chroma/__init__.pyi +4 -0
- gllm_datastore/data_store/chroma/_chroma_import.pyi +13 -0
- gllm_datastore/data_store/chroma/data_store.pyi +201 -0
- gllm_datastore/data_store/chroma/fulltext.pyi +134 -0
- gllm_datastore/data_store/chroma/query.pyi +266 -0
- gllm_datastore/data_store/chroma/query_translator.pyi +41 -0
- gllm_datastore/data_store/chroma/vector.pyi +197 -0
- gllm_datastore/data_store/elasticsearch/__init__.pyi +5 -0
- gllm_datastore/data_store/elasticsearch/data_store.pyi +147 -0
- gllm_datastore/data_store/elasticsearch/fulltext.pyi +238 -0
- gllm_datastore/data_store/elasticsearch/query.pyi +118 -0
- gllm_datastore/data_store/elasticsearch/query_translator.pyi +18 -0
- gllm_datastore/data_store/elasticsearch/vector.pyi +180 -0
- gllm_datastore/data_store/exceptions.pyi +35 -0
- gllm_datastore/data_store/in_memory/__init__.pyi +5 -0
- gllm_datastore/data_store/in_memory/data_store.pyi +71 -0
- gllm_datastore/data_store/in_memory/fulltext.pyi +131 -0
- gllm_datastore/data_store/in_memory/query.pyi +175 -0
- gllm_datastore/data_store/in_memory/vector.pyi +174 -0
- gllm_datastore/data_store/opensearch/__init__.pyi +5 -0
- gllm_datastore/data_store/opensearch/data_store.pyi +160 -0
- gllm_datastore/data_store/opensearch/fulltext.pyi +240 -0
- gllm_datastore/data_store/opensearch/query.pyi +89 -0
- gllm_datastore/data_store/opensearch/query_translator.pyi +18 -0
- gllm_datastore/data_store/opensearch/vector.pyi +211 -0
- gllm_datastore/data_store/redis/__init__.pyi +5 -0
- gllm_datastore/data_store/redis/data_store.pyi +153 -0
- gllm_datastore/data_store/redis/fulltext.pyi +128 -0
- gllm_datastore/data_store/redis/query.pyi +428 -0
- gllm_datastore/data_store/redis/query_translator.pyi +37 -0
- gllm_datastore/data_store/redis/vector.pyi +131 -0
- gllm_datastore/data_store/sql/__init__.pyi +4 -0
- gllm_datastore/data_store/sql/constants.pyi +5 -0
- gllm_datastore/data_store/sql/data_store.pyi +201 -0
- gllm_datastore/data_store/sql/fulltext.pyi +164 -0
- gllm_datastore/data_store/sql/query.pyi +81 -0
- gllm_datastore/data_store/sql/query_translator.pyi +51 -0
- gllm_datastore/data_store/sql/schema.pyi +16 -0
- gllm_datastore/encryptor/__init__.pyi +4 -0
- gllm_datastore/encryptor/aes_gcm_encryptor.pyi +45 -0
- gllm_datastore/encryptor/capability/__init__.pyi +3 -0
- gllm_datastore/encryptor/capability/mixin.pyi +32 -0
- gllm_datastore/encryptor/encryptor.pyi +52 -0
- gllm_datastore/encryptor/key_ring/__init__.pyi +3 -0
- gllm_datastore/encryptor/key_ring/in_memory_key_ring.pyi +52 -0
- gllm_datastore/encryptor/key_ring/key_ring.pyi +45 -0
- gllm_datastore/encryptor/key_rotating_encryptor.pyi +60 -0
- gllm_datastore/graph_data_store/__init__.pyi +6 -0
- gllm_datastore/graph_data_store/graph_data_store.pyi +151 -0
- gllm_datastore/graph_data_store/graph_rag_data_store.pyi +29 -0
- gllm_datastore/graph_data_store/light_rag_data_store.pyi +93 -0
- gllm_datastore/graph_data_store/light_rag_postgres_data_store.pyi +96 -0
- gllm_datastore/graph_data_store/llama_index_graph_rag_data_store.pyi +49 -0
- gllm_datastore/graph_data_store/llama_index_neo4j_graph_rag_data_store.pyi +78 -0
- gllm_datastore/graph_data_store/mixins/__init__.pyi +3 -0
- gllm_datastore/graph_data_store/mixins/agentic_graph_tools_mixin.pyi +175 -0
- gllm_datastore/graph_data_store/nebula_graph_data_store.pyi +206 -0
- gllm_datastore/graph_data_store/neo4j_graph_data_store.pyi +182 -0
- gllm_datastore/graph_data_store/schema.pyi +27 -0
- gllm_datastore/graph_data_store/utils/__init__.pyi +6 -0
- gllm_datastore/graph_data_store/utils/constants.pyi +21 -0
- gllm_datastore/graph_data_store/utils/light_rag_em_invoker_adapter.pyi +56 -0
- gllm_datastore/graph_data_store/utils/light_rag_lm_invoker_adapter.pyi +43 -0
- gllm_datastore/graph_data_store/utils/llama_index_em_invoker_adapter.pyi +45 -0
- gllm_datastore/graph_data_store/utils/llama_index_lm_invoker_adapter.pyi +169 -0
- gllm_datastore/signature/__init__.pyi +0 -0
- gllm_datastore/signature/webhook_signature.pyi +31 -0
- gllm_datastore/sql_data_store/__init__.pyi +4 -0
- gllm_datastore/sql_data_store/adapter/__init__.pyi +0 -0
- gllm_datastore/sql_data_store/adapter/sqlalchemy_adapter.pyi +38 -0
- gllm_datastore/sql_data_store/constants.pyi +6 -0
- gllm_datastore/sql_data_store/sql_data_store.pyi +86 -0
- gllm_datastore/sql_data_store/sqlalchemy_sql_data_store.pyi +216 -0
- gllm_datastore/sql_data_store/types.pyi +31 -0
- gllm_datastore/utils/__init__.pyi +6 -0
- gllm_datastore/utils/converter.pyi +51 -0
- gllm_datastore/utils/dict.pyi +21 -0
- gllm_datastore/utils/ttl.pyi +25 -0
- gllm_datastore/utils/types.pyi +32 -0
- gllm_datastore/vector_data_store/__init__.pyi +6 -0
- gllm_datastore/vector_data_store/chroma_vector_data_store.pyi +259 -0
- gllm_datastore/vector_data_store/elasticsearch_vector_data_store.pyi +357 -0
- gllm_datastore/vector_data_store/in_memory_vector_data_store.pyi +179 -0
- gllm_datastore/vector_data_store/mixin/__init__.pyi +0 -0
- gllm_datastore/vector_data_store/mixin/cache_compatible_mixin.pyi +145 -0
- gllm_datastore/vector_data_store/redis_vector_data_store.pyi +191 -0
- gllm_datastore/vector_data_store/vector_data_store.pyi +146 -0
- gllm_datastore.build/.gitignore +1 -0
- gllm_datastore.cpython-312-darwin.so +0 -0
- gllm_datastore.pyi +178 -0
- gllm_datastore_binary-0.5.50.dist-info/METADATA +185 -0
- gllm_datastore_binary-0.5.50.dist-info/RECORD +137 -0
- gllm_datastore_binary-0.5.50.dist-info/WHEEL +5 -0
- gllm_datastore_binary-0.5.50.dist-info/top_level.txt +1 -0

gllm_datastore/graph_data_store/utils/light_rag_lm_invoker_adapter.pyi
@@ -0,0 +1,43 @@
+from gllm_datastore.graph_data_store.utils.constants import LightRAGConstants as LightRAGConstants, LightRAGKeys as LightRAGKeys
+from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker
+from typing import Any
+
+class LightRAGLMInvokerAdapter:
+    """LMInvoker adapter for the LightRAG module.
+
+    This adapter is used to adapt the LMInvoker interface to the LightRAG module.
+    It handles the conversion between different prompt formats and manages
+    asynchronous invocation in a way that's compatible with nested event loops.
+    """
+    def __init__(self, lm_invoker: BaseLMInvoker) -> None:
+        """Initialize the LightRAGLMInvokerAdapter.
+
+        Args:
+            lm_invoker (BaseLMInvoker): The LM invoker to use.
+        """
+    def __deepcopy__(self, memo: dict) -> LightRAGLMInvokerAdapter:
+        """Custom deepcopy implementation to handle non-serializable objects.
+
+        This method is called when copy.deepcopy() is invoked on this object.
+        We create a new instance without deep-copying the invoker object
+        which may contain non-serializable components.
+
+        Args:
+            memo (dict): Memoization dictionary for deepcopy process
+
+        Returns:
+            LightRAGLMInvokerAdapter: A new instance with the same invoker reference
+        """
+    async def __call__(self, prompt: str, system_prompt: str | None = None, history_messages: list[dict[str, Any]] | None = None, **kwargs: Any) -> str:
+        """Make the adapter callable for compatibility with LightRAG.
+
+        Args:
+            prompt (str): The prompt to invoke the LM invoker with.
+            system_prompt (str | None, optional): The system prompt to format in string format. Defaults to None.
+            history_messages (list[dict[str, Any]] | None, optional): The history messages to format in OpenAI format.
+                Defaults to None.
+            **kwargs (Any): Additional keyword arguments for the LM invoker.
+
+        Returns:
+            str: The response from the LM invoker.
+        """
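
A minimal usage sketch (editorial addition, not part of the diff): it assumes you already have a configured BaseLMInvoker, referenced here as the hypothetical `my_lm_invoker`; only the constructor and the async `__call__` signature are taken from the stub above.

```python
import asyncio

from gllm_datastore.graph_data_store.utils.light_rag_lm_invoker_adapter import LightRAGLMInvokerAdapter


async def main(lm_invoker) -> None:
    # Wrap any BaseLMInvoker so LightRAG can call it like a plain async function.
    adapter = LightRAGLMInvokerAdapter(lm_invoker)

    # LightRAG passes the prompt plus an optional system prompt and OpenAI-style history.
    response = await adapter(
        "Summarize the entity relationships in this chunk.",
        system_prompt="You are a knowledge-graph extraction assistant.",
        history_messages=[{"role": "user", "content": "Previous turn"}],
    )
    print(response)


# asyncio.run(main(my_lm_invoker))  # `my_lm_invoker`: any configured BaseLMInvoker (hypothetical)
```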

gllm_datastore/graph_data_store/utils/llama_index_em_invoker_adapter.pyi
@@ -0,0 +1,45 @@
+from gllm_inference.em_invoker.em_invoker import BaseEMInvoker
+from llama_index.core.base.embeddings.base import BaseEmbedding
+from typing import Any
+
+class LlamaIndexEMInvokerAdapter(BaseEmbedding):
+    """Minimal EMInvoker adapter for the LlamaIndex BaseEmbedding interface.
+
+    This adapter wraps a BaseEMInvoker instance to provide compatibility with
+    LlamaIndex's BaseEmbedding interface. Embeddings from the underlying invoker
+    are returned directly without any conversion, assuming they are already in
+    the correct format (list of floats).
+
+    The adapter provides both synchronous and asynchronous methods for:
+    - Query embeddings: Single text embedding for search queries
+    - Text embeddings: Single or batch text embedding for documents
+
+    Attributes:
+        em_invoker (BaseEMInvoker): The underlying EM invoker instance.
+        model_name (str): The name of the embedding model (inherited from invoker).
+        embed_batch_size (int): The batch size for batch embedding operations.
+
+    Note:
+        Sync methods (_get_*) use asyncio.run internally to call async methods.
+        The implementation uses nest_asyncio to handle nested event loops if needed.
+    """
+    em_invoker: BaseEMInvoker
+    def __init__(self, em_invoker: BaseEMInvoker, embed_batch_size: int = ..., **kwargs: Any) -> None:
+        """Initialize the LlamaIndexEMInvokerAdapter.
+
+        Args:
+            em_invoker (BaseEMInvoker): The EM invoker to wrap.
+            embed_batch_size (int, optional): The batch size for embedding operations.
+                Defaults to DEFAULT_EMBED_BATCH_SIZE from LlamaIndex.
+            **kwargs (Any): Additional keyword arguments passed to BaseEmbedding (e.g.,
+                callback_manager).
+        """
+    @classmethod
+    def class_name(cls) -> str:
+        '''Get the class name (implements BaseEmbedding.class_name).
+
+        This is used by LlamaIndex for serialization and debugging.
+
+        Returns:
+            str: The class name "LlamaIndexEMInvokerAdapter".
+        '''
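
Another editorial sketch, not part of the diff: `my_em_invoker` stands in for any configured BaseEMInvoker, and `aget_query_embedding`/`get_text_embedding` are the standard LlamaIndex BaseEmbedding entry points the adapter inherits, not methods declared in this stub.

```python
import asyncio

from gllm_datastore.graph_data_store.utils.llama_index_em_invoker_adapter import LlamaIndexEMInvokerAdapter


async def main(em_invoker) -> None:
    # Wrap a BaseEMInvoker so it can be used anywhere LlamaIndex expects a BaseEmbedding.
    embedding = LlamaIndexEMInvokerAdapter(em_invoker, embed_batch_size=16)

    # Public BaseEmbedding methods inherited from LlamaIndex.
    query_vector = await embedding.aget_query_embedding("what is a hybrid cache?")
    doc_vector = embedding.get_text_embedding("A hybrid cache combines exact and semantic key matching.")
    print(len(query_vector), len(doc_vector))


# asyncio.run(main(my_em_invoker))  # `my_em_invoker`: any configured BaseEMInvoker (hypothetical)
```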

gllm_datastore/graph_data_store/utils/llama_index_lm_invoker_adapter.pyi
@@ -0,0 +1,169 @@
+from _typeshed import Incomplete
+from gllm_inference.lm_invoker.lm_invoker import BaseLMInvoker
+from llama_index.core.base.llms.types import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata
+from llama_index.core.llms import LLM
+from typing import Any, AsyncGenerator, Sequence
+
+ROLE_MAPPING: Incomplete
+
+class LlamaIndexLMInvokerAdapter(LLM):
+    """Minimal LMInvoker adapter for the LlamaIndex LLM interface.
+
+    This adapter wraps a BaseLMInvoker instance to provide compatibility with
+    LlamaIndex's LLM interface. It handles conversion between GLLM message formats
+    and LlamaIndex ChatMessage formats.
+
+    Only chat functionality is implemented. Completion and streaming methods raise
+    NotImplementedError to keep the implementation minimal.
+
+    Attributes:
+        lm_invoker (BaseLMInvoker): The underlying LM invoker instance.
+
+    Note:
+        Message roles are converted using the ROLE_MAPPING constant, which maps
+        all LlamaIndex message roles (SYSTEM, DEVELOPER, USER, ASSISTANT, TOOL,
+        FUNCTION, CHATBOT, MODEL) to GLLM MessageRole values.
+    """
+    lm_invoker: BaseLMInvoker
+    def __init__(self, lm_invoker: BaseLMInvoker, **kwargs: Any) -> None:
+        """Initialize the LlamaIndexLMInvokerAdapter.
+
+        Args:
+            lm_invoker (BaseLMInvoker): The LM invoker to wrap.
+            **kwargs (Any): Additional keyword arguments.
+        """
+    @property
+    def metadata(self) -> LLMMetadata:
+        """Get metadata about the language model.
+
+        Returns:
+            LLMMetadata: Metadata containing model information.
+        """
+    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
+        """Synchronous chat endpoint (implements LlamaIndex LLM.chat).
+
+        This is a synchronous wrapper around the async achat() method.
+        It handles both scenarios: when called from within an event loop and when
+        called from synchronous code.
+
+        Converts LlamaIndex ChatMessage objects to GLLM Message format, invokes
+        the underlying LM invoker, and converts the response back to ChatResponse.
+
+        Args:
+            messages (Sequence[ChatMessage]): The chat messages in LlamaIndex format.
+            **kwargs (Any): Additional keyword arguments. Supports:
+                - hyperparameters (dict, optional): Model hyperparameters like
+                    temperature, max_tokens, etc.
+
+        Returns:
+            ChatResponse: The chat response in LlamaIndex format with message content,
+                role, and optional metadata (token usage, finish details).
+        """
+    def complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponse:
+        """Synchronous completion endpoint.
+
+        Args:
+            prompt (str): The prompt string.
+            formatted (bool, optional): Whether the prompt is already formatted. Defaults to False.
+            **kwargs (Any): Additional keyword arguments.
+
+        Returns:
+            CompletionResponse: The completion response.
+
+        Raises:
+            NotImplementedError: Always raises this exception.
+        """
+    def stream_chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> AsyncGenerator[ChatResponse, None]:
+        """Streaming chat endpoint.
+
+        Args:
+            messages (Sequence[ChatMessage]): The chat messages.
+            **kwargs (Any): Additional keyword arguments.
+
+        Yields:
+            ChatResponse: Streaming chat responses.
+
+        Raises:
+            NotImplementedError: Always raises this exception.
+        """
+    def stream_complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> AsyncGenerator[CompletionResponse, None]:
+        """Streaming completion endpoint.
+
+        Args:
+            prompt (str): The prompt string.
+            formatted (bool, optional): Whether the prompt is already formatted. Defaults to False.
+            **kwargs (Any): Additional keyword arguments.
+
+        Yields:
+            CompletionResponse: Streaming completion responses.
+
+        Raises:
+            NotImplementedError: Always raises this exception.
+        """
+    async def achat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
+        """Asynchronous chat endpoint (implements LlamaIndex LLM.achat).
+
+        Converts LlamaIndex ChatMessage objects to GLLM Message format, invokes
+        the underlying LM invoker asynchronously, and converts the response back
+        to ChatResponse.
+
+        Args:
+            messages (Sequence[ChatMessage]): The chat messages in LlamaIndex format.
+            **kwargs (Any): Additional keyword arguments. Supports:
+                - hyperparameters (dict, optional): Model hyperparameters like
+                    temperature, max_tokens, etc.
+
+        Returns:
+            ChatResponse: The chat response in LlamaIndex format with message content,
+                role, and optional metadata (token usage, finish details).
+        """
+    async def acomplete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponse:
+        """Asynchronous completion endpoint.
+
+        Args:
+            prompt (str): The prompt string.
+            formatted (bool, optional): Whether the prompt is already formatted. Defaults to False.
+            **kwargs (Any): Additional keyword arguments.
+
+        Returns:
+            CompletionResponse: The completion response.
+
+        Raises:
+            NotImplementedError: Always raises this exception.
+        """
+    def astream_chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> AsyncGenerator[ChatResponse, None]:
+        """Asynchronous streaming chat endpoint.
+
+        Args:
+            messages (Sequence[ChatMessage]): The chat messages.
+            **kwargs (Any): Additional keyword arguments.
+
+        Yields:
+            ChatResponse: Streaming chat responses.
+
+        Raises:
+            NotImplementedError: Always raises this exception.
+        """
+    def astream_complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> AsyncGenerator[CompletionResponse, None]:
+        """Asynchronous streaming completion endpoint.
+
+        Args:
+            prompt (str): The prompt string.
+            formatted (bool, optional): Whether the prompt is already formatted. Defaults to False.
+            **kwargs (Any): Additional keyword arguments.
+
+        Yields:
+            CompletionResponse: Streaming completion responses.
+
+        Raises:
+            NotImplementedError: Always raises this exception.
+        """
+    @classmethod
+    def class_name(cls) -> str:
+        '''Get the class name (implements LLM.class_name).
+
+        This is used by LlamaIndex for serialization and debugging.
+
+        Returns:
+            str: The class name "LlamaIndexLMInvokerAdapter".
+        '''
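
An editorial sketch of the chat path (not part of the diff): `my_lm_invoker` is a hypothetical configured BaseLMInvoker, `ChatMessage`/`MessageRole` come from LlamaIndex, and the `hyperparameters` keyword is taken from the achat docstring above.

```python
import asyncio

from llama_index.core.base.llms.types import ChatMessage, MessageRole

from gllm_datastore.graph_data_store.utils.llama_index_lm_invoker_adapter import LlamaIndexLMInvokerAdapter


async def main(lm_invoker) -> None:
    # Wrap a BaseLMInvoker so LlamaIndex components can use it as an LLM.
    llm = LlamaIndexLMInvokerAdapter(lm_invoker)

    messages = [
        ChatMessage(role=MessageRole.SYSTEM, content="You answer in one sentence."),
        ChatMessage(role=MessageRole.USER, content="What does this adapter do?"),
    ]

    # Only chat/achat are implemented; completion and streaming raise NotImplementedError.
    response = await llm.achat(messages, hyperparameters={"temperature": 0.0})
    print(response.message.content)


# asyncio.run(main(my_lm_invoker))  # `my_lm_invoker`: any configured BaseLMInvoker (hypothetical)
```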

gllm_datastore/signature/__init__.pyi: File without changes

gllm_datastore/signature/webhook_signature.pyi
@@ -0,0 +1,31 @@
+def generate_webhook_signature_with_timestamp(payload: str | bytes, secret: str, timestamp: str) -> str:
+    """Generate HMAC SHA-256 signature with timestamp for webhook payload.
+
+    This function creates a signature that includes a timestamp, which helps prevent
+    replay attacks. The timestamp is prepended to the payload before signing.
+
+    Args:
+        payload (str | bytes): The webhook payload to sign.
+        secret (str): The shared secret key used for signing.
+        timestamp (str): Unix timestamp (in seconds) as a string.
+
+    Returns:
+        str: The hexadecimal representation of the HMAC SHA-256 signature.
+    """
+def verify_webhook_signature_with_timestamp(payload: str | bytes, secret: str, received_signature: str, timestamp: str, tolerance_seconds: int = 300) -> bool:
+    """Verify HMAC SHA-256 signature with timestamp for webhook payload.
+
+    This function verifies the signature and checks that the timestamp is within
+    the acceptable tolerance window. This prevents replay attacks where an attacker
+    could intercept and resend old webhook requests.
+
+    Args:
+        payload (str | bytes): The webhook payload to verify.
+        secret (str): The shared secret key used for verification.
+        received_signature (str): The signature received in the webhook request.
+        timestamp (str): The timestamp received in the webhook request.
+        tolerance_seconds (int): Maximum age of the webhook in seconds. Defaults to 300 (5 minutes).
+
+    Returns:
+        bool: True if the signature is valid and timestamp is within tolerance, False otherwise.
+    """
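
A round-trip sketch of the two functions (editorial addition, illustrative values only): the sender signs the payload with the current Unix timestamp, the receiver verifies it within the default 5-minute window.

```python
import time

from gllm_datastore.signature.webhook_signature import (
    generate_webhook_signature_with_timestamp,
    verify_webhook_signature_with_timestamp,
)

secret = "shared-webhook-secret"           # illustrative value
payload = '{"event": "document.indexed"}'  # the body you are about to send
timestamp = str(int(time.time()))          # Unix seconds, as a string per the signature above

# Sender side: sign the payload together with the timestamp.
signature = generate_webhook_signature_with_timestamp(payload, secret, timestamp)

# Receiver side: recompute and compare, rejecting anything older than 5 minutes.
is_valid = verify_webhook_signature_with_timestamp(
    payload, secret, received_signature=signature, timestamp=timestamp, tolerance_seconds=300
)
print(is_valid)
```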

gllm_datastore/sql_data_store/__init__.pyi
@@ -0,0 +1,4 @@
+from gllm_datastore.sql_data_store.sqlalchemy_sql_data_store import SQLAlchemySQLDataStore as SQLAlchemySQLDataStore
+from gllm_datastore.sql_data_store.types import QueryFilter as QueryFilter, QueryOptions as QueryOptions
+
+__all__ = ['SQLAlchemySQLDataStore', 'QueryFilter', 'QueryOptions']

gllm_datastore/sql_data_store/adapter/__init__.pyi: File without changes

gllm_datastore/sql_data_store/adapter/sqlalchemy_adapter.pyi
@@ -0,0 +1,38 @@
+from _typeshed import Incomplete
+from sqlalchemy.engine import Engine
+from typing import Any
+
+class SQLAlchemyAdapter:
+    """Initializes a database engine and session using SQLAlchemy.
+
+    Provides a scoped session and a base query property for interacting with the database.
+
+    Attributes:
+        engine (Engine): The SQLAlchemy engine object.
+        db (Session): The SQLAlchemy session object.
+        base (DeclarativeMeta): The SQLAlchemy declarative base object.
+    """
+    engine: Incomplete
+    db: Incomplete
+    base: Incomplete
+    @classmethod
+    def initialize(cls, engine_or_url: Engine | str, pool_size: int = 10, max_overflow: int = 10, autocommit: bool = False, autoflush: bool = True, **kwargs: Any):
+        """Creates a new database engine and session.
+
+        Must provide either an engine or a database URL.
+        If a database URL is provided, the engine will be created with the specified configurations:
+        1. For SQLite, only the pool size can be specified, since the engine will use SingletonThreadPool which
+            doesn't support max_overflow.
+        2. For other databases, the pool size and max overflow can be specified.
+
+        Args:
+            engine_or_url (Engine | str): Sqlalchemy engine object or database URL.
+            pool_size (int, optional): The size of the database connections to be maintained. Defaults to 10.
+            max_overflow (int, optional): The maximum overflow size of the pool. Defaults to 10.
+                If the engine_or_url is a SQLite URL, this parameter is ignored.
+            autocommit (bool, optional): If True, all changes to the database are committed immediately.
+                Defaults to False.
+            autoflush (bool, optional): If True, all changes to the database are flushed immediately. Defaults to True.
+            **kwargs (Any): Additional keyword arguments to be passed to the SQLAlchemy create_engine function.
+                These are only used when engine_or_url is a string URL.
+        """
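
A minimal sketch of the initialize call (editorial addition): only the classmethod signature and the documented SQLite behavior are taken from the stub; whether `engine`/`db` are then read from the class or from an instance is not visible here, so that part is an assumption.

```python
from gllm_datastore.sql_data_store.adapter.sqlalchemy_adapter import SQLAlchemyAdapter

# Initialize from a database URL; for SQLite only pool_size applies (max_overflow is ignored).
SQLAlchemyAdapter.initialize("sqlite:///example.db", pool_size=5)

# Assumption: the documented attributes are then available on the class.
engine = SQLAlchemyAdapter.engine  # SQLAlchemy Engine
session = SQLAlchemyAdapter.db     # scoped session
print(engine.url)
```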

gllm_datastore/sql_data_store/sql_data_store.pyi
@@ -0,0 +1,86 @@
+import pandas as pd
+from abc import ABC, abstractmethod
+from gllm_datastore.sql_data_store.types import QueryFilter as QueryFilter, QueryOptions as QueryOptions
+from typing import Any
+
+class BaseSQLDataStore(ABC):
+    """Abstract base class for SQL data stores.
+
+    This class defines the interface for all SQL data store implementations.
+    Subclasses must implement the abstract methods.
+    """
+    @abstractmethod
+    async def query(self, query: str, params: dict[str, Any] | None = None) -> pd.DataFrame:
+        """Executes raw SQL query.
+
+        This method must be implemented by subclasses to execute a raw SQL query.
+        Use this method for raw queries, complex queries, or for executing a query generated by LLM.
+
+        Args:
+            query (str): The query string to execute.
+            params (dict[str, Any] | None, optional): Parameters to bind to the query. Defaults to None.
+
+        Returns:
+            pd.DataFrame: A DataFrame of query results.
+
+        Raises:
+            NotImplementedError: If the method is not implemented.
+        """
+    @abstractmethod
+    def create(self, **kwargs: Any) -> None:
+        """Create data using available information in kwargs.
+
+        This method must be implemented by subclasses to create data in the data store.
+
+        Args:
+            **kwargs (Any): A dictionary of information to create data.
+
+        Raises:
+            NotImplementedError: If the method is not implemented.
+        """
+    @abstractmethod
+    def read(self, filters: QueryFilter | None = None, options: QueryOptions | None = None, **kwargs: Any) -> pd.DataFrame:
+        """Read data from the data store using optional filters and options.
+
+        This method must be implemented by subclasses to read data from the data store.
+        Use this method for simple queries with filters and options.
+
+        Args:
+            filters (QueryFilter | None, optional): Filters to apply to the query. Defaults to None.
+            options (QueryOptions | None, optional): Options to apply to the query. Defaults to None.
+            **kwargs (Any): A dictionary of additional information to support the read method.
+
+        Returns:
+            pd.DataFrame: A DataFrame of query results.
+
+        Raises:
+            NotImplementedError: If the method is not implemented.
+        """
+    @abstractmethod
+    def update(self, update_values: dict[str, Any], filters: QueryFilter | None = None, **kwargs: Any) -> None:
+        """Update data in the data store using optional filters and update values.
+
+        This method must be implemented by subclasses to update data in the data store.
+
+        Args:
+            update_values (dict[str, Any]): Values to update in the data store.
+            filters (QueryFilter | None, optional): Filters to apply to the query. Defaults to None.
+            **kwargs (Any): A dictionary of additional information to support the update method.
+
+        Raises:
+            NotImplementedError: If the method is not implemented.
+        """
+    @abstractmethod
+    def delete(self, filters: QueryFilter | None = None, allow_delete_all: bool = False, **kwargs: Any) -> None:
+        """Delete data in the data store using filters.
+
+        This method must be implemented by subclasses to delete data in the data store.
+
+        Args:
+            filters (QueryFilter | None, optional): Filters to apply to the query. Defaults to None.
+            allow_delete_all (bool, optional): A flag to allow deleting all data. Defaults to False.
+            **kwargs (Any): A dictionary of additional information to support the delete method.
+
+        Raises:
+            NotImplementedError: If the method is not implemented.
+        """
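
A toy subclass (editorial addition, not from the package) just to show which signatures a concrete implementation must provide; the single-DataFrame bodies are placeholders and ignore filters for brevity.

```python
import pandas as pd
from typing import Any

from gllm_datastore.sql_data_store.sql_data_store import BaseSQLDataStore
from gllm_datastore.sql_data_store.types import QueryFilter, QueryOptions


class InMemorySQLDataStore(BaseSQLDataStore):
    """Minimal concrete store backed by one pandas DataFrame (illustration only)."""

    def __init__(self) -> None:
        self._frame = pd.DataFrame()

    async def query(self, query: str, params: dict[str, Any] | None = None) -> pd.DataFrame:
        # A real subclass would execute SQL here; this stand-in just returns the table.
        return self._frame.copy()

    def create(self, **kwargs: Any) -> None:
        # Append one row built from the keyword arguments.
        self._frame = pd.concat([self._frame, pd.DataFrame([kwargs])], ignore_index=True)

    def read(self, filters: QueryFilter | None = None, options: QueryOptions | None = None, **kwargs: Any) -> pd.DataFrame:
        return self._frame.copy()

    def update(self, update_values: dict[str, Any], filters: QueryFilter | None = None, **kwargs: Any) -> None:
        for column, value in update_values.items():
            self._frame[column] = value

    def delete(self, filters: QueryFilter | None = None, allow_delete_all: bool = False, **kwargs: Any) -> None:
        if allow_delete_all:
            self._frame = self._frame.iloc[0:0]
```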

gllm_datastore/sql_data_store/sqlalchemy_sql_data_store.pyi
@@ -0,0 +1,216 @@
+import pandas as pd
+from _typeshed import Incomplete
+from concurrent.futures import Future as Future
+from gllm_datastore.encryptor.encryptor import BaseEncryptor as BaseEncryptor
+from gllm_datastore.sql_data_store.adapter.sqlalchemy_adapter import SQLAlchemyAdapter as SQLAlchemyAdapter
+from gllm_datastore.sql_data_store.constants import CREATE_ERROR_MSG as CREATE_ERROR_MSG, DELETE_ERROR_MSG as DELETE_ERROR_MSG, QUERY_ERROR_MSG as QUERY_ERROR_MSG, READ_ERROR_MSG as READ_ERROR_MSG, UNEXPECTED_ERROR_MSG as UNEXPECTED_ERROR_MSG, UPDATE_ERROR_MSG as UPDATE_ERROR_MSG
+from gllm_datastore.sql_data_store.sql_data_store import BaseSQLDataStore as BaseSQLDataStore
+from gllm_datastore.sql_data_store.types import QueryFilter as QueryFilter, QueryOptions as QueryOptions
+from sqlalchemy import Engine
+from sqlalchemy.orm import DeclarativeBase
+from typing import Any
+
+DEFAULT_POOL_SIZE: int
+DEFAULT_MAX_OVERFLOW: int
+DEFAULT_MAX_WORKERS: int
+DEFAULT_BATCH_SIZE: int
+
+class SQLAlchemySQLDataStore(BaseSQLDataStore):
+    """Data store for interacting with SQLAlchemy.
+
+    This class provides methods to interact with a SQL database using SQLAlchemy.
+
+    Attributes:
+        db (Session): The SQLAlchemy session object.
+        engine (Engine): The SQLAlchemy engine object.
+        logger (Logger): The logger object.
+        encryptor (BaseEncryptor | None): The encryptor object to use for encryption.
+        encrypted_table_fields (list[str]): The table.column fields to encrypt.
+    """
+    db: Incomplete
+    engine: Incomplete
+    logger: Incomplete
+    encryptor: Incomplete
+    encrypted_table_fields: Incomplete
+    def __init__(self, engine_or_url: Engine | str, pool_size: int = ..., max_overflow: int = ..., autoflush: bool = True, encryptor: BaseEncryptor | None = None, encrypted_table_fields: list[str] | None = None, **kwargs: Any) -> None:
+        '''Initialize SQLAlchemySQLDataStore class.
+
+        Args:
+            engine_or_url (Engine | str): SQLAlchemy engine object or database URL.
+            pool_size (int, optional): The size of the database connections to be maintained. Defaults to 10.
+            max_overflow (int, optional): The maximum overflow size of the pool. Defaults to 10.
+                This parameter is ignored for SQLite.
+            autoflush (bool, optional): If True, all changes to the database are flushed immediately. Defaults to True.
+            encryptor (BaseEncryptor | None, optional): The encryptor object to use for encryption.
+                Should comply with the BaseEncryptor interface. Defaults to None.
+            encrypted_table_fields (list[str] | None, optional): The table.column fields to encrypt.
+                Format: ["table_name.column_name", "messages.content", "users.email"].
+                Defaults to None, in which case no fields will be encrypted.
+            **kwargs (Any): Additional keyword arguments to support the initialization of the SQLAlchemy adapter.
+
+        Raises:
+            ValueError: If the database adapter is not initialized.
+        '''
+    async def query(self, query: str, params: dict[str, Any] | None = None) -> pd.DataFrame:
+        '''Executes raw SQL queries.
+
+        Preferred for complex queries, when working with legacy schemas without ORM models,
+        or when using an LLM to generate your SQL queries.
+        Use this method when you need advanced SQL operations not supported by read().
+
+        For raw queries, we can\'t determine table context automatically. Therefore, no decryption is performed.
+        Users should handle decryption manually if needed for raw queries.
+
+        Args:
+            query (str): The query string with optional :param style parameters.
+            params (dict[str, Any] | None, optional): Parameters to bind to the query. Defaults to None.
+
+        Returns:
+            pd.DataFrame: The result of the query.
+
+        Note:
+            Using string parameters directly in queries is unsafe and vulnerable to SQL injection.
+            Therefore, please avoid doing as follows as they\'re unsafe:
+            ```
+            name = "O\'Connor"
+            query = f"SELECT * FROM users WHERE last_name = \'{name}\'"
+            ```
+            or
+            ```
+            query = "SELECT * FROM users WHERE last_name = \'" + name + "\'"
+            ```
+            Instead, please use parameterized queries with :param style notation as follows:
+            ```
+            query = "SELECT * FROM users WHERE last_name = :last_name"
+            params = {"last_name": "O\'Connor"}
+            ```
+
+        Raises:
+            RuntimeError: If the query fails.
+            RuntimeError: If an unexpected error occurs.
+        '''
+    def create(self, model: DeclarativeBase | list[DeclarativeBase]) -> None:
+        '''Inserts data into the database using SQLAlchemy ORM.
+
+        This method provides a structured way to insert data using ORM models.
+
+        Args:
+            model (DeclarativeBase | list[DeclarativeBase]): An instance or list of instances of SQLAlchemy
+                model to be inserted.
+
+        Example:
+            To insert a row into a table:
+            ```
+            data_store.create(MyModel(column1="value1", column2="value2"))
+            ```
+
+            To insert multiple rows:
+            ```
+            data_store.create([
+                MyModel(column1="value1", column2="value2"),
+                MyModel(column1="value3", column2="value4")
+            ])
+            ```
+
+        Raises:
+            RuntimeError: If the insertion fails.
+            RuntimeError: If an unexpected error occurs.
+        '''
+    def read(self, model_class: type[DeclarativeBase], filters: QueryFilter | None = None, options: QueryOptions | None = None) -> pd.DataFrame:
+        '''Reads data from the database using SQLAlchemy ORM with a structured, type-safe interface.
+
+        This method provides a high-level interface for querying data using ORM models. It supports
+        filtering, column selection, ordering, and limiting results through a type-safe interface.
+
+        Args:
+            model_class (Type[DeclarativeBase]): The SQLAlchemy model class to query.
+            filters (QueryFilter | None, optional): Optional query filters containing column-value pairs
+                to filter the results. Defaults to None.
+            options (QueryOptions | None, optional): Optional query configuration including:
+                - columns: Specific columns to select
+                - order_by: Column to sort by
+                - order_desc: Sort order (ascending/descending)
+                - limit: Maximum number of results
+                Defaults to None.
+
+        Returns:
+            pd.DataFrame: A DataFrame containing the query results.
+
+        Example:
+            ```python
+            data_store.read(
+                Message,
+                filters=QueryFilter(conditions={"conversation_id": "123"}),
+                options=QueryOptions(
+                    columns=["role", "content"],
+                    order_by="created_at",
+                    order_desc=True,
+                    limit=10
+                )
+            )
+            ```
+
+        Raises:
+            RuntimeError: If the read operation fails.
+            RuntimeError: If an unexpected error occurs.
+        '''
+    def update(self, model_class: type[DeclarativeBase], update_values: dict[str, Any], filters: QueryFilter | None = None, **kwargs: Any) -> None:
+        '''Updates data in the database using SQLAlchemy ORM.
+
+        This method provides a structured way to update data using ORM models.
+
+        Args:
+            model_class (Type[DeclarativeBase]): The SQLAlchemy model class to update.
+            update_values (dict[str, Any]): Values to update.
+            filters (QueryFilter | None, optional): Filters to apply to the query. Defaults to None.
+            **kwargs (Any): Additional keyword arguments to support the update method.
+
+        Example:
+            To update a row in a table:
+            ```
+            data_store.update(
+                MyModel,
+                update_values={"column1": "new_value"}
+                filters=QueryFilter(conditions={"id": 1}),
+            )
+            ```
+
+        Note:
+            Encrypted fields cannot be used in update conditions due to non-deterministic encryption.
+            Use non-encrypted fields (like \'id\') for update conditions.
+
+        Raises:
+            ValueError: If encrypted fields are used in update conditions.
+            RuntimeError: If the update operation fails.
+            RuntimeError: If an unexpected error occurs.
+        '''
+    def delete(self, model_class: type[DeclarativeBase], filters: QueryFilter | None = None, allow_delete_all: bool = False, **kwargs: Any) -> None:
+        '''Deletes data from the database using SQLAlchemy ORM.
+
+        This method provides a structured way to delete data using ORM models.
+
+        Args:
+            model_class (Type[DeclarativeBase]): The SQLAlchemy model class to delete.
+            filters (QueryFilter | None, optional): Filters to apply to the query. Defaults to None.
+            allow_delete_all (bool, optional): If True, allows deletion of all records. Defaults to False.
+            **kwargs (Any): Additional keyword arguments to support the delete method.
+
+        Example:
+            To delete a row from a table:
+            ```
+            data_store.delete(
+                MyModel,
+                filters=QueryFilter(conditions={"id": 1})
+            )
+            ```
+
+        Note:
+            Encrypted fields cannot be used in delete conditions due to non-deterministic encryption.
+            Use non-encrypted fields (like \'id\') for deletion conditions.
+
+        Raises:
+            ValueError: If no filters are provided (to prevent accidental deletion of all records).
+            ValueError: If encrypted fields are used in delete conditions.
+            RuntimeError: If the delete operation fails.
+            RuntimeError: If an unexpected error occurs.
+        '''