kailash 0.6.3__py3-none-any.whl → 0.6.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +3 -3
- kailash/api/custom_nodes_secure.py +3 -3
- kailash/api/gateway.py +1 -1
- kailash/api/studio.py +2 -3
- kailash/api/workflow_api.py +3 -4
- kailash/core/resilience/bulkhead.py +460 -0
- kailash/core/resilience/circuit_breaker.py +92 -10
- kailash/edge/discovery.py +86 -0
- kailash/mcp_server/__init__.py +309 -33
- kailash/mcp_server/advanced_features.py +1022 -0
- kailash/mcp_server/ai_registry_server.py +27 -2
- kailash/mcp_server/auth.py +789 -0
- kailash/mcp_server/client.py +645 -378
- kailash/mcp_server/discovery.py +1593 -0
- kailash/mcp_server/errors.py +673 -0
- kailash/mcp_server/oauth.py +1727 -0
- kailash/mcp_server/protocol.py +1126 -0
- kailash/mcp_server/registry_integration.py +587 -0
- kailash/mcp_server/server.py +1213 -98
- kailash/mcp_server/transports.py +1169 -0
- kailash/mcp_server/utils/__init__.py +6 -1
- kailash/mcp_server/utils/cache.py +250 -7
- kailash/middleware/auth/auth_manager.py +3 -3
- kailash/middleware/communication/api_gateway.py +2 -9
- kailash/middleware/communication/realtime.py +1 -1
- kailash/middleware/mcp/enhanced_server.py +1 -1
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/admin/audit_log.py +6 -6
- kailash/nodes/admin/permission_check.py +8 -8
- kailash/nodes/admin/role_management.py +32 -28
- kailash/nodes/admin/schema.sql +6 -1
- kailash/nodes/admin/schema_manager.py +13 -13
- kailash/nodes/admin/security_event.py +16 -20
- kailash/nodes/admin/tenant_isolation.py +3 -3
- kailash/nodes/admin/transaction_utils.py +3 -3
- kailash/nodes/admin/user_management.py +21 -22
- kailash/nodes/ai/a2a.py +11 -11
- kailash/nodes/ai/ai_providers.py +9 -12
- kailash/nodes/ai/embedding_generator.py +13 -14
- kailash/nodes/ai/intelligent_agent_orchestrator.py +19 -19
- kailash/nodes/ai/iterative_llm_agent.py +2 -2
- kailash/nodes/ai/llm_agent.py +210 -33
- kailash/nodes/ai/self_organizing.py +2 -2
- kailash/nodes/alerts/discord.py +4 -4
- kailash/nodes/api/graphql.py +6 -6
- kailash/nodes/api/http.py +12 -17
- kailash/nodes/api/rate_limiting.py +4 -4
- kailash/nodes/api/rest.py +15 -15
- kailash/nodes/auth/mfa.py +3 -4
- kailash/nodes/auth/risk_assessment.py +2 -2
- kailash/nodes/auth/session_management.py +5 -5
- kailash/nodes/auth/sso.py +143 -0
- kailash/nodes/base.py +6 -2
- kailash/nodes/base_async.py +16 -2
- kailash/nodes/base_with_acl.py +2 -2
- kailash/nodes/cache/__init__.py +9 -0
- kailash/nodes/cache/cache.py +1172 -0
- kailash/nodes/cache/cache_invalidation.py +870 -0
- kailash/nodes/cache/redis_pool_manager.py +595 -0
- kailash/nodes/code/async_python.py +2 -1
- kailash/nodes/code/python.py +196 -35
- kailash/nodes/compliance/data_retention.py +6 -6
- kailash/nodes/compliance/gdpr.py +5 -5
- kailash/nodes/data/__init__.py +10 -0
- kailash/nodes/data/optimistic_locking.py +906 -0
- kailash/nodes/data/readers.py +8 -8
- kailash/nodes/data/redis.py +349 -0
- kailash/nodes/data/sql.py +314 -3
- kailash/nodes/data/streaming.py +21 -0
- kailash/nodes/enterprise/__init__.py +8 -0
- kailash/nodes/enterprise/audit_logger.py +285 -0
- kailash/nodes/enterprise/batch_processor.py +22 -3
- kailash/nodes/enterprise/data_lineage.py +1 -1
- kailash/nodes/enterprise/mcp_executor.py +205 -0
- kailash/nodes/enterprise/service_discovery.py +150 -0
- kailash/nodes/enterprise/tenant_assignment.py +108 -0
- kailash/nodes/logic/async_operations.py +2 -2
- kailash/nodes/logic/convergence.py +1 -1
- kailash/nodes/logic/operations.py +1 -1
- kailash/nodes/monitoring/__init__.py +11 -1
- kailash/nodes/monitoring/health_check.py +456 -0
- kailash/nodes/monitoring/log_processor.py +817 -0
- kailash/nodes/monitoring/metrics_collector.py +627 -0
- kailash/nodes/monitoring/performance_benchmark.py +137 -11
- kailash/nodes/rag/advanced.py +7 -7
- kailash/nodes/rag/agentic.py +49 -2
- kailash/nodes/rag/conversational.py +3 -3
- kailash/nodes/rag/evaluation.py +3 -3
- kailash/nodes/rag/federated.py +3 -3
- kailash/nodes/rag/graph.py +3 -3
- kailash/nodes/rag/multimodal.py +3 -3
- kailash/nodes/rag/optimized.py +5 -5
- kailash/nodes/rag/privacy.py +3 -3
- kailash/nodes/rag/query_processing.py +6 -6
- kailash/nodes/rag/realtime.py +1 -1
- kailash/nodes/rag/registry.py +2 -6
- kailash/nodes/rag/router.py +1 -1
- kailash/nodes/rag/similarity.py +7 -7
- kailash/nodes/rag/strategies.py +4 -4
- kailash/nodes/security/abac_evaluator.py +6 -6
- kailash/nodes/security/behavior_analysis.py +5 -6
- kailash/nodes/security/credential_manager.py +1 -1
- kailash/nodes/security/rotating_credentials.py +11 -11
- kailash/nodes/security/threat_detection.py +8 -8
- kailash/nodes/testing/credential_testing.py +2 -2
- kailash/nodes/transform/processors.py +5 -5
- kailash/runtime/local.py +162 -14
- kailash/runtime/parameter_injection.py +425 -0
- kailash/runtime/parameter_injector.py +657 -0
- kailash/runtime/testing.py +2 -2
- kailash/testing/fixtures.py +2 -2
- kailash/workflow/builder.py +99 -18
- kailash/workflow/builder_improvements.py +207 -0
- kailash/workflow/input_handling.py +170 -0
- {kailash-0.6.3.dist-info → kailash-0.6.4.dist-info}/METADATA +22 -9
- {kailash-0.6.3.dist-info → kailash-0.6.4.dist-info}/RECORD +120 -94
- {kailash-0.6.3.dist-info → kailash-0.6.4.dist-info}/WHEEL +0 -0
- {kailash-0.6.3.dist-info → kailash-0.6.4.dist-info}/entry_points.txt +0 -0
- {kailash-0.6.3.dist-info → kailash-0.6.4.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.6.3.dist-info → kailash-0.6.4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,906 @@
|
|
1
|
+
"""Optimistic locking support for enterprise-grade concurrency control.
|
2
|
+
|
3
|
+
Provides version-based concurrency control, conflict detection, and automatic
|
4
|
+
retry mechanisms to prevent lost updates in concurrent environments.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import asyncio
|
8
|
+
import logging
|
9
|
+
import time
|
10
|
+
from datetime import UTC, datetime
|
11
|
+
from enum import Enum
|
12
|
+
from typing import Any, Dict, List, Optional, Union
|
13
|
+
|
14
|
+
from kailash.nodes.base import NodeParameter, register_node
|
15
|
+
from kailash.nodes.base_async import AsyncNode
|
16
|
+
from kailash.sdk_exceptions import NodeExecutionError
|
17
|
+
|
18
|
+
logger = logging.getLogger(__name__)
|
19
|
+
|
20
|
+
|
21
|
+
class ConflictResolution(Enum):
    """Strategies for resolving optimistic-lock version conflicts.

    The string values are the wire format accepted by the
    ``conflict_resolution`` parameter of the locking node.
    """

    FAIL_FAST = "fail_fast"  # Surface the conflict to the caller immediately
    RETRY = "retry"  # Re-read the row and retry with the fresh version
    MERGE = "merge"  # Field-by-field merge of both writers' changes
    LAST_WRITER_WINS = "last_writer_wins"  # Overwrite unconditionally
class LockStatus(Enum):
    """Outcome codes reported by optimistic-locking operations."""

    SUCCESS = "success"  # Operation applied cleanly
    VERSION_CONFLICT = "version_conflict"  # Row changed under us
    RECORD_NOT_FOUND = "record_not_found"  # No row with the given id
    RETRY_EXHAUSTED = "retry_exhausted"  # Gave up after max_retries attempts
    MERGE_CONFLICT = "merge_conflict"  # Merge strategy itself failed
@register_node()
|
41
|
+
class OptimisticLockingNode(AsyncNode):
|
42
|
+
"""Implements optimistic locking with version fields for concurrency control.
|
43
|
+
|
44
|
+
Provides:
|
45
|
+
- Version-based concurrency control
|
46
|
+
- Automatic conflict detection and resolution
|
47
|
+
- Configurable retry strategies
|
48
|
+
- Performance metrics for lock contention
|
49
|
+
- Integration with existing SQL nodes
|
50
|
+
|
51
|
+
Design Purpose:
|
52
|
+
- Prevent lost updates in concurrent environments
|
53
|
+
- Provide enterprise-grade data consistency
|
54
|
+
- Support multiple conflict resolution strategies
|
55
|
+
- Enable high-performance concurrent operations
|
56
|
+
|
57
|
+
Examples:
|
58
|
+
>>> # Read with version tracking
|
59
|
+
>>> lock_manager = OptimisticLockingNode()
|
60
|
+
>>> result = await lock_manager.execute(
|
61
|
+
... action="read_with_version",
|
62
|
+
... table_name="users",
|
63
|
+
... record_id=123,
|
64
|
+
... connection=db_connection
|
65
|
+
... )
|
66
|
+
|
67
|
+
>>> # Update with version check
|
68
|
+
>>> update_result = await lock_manager.execute(
|
69
|
+
... action="update_with_version",
|
70
|
+
... table_name="users",
|
71
|
+
... record_id=123,
|
72
|
+
... update_data={"name": "John Updated"},
|
73
|
+
... expected_version=result["version"],
|
74
|
+
... conflict_resolution="retry",
|
75
|
+
... connection=db_connection
|
76
|
+
... )
|
77
|
+
"""
|
78
|
+
|
79
|
+
def __init__(
    self,
    version_field: str = "version",
    max_retries: int = 3,
    retry_delay: float = 0.1,
    retry_backoff_multiplier: float = 2.0,
    default_conflict_resolution: ConflictResolution = ConflictResolution.RETRY,
    **kwargs,
):
    """Initialize optimistic locking manager.

    Args:
        version_field: Column that holds the row's version counter.
        max_retries: Upper bound on RETRY-strategy re-attempts.
        retry_delay: Initial sleep between retries, in seconds.
        retry_backoff_multiplier: Factor applied to the delay after each
            failed attempt (exponential backoff).
        default_conflict_resolution: Strategy used when the caller does not
            pass one per operation.
        **kwargs: Forwarded to the AsyncNode base constructor.
    """
    super().__init__(**kwargs)

    self.version_field = version_field
    self.max_retries = max_retries
    self.retry_delay = retry_delay
    self.retry_backoff_multiplier = retry_backoff_multiplier
    self.default_conflict_resolution = default_conflict_resolution

    # Contention counters, surfaced via the "get_metrics" action.
    self.lock_metrics = dict.fromkeys(
        (
            "total_operations",
            "successful_operations",
            "version_conflicts",
            "retries_performed",
            "merge_conflicts",
        ),
        0,
    )
    self.lock_metrics["avg_retry_count"] = 0.0

    # Per-conflict records retained for later pattern analysis.
    self.conflict_history: List[Dict[str, Any]] = []

    self.logger.info(f"Initialized OptimisticLockingNode: {self.id}")
def get_parameters(self) -> Dict[str, NodeParameter]:
    """Declare the input parameters accepted by this node.

    The table below is expanded into ``NodeParameter`` objects; the dict
    key doubles as the parameter's ``name``.
    """
    specs: Dict[str, Dict[str, Any]] = {
        "action": dict(
            type=str,
            required=True,
            description="Action to perform (read_with_version, update_with_version, batch_update)",
        ),
        "connection": dict(
            type=Any,
            required=True,
            description="Database connection object",
        ),
        "table_name": dict(
            type=str,
            required=True,
            description="Table name for the operation",
        ),
        "record_id": dict(
            type=Any,
            required=False,
            description="Record identifier (for single record operations)",
        ),
        "record_ids": dict(
            type=list,
            required=False,
            description="Multiple record identifiers (for batch operations)",
        ),
        "update_data": dict(
            type=dict,
            required=False,
            description="Data to update",
        ),
        "batch_updates": dict(
            type=list,
            required=False,
            description="List of update operations for batch processing",
        ),
        "expected_version": dict(
            type=int,
            required=False,
            description="Expected version for conflict detection",
        ),
        "conflict_resolution": dict(
            type=str,
            required=False,
            default="retry",
            description="Conflict resolution strategy (fail_fast, retry, merge, last_writer_wins)",
        ),
        "version_field": dict(
            type=str,
            required=False,
            default="version",
            description="Name of the version field",
        ),
        "id_field": dict(
            type=str,
            required=False,
            default="id",
            description="Name of the ID field",
        ),
        "merge_strategy": dict(
            type=dict,
            required=False,
            description="Merge strategy configuration for conflict resolution",
        ),
        "timeout": dict(
            type=int,
            required=False,
            default=30,
            description="Operation timeout in seconds",
        ),
    }
    return {name: NodeParameter(name=name, **spec) for name, spec in specs.items()}
def get_output_schema(self) -> Dict[str, NodeParameter]:
    """Declare the output fields produced by this node.

    Fields without ``required`` are always present in results; the rest
    appear only for the actions that produce them.
    """
    specs: Dict[str, Dict[str, Any]] = {
        "success": dict(
            type=bool,
            description="Whether the operation succeeded",
        ),
        "status": dict(
            type=str,
            description="Operation status (success, version_conflict, etc.)",
        ),
        "record": dict(
            type=dict,
            required=False,
            description="Retrieved record with version information",
        ),
        "records": dict(
            type=list,
            required=False,
            description="Multiple records (for batch operations)",
        ),
        "version": dict(
            type=int,
            required=False,
            description="Current version of the record",
        ),
        "new_version": dict(
            type=int,
            required=False,
            description="New version after update",
        ),
        "updated": dict(
            type=bool,
            required=False,
            description="Whether record was updated",
        ),
        "retry_count": dict(
            type=int,
            required=False,
            description="Number of retries performed",
        ),
        "conflict_info": dict(
            type=dict,
            required=False,
            description="Information about version conflicts",
        ),
        "execution_time": dict(
            type=float,
            description="Operation execution time",
        ),
        "metrics": dict(
            type=dict,
            required=False,
            description="Lock contention metrics",
        ),
    }
    return {name: NodeParameter(name=name, **spec) for name, spec in specs.items()}
async def async_run(self, **kwargs) -> Dict[str, Any]:
    """Route an optimistic-locking action to its handler.

    The handler's result dict is returned augmented with
    ``execution_time``; any exception (including an unknown ``action``)
    is converted into an ``{"success": False, "status": "error", ...}``
    result instead of propagating.
    """
    action = kwargs["action"]
    started = time.time()

    try:
        self.lock_metrics["total_operations"] += 1

        # Lazily-invoked handler table keeps dispatch in one place.
        handlers = {
            "read_with_version": lambda: self._read_with_version(kwargs),
            "update_with_version": lambda: self._update_with_version(kwargs),
            "batch_update": lambda: self._batch_update_with_version(kwargs),
            "get_metrics": lambda: self._get_lock_metrics(),
            "analyze_conflicts": lambda: self._analyze_conflict_patterns(),
        }
        if action not in handlers:
            raise ValueError(f"Unknown action: {action}")
        result = await handlers[action]()

        elapsed = time.time() - started

        if result.get("success", False):
            self.lock_metrics["successful_operations"] += 1

        # Handler keys win over execution_time on collision (unchanged).
        return {"execution_time": elapsed, **result}

    except Exception as e:
        elapsed = time.time() - started
        self.logger.error(f"Optimistic locking operation failed: {str(e)}")
        return {
            "success": False,
            "status": "error",
            "error": str(e),
            "execution_time": elapsed,
        }
async def _read_with_version(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
    """Read a single record together with its current version.

    Args:
        kwargs: Must contain ``connection``, ``table_name`` and
            ``record_id``; may override ``version_field`` and ``id_field``.

    Returns:
        ``{"success": True, "status": "success", "record": <dict>,
        "version": <int>}`` on success; a ``record_not_found`` or
        ``error`` result dict otherwise (never raises).
    """
    connection = kwargs["connection"]
    table_name = kwargs["table_name"]
    record_id = kwargs["record_id"]
    version_field = kwargs.get("version_field", self.version_field)
    id_field = kwargs.get("id_field", "id")

    try:
        # NOTE: table/field names are interpolated directly into the SQL;
        # they must come from trusted configuration, never user input.
        # Fix: SELECT * already includes the version column — the previous
        # "SELECT *, {version_field}" selected it twice, which is redundant
        # and breaks drivers/row factories that reject duplicate columns.
        query = f"SELECT * FROM {table_name} WHERE {id_field} = ?"

        # Dual-mode connection support. NOTE(review): this heuristic treats
        # any object with an `execute` attribute as synchronous — confirm
        # that the async drivers used here don't also expose `execute`.
        if hasattr(connection, "execute"):
            # Synchronous connection
            cursor = connection.execute(query, [record_id])
            record = cursor.fetchone()
        else:
            # Assume async connection
            cursor = await connection.execute(query, [record_id])
            record = await cursor.fetchone()

        if record is None:
            return {
                "success": False,
                "status": LockStatus.RECORD_NOT_FOUND.value,
                "error": f"Record with {id_field}={record_id} not found",
            }

        # Normalise driver-specific row types into a plain dict.
        if hasattr(record, "_asdict"):
            # namedtuple-style rows
            record_dict = record._asdict()
        elif hasattr(record, "keys"):
            # mapping-like rows (e.g. sqlite3.Row)
            record_dict = dict(record)
        else:
            # Positional tuple: pair values with cursor column metadata.
            columns = [desc[0] for desc in cursor.description]
            record_dict = dict(zip(columns, record))

        # Missing version column defaults to 0 rather than failing.
        current_version = record_dict.get(version_field, 0)

        return {
            "success": True,
            "status": LockStatus.SUCCESS.value,
            "record": record_dict,
            "version": current_version,
        }

    except Exception as e:
        self.logger.error(f"Failed to read record with version: {e}")
        return {
            "success": False,
            "status": "error",
            "error": str(e),
        }
async def _update_with_version(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
    """Update record with version check and conflict resolution.

    Runs a compare-and-swap update loop: each attempt only succeeds if
    the row still carries ``expected_version``. On a version conflict the
    configured strategy decides what happens next (fail fast, re-read and
    retry with backoff, merge, or force-overwrite). Every conflict is
    counted in ``self.lock_metrics`` and appended to
    ``self.conflict_history`` for later analysis.
    """
    connection = kwargs["connection"]
    table_name = kwargs["table_name"]
    record_id = kwargs["record_id"]
    update_data = kwargs["update_data"]
    # NOTE(review): raises KeyError when expected_version is omitted even
    # though get_parameters declares it optional; the KeyError surfaces as
    # an "error" result via async_run's catch-all.
    expected_version = kwargs["expected_version"]
    conflict_resolution = ConflictResolution(
        kwargs.get("conflict_resolution", self.default_conflict_resolution.value)
    )
    version_field = kwargs.get("version_field", self.version_field)
    id_field = kwargs.get("id_field", "id")

    retry_count = 0
    current_delay = self.retry_delay

    # Up to max_retries + 1 total attempts; only the RETRY strategy loops.
    while retry_count <= self.max_retries:
        try:
            result = await self._attempt_versioned_update(
                connection,
                table_name,
                record_id,
                update_data,
                expected_version,
                version_field,
                id_field,
            )

            if result["success"]:
                return {
                    **result,
                    "retry_count": retry_count,
                }

            # Handle version conflict
            if result["status"] == LockStatus.VERSION_CONFLICT.value:
                self.lock_metrics["version_conflicts"] += 1

                # Record conflict for analysis
                conflict_info = {
                    "timestamp": datetime.now(UTC),
                    "table_name": table_name,
                    "record_id": record_id,
                    "expected_version": expected_version,
                    "current_version": result.get("current_version"),
                    "retry_count": retry_count,
                    "resolution_strategy": conflict_resolution.value,
                }
                self.conflict_history.append(conflict_info)

                # Apply conflict resolution strategy. Every branch below
                # returns or continues, so the loop cannot fall through
                # while in a conflict state.
                if conflict_resolution == ConflictResolution.FAIL_FAST:
                    # Surface the conflict immediately to the caller.
                    return {
                        **result,
                        "retry_count": retry_count,
                        "conflict_info": conflict_info,
                    }

                elif conflict_resolution == ConflictResolution.RETRY:
                    # Check budget before spending another attempt.
                    if retry_count >= self.max_retries:
                        return {
                            "success": False,
                            "status": LockStatus.RETRY_EXHAUSTED.value,
                            "retry_count": retry_count,
                            "conflict_info": conflict_info,
                            "error": f"Maximum retries ({self.max_retries}) exceeded",
                        }

                    # Get current version for retry
                    read_result = await self._read_with_version(
                        {
                            "connection": connection,
                            "table_name": table_name,
                            "record_id": record_id,
                            "version_field": version_field,
                            "id_field": id_field,
                        }
                    )

                    if not read_result["success"]:
                        return read_result

                    # Retry against the freshly observed version.
                    expected_version = read_result["version"]
                    retry_count += 1
                    self.lock_metrics["retries_performed"] += 1

                    # Exponential backoff
                    await asyncio.sleep(current_delay)
                    current_delay *= self.retry_backoff_multiplier

                    continue

                elif conflict_resolution == ConflictResolution.MERGE:
                    # Delegate to the merge path; it re-reads the row and
                    # combines both writers' changes field by field.
                    merge_result = await self._attempt_merge_update(
                        connection,
                        table_name,
                        record_id,
                        update_data,
                        expected_version,
                        kwargs.get("merge_strategy", {}),
                        version_field,
                        id_field,
                    )
                    return {
                        **merge_result,
                        "retry_count": retry_count,
                        "conflict_info": conflict_info,
                    }

                elif conflict_resolution == ConflictResolution.LAST_WRITER_WINS:
                    # Force update regardless of version
                    return await self._force_update(
                        connection,
                        table_name,
                        record_id,
                        update_data,
                        version_field,
                        id_field,
                        retry_count,
                        conflict_info,
                    )

            else:
                # Other error (record not found, etc.)
                return {
                    **result,
                    "retry_count": retry_count,
                }

        except Exception as e:
            self.logger.error(f"Update attempt failed: {e}")
            return {
                "success": False,
                "status": "error",
                "error": str(e),
                "retry_count": retry_count,
            }

    # Should not reach here, but fallback
    return {
        "success": False,
        "status": LockStatus.RETRY_EXHAUSTED.value,
        "retry_count": retry_count,
        "error": "Unexpected retry exhaustion",
    }
async def _attempt_versioned_update(
    self,
    connection: Any,
    table_name: str,
    record_id: Any,
    update_data: Dict[str, Any],
    expected_version: int,
    version_field: str,
    id_field: str,
) -> Dict[str, Any]:
    """Attempt a single compare-and-swap style update.

    The UPDATE matches only while the stored version equals
    ``expected_version`` and increments the version column in the same
    statement, so the check-and-bump is atomic at the database level.

    Returns:
        ``success``/``new_version`` on a hit; ``version_conflict`` (with
        ``current_version``) or ``record_not_found`` when zero rows were
        touched; ``error`` on any exception.
    """
    try:
        # NOTE: table/field names (including update_data keys) are
        # interpolated into the SQL; they must be trusted identifiers.
        # Fix: an empty update_data previously produced "SET , version..."
        # — a SQL syntax error. Build the assignment list so the version
        # bump alone still forms valid SQL.
        assignments = [f"{k} = ?" for k in update_data.keys()]
        assignments.append(f"{version_field} = {version_field} + 1")
        set_clause = ", ".join(assignments)
        update_query = f"""
        UPDATE {table_name}
        SET {set_clause}
        WHERE {id_field} = ? AND {version_field} = ?
        """

        params = list(update_data.values()) + [record_id, expected_version]

        # Execute on a sync (DB-API) or async connection.
        if hasattr(connection, "execute"):
            # Synchronous connection
            result = connection.execute(update_query, params)
            rows_affected = result.rowcount
        else:
            # Assume async connection
            result = await connection.execute(update_query, params)
            rows_affected = result.rowcount

        if rows_affected == 0:
            # Zero rows: disambiguate "row gone" from "version moved".
            check_query = (
                f"SELECT {version_field} FROM {table_name} WHERE {id_field} = ?"
            )

            if hasattr(connection, "execute"):
                check_result = connection.execute(check_query, [record_id])
                current_record = check_result.fetchone()
            else:
                check_result = await connection.execute(check_query, [record_id])
                current_record = await check_result.fetchone()

            if current_record is None:
                return {
                    "success": False,
                    "status": LockStatus.RECORD_NOT_FOUND.value,
                    "error": f"Record with {id_field}={record_id} not found",
                }
            else:
                current_version = current_record[0]
                return {
                    "success": False,
                    "status": LockStatus.VERSION_CONFLICT.value,
                    "error": "Version mismatch - record was modified by another transaction",
                    "expected_version": expected_version,
                    "current_version": current_version,
                }

        # The WHERE clause matched expected_version, so the new version is
        # deterministically expected_version + 1.
        return {
            "success": True,
            "status": LockStatus.SUCCESS.value,
            "updated": True,
            "new_version": expected_version + 1,
            "rows_affected": rows_affected,
        }

    except Exception as e:
        return {
            "success": False,
            "status": "error",
            "error": f"Update failed: {e}",
        }
async def _attempt_merge_update(
    self,
    connection: Any,
    table_name: str,
    record_id: Any,
    update_data: Dict[str, Any],
    expected_version: int,
    merge_strategy: Dict[str, Any],
    version_field: str,
    id_field: str,
) -> Dict[str, Any]:
    """Resolve a version conflict by merging with the latest row state.

    Re-reads the current record, merges ``update_data`` into it according
    to ``merge_strategy``, then retries the versioned update against the
    freshly observed version. ``expected_version`` is accepted for
    signature parity but the merge always works from the current version.
    """
    try:
        # Fetch the row as it exists right now.
        latest = await self._read_with_version(
            {
                "connection": connection,
                "table_name": table_name,
                "record_id": record_id,
                "version_field": version_field,
                "id_field": id_field,
            }
        )
        if not latest["success"]:
            return latest

        # Combine both writers' changes field by field.
        combined = self._merge_record_data(
            latest["record"], update_data, merge_strategy
        )

        # One more CAS attempt with the just-read version.
        return await self._attempt_versioned_update(
            connection,
            table_name,
            record_id,
            combined,
            latest["version"],
            version_field,
            id_field,
        )

    except Exception as e:
        self.lock_metrics["merge_conflicts"] += 1
        return {
            "success": False,
            "status": LockStatus.MERGE_CONFLICT.value,
            "error": f"Merge failed: {e}",
        }
def _merge_record_data(
|
638
|
+
self,
|
639
|
+
current_record: Dict[str, Any],
|
640
|
+
update_data: Dict[str, Any],
|
641
|
+
merge_strategy: Dict[str, Any],
|
642
|
+
) -> Dict[str, Any]:
|
643
|
+
"""Merge record data using specified strategy."""
|
644
|
+
merged_data = {}
|
645
|
+
|
646
|
+
# Default merge strategy: last writer wins for each field
|
647
|
+
default_strategy = merge_strategy.get("default", "last_writer_wins")
|
648
|
+
field_strategies = merge_strategy.get("fields", {})
|
649
|
+
|
650
|
+
for field, new_value in update_data.items():
|
651
|
+
strategy = field_strategies.get(field, default_strategy)
|
652
|
+
current_value = current_record.get(field)
|
653
|
+
|
654
|
+
if strategy == "last_writer_wins":
|
655
|
+
merged_data[field] = new_value
|
656
|
+
elif strategy == "keep_current":
|
657
|
+
merged_data[field] = current_value
|
658
|
+
elif strategy == "numeric_add":
|
659
|
+
if isinstance(current_value, (int, float)) and isinstance(
|
660
|
+
new_value, (int, float)
|
661
|
+
):
|
662
|
+
merged_data[field] = current_value + new_value
|
663
|
+
else:
|
664
|
+
merged_data[field] = new_value
|
665
|
+
elif strategy == "list_append":
|
666
|
+
if isinstance(current_value, list) and isinstance(new_value, list):
|
667
|
+
merged_data[field] = current_value + new_value
|
668
|
+
else:
|
669
|
+
merged_data[field] = new_value
|
670
|
+
else:
|
671
|
+
# Default to last writer wins
|
672
|
+
merged_data[field] = new_value
|
673
|
+
|
674
|
+
return merged_data
|
675
|
+
|
676
|
+
async def _force_update(
    self,
    connection: Any,
    table_name: str,
    record_id: Any,
    update_data: Dict[str, Any],
    version_field: str,
    id_field: str,
    retry_count: int,
    conflict_info: Dict[str, Any],
) -> Dict[str, Any]:
    """Apply a last-writer-wins update, ignoring the stored version.

    The version column is still incremented so subsequent optimistic
    readers observe that the row changed. The post-update version is
    re-read and returned as ``new_version``; the result is tagged with
    ``forced_update: True``.
    """
    try:
        # NOTE: table/field names (including update_data keys) are
        # interpolated into the SQL; they must be trusted identifiers.
        # Fix: an empty update_data previously produced "SET , version..."
        # — a SQL syntax error. Build the assignment list so the version
        # bump alone still forms valid SQL.
        assignments = [f"{k} = ?" for k in update_data.keys()]
        assignments.append(f"{version_field} = {version_field} + 1")
        set_clause = ", ".join(assignments)
        update_query = f"""
        UPDATE {table_name}
        SET {set_clause}
        WHERE {id_field} = ?
        """

        params = list(update_data.values()) + [record_id]

        # Execute on a sync (DB-API) or async connection.
        if hasattr(connection, "execute"):
            result = connection.execute(update_query, params)
            rows_affected = result.rowcount
        else:
            result = await connection.execute(update_query, params)
            rows_affected = result.rowcount

        if rows_affected == 0:
            return {
                "success": False,
                "status": LockStatus.RECORD_NOT_FOUND.value,
                "error": f"Record with {id_field}={record_id} not found",
                "retry_count": retry_count,
                "conflict_info": conflict_info,
            }

        # Re-read so the caller learns the post-update version value.
        version_query = (
            f"SELECT {version_field} FROM {table_name} WHERE {id_field} = ?"
        )
        if hasattr(connection, "execute"):
            version_result = connection.execute(version_query, [record_id])
            new_version = version_result.fetchone()[0]
        else:
            version_result = await connection.execute(version_query, [record_id])
            new_version_row = await version_result.fetchone()
            new_version = new_version_row[0]

        return {
            "success": True,
            "status": LockStatus.SUCCESS.value,
            "updated": True,
            "new_version": new_version,
            "rows_affected": rows_affected,
            "retry_count": retry_count,
            "conflict_info": conflict_info,
            "forced_update": True,
        }

    except Exception as e:
        return {
            "success": False,
            "status": "error",
            "error": f"Force update failed: {e}",
            "retry_count": retry_count,
            "conflict_info": conflict_info,
        }
async def _batch_update_with_version(
    self, kwargs: Dict[str, Any]
) -> Dict[str, Any]:
    """Apply version-checked updates to a batch of records, one at a time.

    Each batch item is delegated to ``_update_with_version`` with the shared
    connection/table/resolution settings, and the per-record outcomes are
    aggregated into totals and a success rate.

    Args:
        kwargs: Requires ``connection``, ``table_name`` and ``batch_updates``
            (a list of dicts with ``record_id``, ``update_data`` and
            ``expected_version``). Optional keys: ``conflict_resolution``,
            ``version_field`` and ``id_field``, falling back to instance
            defaults.

    Returns:
        Dict with ``success``, ``status``, per-record ``results``,
        ``total_operations``, ``total_updated``, ``total_conflicts`` and
        ``success_rate`` (0 when the batch is empty).
    """
    conn = kwargs["connection"]
    table = kwargs["table_name"]
    items = kwargs["batch_updates"]
    resolution = ConflictResolution(
        kwargs.get("conflict_resolution", self.default_conflict_resolution.value)
    )
    ver_field = kwargs.get("version_field", self.version_field)
    key_field = kwargs.get("id_field", "id")

    per_record = []
    updated = 0
    conflicts = 0

    for item in items:
        rid = item["record_id"]
        # Delegate each record to the single-record path with shared settings.
        outcome = await self._update_with_version(
            {
                "connection": conn,
                "table_name": table,
                "record_id": rid,
                "update_data": item["update_data"],
                "expected_version": item["expected_version"],
                "conflict_resolution": resolution.value,
                "version_field": ver_field,
                "id_field": key_field,
            }
        )
        per_record.append({"record_id": rid, **outcome})

        if outcome.get("success"):
            updated += 1
        elif outcome.get("status") == LockStatus.VERSION_CONFLICT.value:
            conflicts += 1

    return {
        "success": True,
        "status": "batch_completed",
        "results": per_record,
        "total_operations": len(items),
        "total_updated": updated,
        "total_conflicts": conflicts,
        "success_rate": updated / len(items) if items else 0,
    }
|
799
|
+
|
800
|
+
async def _get_lock_metrics(self) -> Dict[str, Any]:
|
801
|
+
"""Get current lock contention metrics."""
|
802
|
+
total_ops = self.lock_metrics["total_operations"]
|
803
|
+
|
804
|
+
if total_ops > 0:
|
805
|
+
self.lock_metrics["avg_retry_count"] = (
|
806
|
+
self.lock_metrics["retries_performed"] / total_ops
|
807
|
+
)
|
808
|
+
|
809
|
+
return {
|
810
|
+
"success": True,
|
811
|
+
"metrics": dict(self.lock_metrics),
|
812
|
+
"conflict_rate": (
|
813
|
+
self.lock_metrics["version_conflicts"] / total_ops
|
814
|
+
if total_ops > 0
|
815
|
+
else 0
|
816
|
+
),
|
817
|
+
"success_rate": (
|
818
|
+
self.lock_metrics["successful_operations"] / total_ops
|
819
|
+
if total_ops > 0
|
820
|
+
else 0
|
821
|
+
),
|
822
|
+
}
|
823
|
+
|
824
|
+
async def _analyze_conflict_patterns(self) -> Dict[str, Any]:
|
825
|
+
"""Analyze conflict patterns for optimization insights."""
|
826
|
+
if not self.conflict_history:
|
827
|
+
return {
|
828
|
+
"success": True,
|
829
|
+
"analysis": "No conflicts recorded yet",
|
830
|
+
}
|
831
|
+
|
832
|
+
# Analyze conflict patterns
|
833
|
+
table_conflicts = {}
|
834
|
+
retry_patterns = {}
|
835
|
+
|
836
|
+
for conflict in self.conflict_history:
|
837
|
+
table = conflict["table_name"]
|
838
|
+
retry_count = conflict["retry_count"]
|
839
|
+
|
840
|
+
table_conflicts[table] = table_conflicts.get(table, 0) + 1
|
841
|
+
retry_patterns[retry_count] = retry_patterns.get(retry_count, 0) + 1
|
842
|
+
|
843
|
+
# Find hotspot tables
|
844
|
+
hotspot_tables = sorted(
|
845
|
+
table_conflicts.items(), key=lambda x: x[1], reverse=True
|
846
|
+
)[:5]
|
847
|
+
|
848
|
+
return {
|
849
|
+
"success": True,
|
850
|
+
"analysis": {
|
851
|
+
"total_conflicts": len(self.conflict_history),
|
852
|
+
"hotspot_tables": hotspot_tables,
|
853
|
+
"retry_distribution": retry_patterns,
|
854
|
+
"avg_retries": sum(
|
855
|
+
conflict["retry_count"] for conflict in self.conflict_history
|
856
|
+
)
|
857
|
+
/ len(self.conflict_history),
|
858
|
+
"recommendations": self._generate_optimization_recommendations(
|
859
|
+
table_conflicts, retry_patterns
|
860
|
+
),
|
861
|
+
},
|
862
|
+
}
|
863
|
+
|
864
|
+
def _generate_optimization_recommendations(
|
865
|
+
self, table_conflicts: Dict[str, int], retry_patterns: Dict[int, int]
|
866
|
+
) -> List[str]:
|
867
|
+
"""Generate optimization recommendations based on conflict patterns."""
|
868
|
+
recommendations = []
|
869
|
+
|
870
|
+
# High conflict tables
|
871
|
+
high_conflict_tables = [
|
872
|
+
table
|
873
|
+
for table, conflicts in table_conflicts.items()
|
874
|
+
if conflicts > self.lock_metrics["total_operations"] * 0.1
|
875
|
+
]
|
876
|
+
|
877
|
+
if high_conflict_tables:
|
878
|
+
recommendations.append(
|
879
|
+
f"Consider partitioning or optimizing queries for high-conflict tables: {high_conflict_tables}"
|
880
|
+
)
|
881
|
+
|
882
|
+
# High retry rates
|
883
|
+
total_retries = sum(retry_patterns.values())
|
884
|
+
high_retry_rate = (
|
885
|
+
sum(
|
886
|
+
count
|
887
|
+
for retry_count, count in retry_patterns.items()
|
888
|
+
if retry_count >= self.max_retries
|
889
|
+
)
|
890
|
+
/ total_retries
|
891
|
+
if total_retries > 0
|
892
|
+
else 0
|
893
|
+
)
|
894
|
+
|
895
|
+
if high_retry_rate > 0.2:
|
896
|
+
recommendations.append(
|
897
|
+
"High retry exhaustion rate detected. Consider increasing max_retries or using different conflict resolution strategy."
|
898
|
+
)
|
899
|
+
|
900
|
+
# Merge opportunities
|
901
|
+
if self.lock_metrics["merge_conflicts"] > 0:
|
902
|
+
recommendations.append(
|
903
|
+
"Merge conflicts detected. Review merge strategies for better conflict resolution."
|
904
|
+
)
|
905
|
+
|
906
|
+
return recommendations
|