attune-ai: attune_ai-2.1.5-py3-none-any.whl → attune_ai-2.2.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (120)
  1. attune/cli/__init__.py +3 -59
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +7 -15
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +9 -3
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/dashboard/app.py +3 -1
  20. attune/dashboard/simple_server.py +3 -1
  21. attune/dashboard/standalone_server.py +7 -3
  22. attune/mcp/server.py +54 -102
  23. attune/memory/long_term.py +0 -2
  24. attune/memory/short_term/__init__.py +84 -0
  25. attune/memory/short_term/base.py +467 -0
  26. attune/memory/short_term/batch.py +219 -0
  27. attune/memory/short_term/caching.py +227 -0
  28. attune/memory/short_term/conflicts.py +265 -0
  29. attune/memory/short_term/cross_session.py +122 -0
  30. attune/memory/short_term/facade.py +655 -0
  31. attune/memory/short_term/pagination.py +215 -0
  32. attune/memory/short_term/patterns.py +271 -0
  33. attune/memory/short_term/pubsub.py +286 -0
  34. attune/memory/short_term/queues.py +244 -0
  35. attune/memory/short_term/security.py +300 -0
  36. attune/memory/short_term/sessions.py +250 -0
  37. attune/memory/short_term/streams.py +249 -0
  38. attune/memory/short_term/timelines.py +234 -0
  39. attune/memory/short_term/transactions.py +186 -0
  40. attune/memory/short_term/working.py +252 -0
  41. attune/meta_workflows/cli_commands/__init__.py +3 -0
  42. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  43. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  44. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  45. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  48. attune/models/adaptive_routing.py +4 -8
  49. attune/models/auth_cli.py +3 -9
  50. attune/models/auth_strategy.py +2 -4
  51. attune/models/telemetry/analytics.py +0 -2
  52. attune/models/telemetry/backend.py +0 -3
  53. attune/models/telemetry/storage.py +0 -2
  54. attune/orchestration/_strategies/__init__.py +156 -0
  55. attune/orchestration/_strategies/base.py +231 -0
  56. attune/orchestration/_strategies/conditional_strategies.py +373 -0
  57. attune/orchestration/_strategies/conditions.py +369 -0
  58. attune/orchestration/_strategies/core_strategies.py +491 -0
  59. attune/orchestration/_strategies/data_classes.py +64 -0
  60. attune/orchestration/_strategies/nesting.py +233 -0
  61. attune/orchestration/execution_strategies.py +58 -1567
  62. attune/orchestration/meta_orchestrator.py +1 -3
  63. attune/project_index/scanner.py +1 -3
  64. attune/project_index/scanner_parallel.py +7 -5
  65. attune/socratic_router.py +1 -3
  66. attune/telemetry/agent_coordination.py +9 -3
  67. attune/telemetry/agent_tracking.py +16 -3
  68. attune/telemetry/approval_gates.py +22 -5
  69. attune/telemetry/cli.py +1 -3
  70. attune/telemetry/commands/dashboard_commands.py +24 -8
  71. attune/telemetry/event_streaming.py +8 -2
  72. attune/telemetry/feedback_loop.py +10 -2
  73. attune/tools.py +1 -0
  74. attune/workflow_commands.py +1 -3
  75. attune/workflows/__init__.py +53 -10
  76. attune/workflows/autonomous_test_gen.py +158 -102
  77. attune/workflows/base.py +48 -672
  78. attune/workflows/batch_processing.py +1 -3
  79. attune/workflows/compat.py +156 -0
  80. attune/workflows/cost_mixin.py +141 -0
  81. attune/workflows/data_classes.py +92 -0
  82. attune/workflows/document_gen/workflow.py +11 -14
  83. attune/workflows/history.py +62 -37
  84. attune/workflows/llm_base.py +1 -3
  85. attune/workflows/migration.py +422 -0
  86. attune/workflows/output.py +2 -7
  87. attune/workflows/parsing_mixin.py +427 -0
  88. attune/workflows/perf_audit.py +3 -1
  89. attune/workflows/progress.py +9 -11
  90. attune/workflows/release_prep.py +5 -1
  91. attune/workflows/routing.py +0 -2
  92. attune/workflows/secure_release.py +2 -1
  93. attune/workflows/security_audit.py +19 -14
  94. attune/workflows/security_audit_phase3.py +28 -22
  95. attune/workflows/seo_optimization.py +27 -27
  96. attune/workflows/test_gen/test_templates.py +1 -4
  97. attune/workflows/test_gen/workflow.py +0 -2
  98. attune/workflows/test_gen_behavioral.py +6 -19
  99. attune/workflows/test_gen_parallel.py +6 -4
  100. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
  101. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/RECORD +116 -91
  102. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
  103. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  104. attune_llm/agent_factory/__init__.py +6 -6
  105. attune_llm/commands/__init__.py +10 -10
  106. attune_llm/commands/models.py +3 -3
  107. attune_llm/config/__init__.py +8 -8
  108. attune_llm/learning/__init__.py +3 -3
  109. attune_llm/learning/extractor.py +5 -3
  110. attune_llm/learning/storage.py +5 -3
  111. attune_llm/security/__init__.py +17 -17
  112. attune_llm/utils/tokens.py +3 -1
  113. attune/cli_legacy.py +0 -3978
  114. attune/memory/short_term.py +0 -2192
  115. attune/workflows/manage_docs.py +0 -87
  116. attune/workflows/test5.py +0 -125
  117. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
  118. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
  119. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  120. {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
attune/memory/short_term/streams.py
@@ -0,0 +1,249 @@
+ """Redis Streams for ordered event logs.
+
+ This module provides stream operations for event sourcing:
+ - Append: Add events to stream with auto-generated IDs
+ - Read: Get events from specific position
+ - Read new: Block and wait for new events
+
+ Key Prefix: PREFIX_STREAM = "stream:"
+
+ Use Cases:
+ - Event sourcing
+ - Activity logs
+ - Message queues with persistence
+
+ Classes:
+     StreamManager: Redis Streams operations
+
+ Example:
+     >>> from attune.memory.short_term.streams import StreamManager
+     >>> from attune.memory.types import AgentCredentials, AccessTier
+     >>> streams = StreamManager(base_ops)
+     >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+     >>> entry_id = streams.append("audit", {"action": "promoted"}, creds)
+     >>> entries = streams.read("audit", creds, count=50)
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ import json
+ import time
+ from datetime import datetime
+ from typing import TYPE_CHECKING
+
+ import structlog
+
+ from attune.memory.types import (
+     AgentCredentials,
+ )
+
+ if TYPE_CHECKING:
+     from attune.memory.short_term.base import BaseOperations
+
+ logger = structlog.get_logger(__name__)
+
+
+ class StreamManager:
+     """Redis Streams operations for audit trails and event logs.
+
+     Provides ordered, persistent event logging using Redis Streams.
+     Features include automatic ID generation, max length trimming,
+     and blocking reads for real-time event processing.
+
+     The class manages its own mock stream storage for testing,
+     composed with BaseOperations for Redis client access.
+
+     Attributes:
+         PREFIX_STREAM: Key prefix for stream names
+
+     Example:
+         >>> streams = StreamManager(base_ops)
+         >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+         >>> entry_id = streams.append("audit", {"action": "pattern_promoted"}, creds)
+         >>> entries = streams.read("audit", creds, count=100)
+         >>> for eid, data in entries:
+         ...     print(f"{eid}: {data}")
+     """
+
+     PREFIX_STREAM = "stream:"
+
+     def __init__(self, base: BaseOperations) -> None:
+         """Initialize stream manager.
+
+         Args:
+             base: BaseOperations instance for Redis client access
+         """
+         self._base = base
+         self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
+
+     def append(
+         self,
+         stream_name: str,
+         data: dict,
+         credentials: AgentCredentials,
+         max_len: int = 10000,
+     ) -> str | None:
+         """Append an entry to a Redis Stream for audit trails.
+
+         Streams provide:
+         - Ordered, persistent event log
+         - Consumer groups for distributed processing
+         - Time-based retention
+
+         Args:
+             stream_name: Name of the stream
+             data: Event data to append
+             credentials: Agent credentials (must be CONTRIBUTOR+)
+             max_len: Maximum stream length (older entries trimmed)
+
+         Returns:
+             Entry ID if successful, None otherwise
+
+         Raises:
+             PermissionError: If credentials lack write access
+
+         Example:
+             >>> entry_id = streams.append(
+             ...     "audit",
+             ...     {"action": "pattern_promoted", "pattern_id": "xyz"},
+             ...     creds
+             ... )
+             '1704067200000-0'
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot write to stream. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         start_time = time.perf_counter()
+         full_stream = f"{self.PREFIX_STREAM}{stream_name}"
+
+         entry = {
+             "agent_id": credentials.agent_id,
+             "timestamp": datetime.now().isoformat(),
+             **{
+                 str(k): json.dumps(v) if isinstance(v, dict | list) else str(v)
+                 for k, v in data.items()
+             },
+         }
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_stream not in self._mock_streams:
+                 self._mock_streams[full_stream] = []
+             entry_id = f"{int(datetime.now().timestamp() * 1000)}-0"
+             self._mock_streams[full_stream].append((entry_id, entry))
+             # Trim to max_len
+             if len(self._mock_streams[full_stream]) > max_len:
+                 self._mock_streams[full_stream] = self._mock_streams[full_stream][
+                     -max_len:
+                 ]
+             latency_ms = (time.perf_counter() - start_time) * 1000
+             self._base._metrics.record_operation("stream_append", latency_ms)
+             return entry_id
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return None
+
+         entry_id = self._base._client.xadd(full_stream, entry, maxlen=max_len)
+         latency_ms = (time.perf_counter() - start_time) * 1000
+         self._base._metrics.record_operation("stream_append", latency_ms)
+
+         return str(entry_id) if entry_id else None
+
+     def read(
+         self,
+         stream_name: str,
+         credentials: AgentCredentials,
+         start_id: str = "0",
+         count: int = 100,
+     ) -> list[tuple[str, dict]]:
+         """Read entries from a Redis Stream.
+
+         Args:
+             stream_name: Name of the stream
+             credentials: Agent credentials
+             start_id: Start reading from this ID ("0" = beginning)
+             count: Maximum entries to read
+
+         Returns:
+             List of (entry_id, data) tuples
+
+         Example:
+             >>> entries = streams.read("audit", creds, count=50)
+             >>> for entry_id, data in entries:
+             ...     print(f"{entry_id}: {data}")
+         """
+         full_stream = f"{self.PREFIX_STREAM}{stream_name}"
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_stream not in self._mock_streams:
+                 return []
+             entries = self._mock_streams[full_stream]
+             # Filter by start_id (simple comparison)
+             filtered = [(eid, data) for eid, data in entries if eid > start_id]
+             return filtered[:count]
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return []
+
+         result = self._base._client.xrange(full_stream, min=start_id, count=count)
+         return [
+             (str(entry_id), {str(k): v for k, v in data.items()})
+             for entry_id, data in result
+         ]
+
+     def read_new(
+         self,
+         stream_name: str,
+         credentials: AgentCredentials,
+         block_ms: int = 0,
+         count: int = 100,
+     ) -> list[tuple[str, dict]]:
+         """Read only new entries from a stream (blocking read).
+
+         Blocks and waits for new entries to arrive. Useful for
+         real-time event processing.
+
+         Args:
+             stream_name: Name of the stream
+             credentials: Agent credentials
+             block_ms: Milliseconds to block waiting (0 = no block)
+             count: Maximum entries to read
+
+         Returns:
+             List of (entry_id, data) tuples
+
+         Example:
+             >>> # Wait up to 5 seconds for new entries
+             >>> entries = streams.read_new("audit", creds, block_ms=5000)
+         """
+         full_stream = f"{self.PREFIX_STREAM}{stream_name}"
+
+         # Handle mock mode - doesn't support blocking reads
+         if self._base.use_mock:
+             return []
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return []
+
+         result = self._base._client.xread(
+             {full_stream: "$"}, block=block_ms, count=count
+         )
+         if not result:
+             return []
+
+         # Result format: [(stream_name, [(entry_id, data), ...])]
+         entries = []
+         for _stream, stream_entries in result:
+             for entry_id, data in stream_entries:
+                 entries.append((str(entry_id), {str(k): v for k, v in data.items()}))
+         return entries
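
Example (a minimal sketch, not from the package): the mock path above lets StreamManager run without a Redis server. FakeBaseOps, _Metrics, and FakeCreds below are hypothetical stand-ins for attune's BaseOperations, metrics recorder, and AgentCredentials; only the attributes StreamManager actually touches (use_mock, _client, _metrics, agent_id, can_stage()) are stubbed.

from dataclasses import dataclass, field

from attune.memory.short_term.streams import StreamManager


class _Metrics:
    def record_operation(self, name: str, latency_ms: float) -> None:
        print(f"{name}: {latency_ms:.2f} ms")


@dataclass
class FakeBaseOps:
    use_mock: bool = True  # force StreamManager's in-memory path
    _client: object = None  # no Redis connection needed
    _metrics: _Metrics = field(default_factory=_Metrics)


@dataclass
class FakeCreds:
    agent_id: str

    def can_stage(self) -> bool:  # the real type checks CONTRIBUTOR+ tier
        return True


streams = StreamManager(FakeBaseOps())
creds = FakeCreds("agent_1")
entry_id = streams.append("audit", {"action": "promoted", "meta": {"by": "ci"}}, creds)
print(entry_id)  # e.g. '1704067200000-0'
print(streams.read("audit", creds, count=10))

Because use_mock is True, append and read go through the in-memory _mock_streams dict and no connection is ever opened.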
attune/memory/short_term/timelines.py
@@ -0,0 +1,234 @@
+ """Time-window queries using Redis sorted sets.
+
+ This module provides timeline operations for time-series data:
+ - Add: Insert event with timestamp score
+ - Query: Get events in time window
+ - Count: Count events in time window
+
+ Key Prefix: PREFIX_TIMELINE = "timeline:"
+
+ Use Cases:
+ - Activity timelines
+ - Time-based analytics
+ - Rate limiting windows
+
+ Classes:
+     TimelineManager: Redis sorted set operations for timelines
+
+ Example:
+     >>> from attune.memory.short_term.timelines import TimelineManager
+     >>> from attune.memory.types import AgentCredentials, AccessTier, TimeWindowQuery
+     >>> timelines = TimelineManager(base_ops)
+     >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+     >>> timelines.add("events", "evt_1", {"action": "login"}, creds)
+     >>> events = timelines.query("events", creds, TimeWindowQuery(limit=50))
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ import json
+ from datetime import datetime
+ from typing import TYPE_CHECKING
+
+ import structlog
+
+ from attune.memory.types import (
+     AgentCredentials,
+     TimeWindowQuery,
+ )
+
+ if TYPE_CHECKING:
+     from attune.memory.short_term.base import BaseOperations
+
+ logger = structlog.get_logger(__name__)
+
+
+ class TimelineManager:
+     """Redis sorted set operations for timeline queries.
+
+     Provides time-series event storage using Redis sorted sets,
+     where events are scored by timestamp for efficient time-window queries.
+
+     The class manages its own mock sorted set storage for testing,
+     composed with BaseOperations for Redis client access.
+
+     Attributes:
+         PREFIX_TIMELINE: Key prefix for timeline names
+
+     Example:
+         >>> timelines = TimelineManager(base_ops)
+         >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+         >>> timelines.add("agent_events", "evt_123", {"action": "login"}, creds)
+         >>> query = TimeWindowQuery(limit=100)
+         >>> events = timelines.query("agent_events", creds, query)
+     """
+
+     PREFIX_TIMELINE = "timeline:"
+
+     def __init__(self, base: BaseOperations) -> None:
+         """Initialize timeline manager.
+
+         Args:
+             base: BaseOperations instance for Redis client access
+         """
+         self._base = base
+         self._mock_sorted_sets: dict[str, list[tuple[float, str]]] = {}
+
+     def add(
+         self,
+         timeline_name: str,
+         event_id: str,
+         data: dict,
+         credentials: AgentCredentials,
+         timestamp: datetime | None = None,
+     ) -> bool:
+         """Add an event to a timeline (sorted set by timestamp).
+
+         Args:
+             timeline_name: Name of the timeline
+             event_id: Unique event identifier
+             data: Event data
+             credentials: Agent credentials (must be CONTRIBUTOR+)
+             timestamp: Event timestamp (defaults to now)
+
+         Returns:
+             True if added successfully
+
+         Raises:
+             PermissionError: If credentials lack write access
+
+         Example:
+             >>> timelines.add(
+             ...     "audit_log",
+             ...     "evt_001",
+             ...     {"action": "pattern_promoted"},
+             ...     creds
+             ... )
+             True
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot write to timeline. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
+         ts = timestamp or datetime.now()
+         score = ts.timestamp()
+
+         payload = json.dumps(
+             {
+                 "event_id": event_id,
+                 "timestamp": ts.isoformat(),
+                 "agent_id": credentials.agent_id,
+                 "data": data,
+             },
+         )
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_timeline not in self._mock_sorted_sets:
+                 self._mock_sorted_sets[full_timeline] = []
+             self._mock_sorted_sets[full_timeline].append((score, payload))
+             self._mock_sorted_sets[full_timeline].sort(key=lambda x: x[0])
+             return True
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return False
+
+         self._base._client.zadd(full_timeline, {payload: score})
+         return True
+
+     def query(
+         self,
+         timeline_name: str,
+         credentials: AgentCredentials,
+         query: TimeWindowQuery | None = None,
+     ) -> list[dict]:
+         """Query events from a timeline within a time window.
+
+         Args:
+             timeline_name: Name of the timeline
+             credentials: Agent credentials
+             query: Time window query parameters
+
+         Returns:
+             List of events in the time window
+
+         Example:
+             >>> from datetime import datetime, timedelta
+             >>> query = TimeWindowQuery(
+             ...     start_time=datetime.now() - timedelta(hours=1),
+             ...     end_time=datetime.now(),
+             ...     limit=50
+             ... )
+             >>> events = timelines.query("agent_events", creds, query)
+         """
+         full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
+         q = query or TimeWindowQuery()
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_timeline not in self._mock_sorted_sets:
+                 return []
+             entries = self._mock_sorted_sets[full_timeline]
+             filtered = [
+                 json.loads(payload)
+                 for score, payload in entries
+                 if q.start_score <= score <= q.end_score
+             ]
+             return filtered[q.offset : q.offset + q.limit]
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return []
+
+         results = self._base._client.zrangebyscore(
+             full_timeline,
+             min=q.start_score,
+             max=q.end_score,
+             start=q.offset,
+             num=q.limit,
+         )
+
+         return [json.loads(r) for r in results]
+
+     def count(
+         self,
+         timeline_name: str,
+         credentials: AgentCredentials,
+         query: TimeWindowQuery | None = None,
+     ) -> int:
+         """Count events in a timeline within a time window.
+
+         Args:
+             timeline_name: Name of the timeline
+             credentials: Agent credentials
+             query: Time window query parameters
+
+         Returns:
+             Number of events in the time window
+
+         Example:
+             >>> count = timelines.count("agent_events", creds)
+             >>> print(f"Total events: {count}")
+         """
+         full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
+         q = query or TimeWindowQuery()
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_timeline not in self._mock_sorted_sets:
+                 return 0
+             entries = self._mock_sorted_sets[full_timeline]
+             return len([1 for score, _ in entries if q.start_score <= score <= q.end_score])
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return 0
+
+         return int(self._base._client.zcount(full_timeline, q.start_score, q.end_score))
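
For readers who want the raw commands, TimelineManager's add/query/count cycle reduces to three sorted-set calls. A sketch in plain redis-py follows; the key name and payload shape mirror the diff, while the one-hour window and variable names are illustrative choices of ours.

import json
import time

import redis

r = redis.Redis(decode_responses=True)
key = "timeline:agent_events"

# add(): each event is scored by its UNIX timestamp
now = time.time()
payload = json.dumps({"event_id": "evt_123", "data": {"action": "login"}})
r.zadd(key, {payload: now})

# query(): events from the last hour, oldest first (offset 0, limit 100)
events = [
    json.loads(raw)
    for raw in r.zrangebyscore(key, min=now - 3600, max=now, start=0, num=100)
]

# count(): number of events in the same window
total = r.zcount(key, now - 3600, now)
print(len(events), total)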
attune/memory/short_term/transactions.py
@@ -0,0 +1,186 @@
+ """Atomic operations using Redis transactions.
+
+ This module provides atomic multi-step operations using MULTI/EXEC:
+ - Pattern promotion with rollback on failure
+ - Consistent state updates across multiple keys
+
+ Use Cases:
+ - Pattern lifecycle (stage -> promote/reject)
+ - Session management with consistency guarantees
+ - Any multi-key operation requiring atomicity
+
+ Classes:
+     TransactionManager: Atomic operations using Redis transactions
+
+ Example:
+     >>> from attune.memory.short_term.transactions import TransactionManager
+     >>> from attune.memory.types import AgentCredentials, AccessTier
+     >>> transactions = TransactionManager(base_ops, caching_ops)
+     >>> creds = AgentCredentials("agent_1", AccessTier.VALIDATOR)
+     >>> success, pattern, msg = transactions.atomic_promote_pattern("pat_123", creds)
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ import json
+ from datetime import datetime
+ from typing import TYPE_CHECKING
+
+ import redis
+ import structlog
+
+ from attune.memory.types import (
+     AgentCredentials,
+     StagedPattern,
+ )
+
+ if TYPE_CHECKING:
+     from attune.memory.short_term.base import BaseOperations
+     from attune.memory.short_term.caching import CachingOperations
+
+ logger = structlog.get_logger(__name__)
+
+
+ class TransactionManager:
+     """Atomic operations using Redis transactions.
+
+     Provides atomic multi-step operations using Redis WATCH/MULTI/EXEC
+     for operations that need consistency guarantees across multiple keys.
+
+     The class requires access to both BaseOperations and CachingOperations
+     to coordinate atomic updates with local cache invalidation.
+
+     Attributes:
+         PREFIX_STAGED: Key prefix for staged patterns namespace
+
+     Example:
+         >>> transactions = TransactionManager(base_ops, caching_ops)
+         >>> creds = AgentCredentials("agent_1", AccessTier.VALIDATOR)
+         >>> success, pattern, msg = transactions.atomic_promote_pattern(
+         ...     "pat_123", creds, min_confidence=0.7
+         ... )
+         >>> if success:
+         ...     library.add(pattern)
+     """
+
+     PREFIX_STAGED = "empathy:staged:"
+
+     def __init__(self, base: BaseOperations, caching: CachingOperations) -> None:
+         """Initialize transaction manager.
+
+         Args:
+             base: BaseOperations instance for Redis client access
+             caching: CachingOperations instance for cache invalidation
+         """
+         self._base = base
+         self._caching = caching
+
+     def atomic_promote_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+         min_confidence: float = 0.0,
+     ) -> tuple[bool, StagedPattern | None, str]:
+         """Atomically promote a pattern with validation.
+
+         Uses Redis transaction (WATCH/MULTI/EXEC) to ensure:
+         - Pattern exists and meets confidence threshold
+         - Pattern is removed from staging atomically
+         - No race conditions with concurrent operations
+
+         Args:
+             pattern_id: Pattern to promote
+             credentials: Must be VALIDATOR or higher
+             min_confidence: Minimum confidence threshold
+
+         Returns:
+             Tuple of (success, pattern, message)
+
+         Raises:
+             ValueError: If pattern_id is empty or min_confidence out of range
+
+         Example:
+             >>> success, pattern, msg = transactions.atomic_promote_pattern(
+             ...     "pat_123", creds, min_confidence=0.7
+             ... )
+             >>> if success:
+             ...     library.add(pattern)
+         """
+         # Pattern 1: String ID validation
+         if not pattern_id or not pattern_id.strip():
+             raise ValueError(f"pattern_id cannot be empty. Got: {pattern_id!r}")
+
+         # Pattern 4: Range validation
+         if not 0.0 <= min_confidence <= 1.0:
+             raise ValueError(
+                 f"min_confidence must be between 0.0 and 1.0, got {min_confidence}"
+             )
+
+         if not credentials.can_validate():
+             return False, None, "Requires VALIDATOR tier or higher"
+
+         key = f"{self.PREFIX_STAGED}{pattern_id}"
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if key not in self._base._mock_storage:
+                 return False, None, "Pattern not found"
+             value, expires = self._base._mock_storage[key]
+             if expires and datetime.now().timestamp() >= expires:
+                 return False, None, "Pattern expired"
+             pattern = StagedPattern.from_dict(json.loads(str(value)))
+             if pattern.confidence < min_confidence:
+                 return (
+                     False,
+                     None,
+                     f"Confidence {pattern.confidence} below threshold {min_confidence}",
+                 )
+             del self._base._mock_storage[key]
+             # Also invalidate local cache
+             self._caching.invalidate(key)
+             return True, pattern, "Pattern promoted successfully"
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return False, None, "Redis not connected"
+
+         # Use WATCH for optimistic locking
+         try:
+             self._base._client.watch(key)
+             raw = self._base._client.get(key)
+
+             if raw is None:
+                 self._base._client.unwatch()
+                 return False, None, "Pattern not found"
+
+             pattern = StagedPattern.from_dict(json.loads(raw))
+
+             if pattern.confidence < min_confidence:
+                 self._base._client.unwatch()
+                 return (
+                     False,
+                     None,
+                     f"Confidence {pattern.confidence} below threshold {min_confidence}",
+                 )
+
+             # Execute atomic delete
+             pipe = self._base._client.pipeline(True)
+             pipe.delete(key)
+             pipe.execute()
+
+             # Also invalidate local cache
+             self._caching.invalidate(key)
+
+             return True, pattern, "Pattern promoted successfully"
+
+         except redis.WatchError:
+             return False, None, "Pattern was modified by another process"
+         finally:
+             try:
+                 self._base._client.unwatch()
+             except Exception:  # noqa: BLE001
+                 # INTENTIONAL: Best effort cleanup - don't fail on unwatch errors
+                 pass
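
The core of atomic_promote_pattern is the classic optimistic-locking loop. As a point of comparison, here is a sketch of the same WATCH/MULTI/EXEC flow written against redis-py's pipeline API, where WATCH is exposed on pipeline objects; the key prefix and result messages mirror the diff, while the JSON handling and min_confidence value are simplified illustrations.

import json

import redis

r = redis.Redis(decode_responses=True)
key = "empathy:staged:pat_123"
min_confidence = 0.7

with r.pipeline() as pipe:
    try:
        pipe.watch(key)              # optimistic lock on the staged pattern
        raw = pipe.get(key)          # while watching, reads execute immediately
        if raw is None:
            pipe.unwatch()
            result = (False, None, "Pattern not found")
        elif json.loads(raw).get("confidence", 0.0) < min_confidence:
            pipe.unwatch()
            result = (False, None, "Confidence below threshold")
        else:
            pattern = json.loads(raw)
            pipe.multi()             # queue the atomic section (MULTI)
            pipe.delete(key)         # remove from staging
            pipe.execute()           # EXEC; raises WatchError if key changed
            result = (True, pattern, "Pattern promoted successfully")
    except redis.WatchError:
        result = (False, None, "Pattern was modified by another process")

print(result)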