attune-ai 2.1.4__py3-none-any.whl → 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. attune/cli/__init__.py +3 -55
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +7 -15
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +34 -28
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/core.py +190 -0
  20. attune/dashboard/app.py +4 -2
  21. attune/dashboard/simple_server.py +3 -1
  22. attune/dashboard/standalone_server.py +7 -3
  23. attune/mcp/server.py +54 -102
  24. attune/memory/long_term.py +0 -2
  25. attune/memory/short_term/__init__.py +84 -0
  26. attune/memory/short_term/base.py +467 -0
  27. attune/memory/short_term/batch.py +219 -0
  28. attune/memory/short_term/caching.py +227 -0
  29. attune/memory/short_term/conflicts.py +265 -0
  30. attune/memory/short_term/cross_session.py +122 -0
  31. attune/memory/short_term/facade.py +655 -0
  32. attune/memory/short_term/pagination.py +215 -0
  33. attune/memory/short_term/patterns.py +271 -0
  34. attune/memory/short_term/pubsub.py +286 -0
  35. attune/memory/short_term/queues.py +244 -0
  36. attune/memory/short_term/security.py +300 -0
  37. attune/memory/short_term/sessions.py +250 -0
  38. attune/memory/short_term/streams.py +249 -0
  39. attune/memory/short_term/timelines.py +234 -0
  40. attune/memory/short_term/transactions.py +186 -0
  41. attune/memory/short_term/working.py +252 -0
  42. attune/meta_workflows/cli_commands/__init__.py +3 -0
  43. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  44. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  45. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  48. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  49. attune/meta_workflows/workflow.py +1 -1
  50. attune/models/adaptive_routing.py +4 -8
  51. attune/models/auth_cli.py +3 -9
  52. attune/models/auth_strategy.py +2 -4
  53. attune/models/provider_config.py +20 -1
  54. attune/models/telemetry/analytics.py +0 -2
  55. attune/models/telemetry/backend.py +0 -3
  56. attune/models/telemetry/storage.py +0 -2
  57. attune/orchestration/_strategies/__init__.py +156 -0
  58. attune/orchestration/_strategies/base.py +231 -0
  59. attune/orchestration/_strategies/conditional_strategies.py +373 -0
  60. attune/orchestration/_strategies/conditions.py +369 -0
  61. attune/orchestration/_strategies/core_strategies.py +491 -0
  62. attune/orchestration/_strategies/data_classes.py +64 -0
  63. attune/orchestration/_strategies/nesting.py +233 -0
  64. attune/orchestration/execution_strategies.py +58 -1567
  65. attune/orchestration/meta_orchestrator.py +1 -3
  66. attune/project_index/scanner.py +1 -3
  67. attune/project_index/scanner_parallel.py +7 -5
  68. attune/socratic_router.py +1 -3
  69. attune/telemetry/agent_coordination.py +9 -3
  70. attune/telemetry/agent_tracking.py +16 -3
  71. attune/telemetry/approval_gates.py +22 -5
  72. attune/telemetry/cli.py +3 -3
  73. attune/telemetry/commands/dashboard_commands.py +24 -8
  74. attune/telemetry/event_streaming.py +8 -2
  75. attune/telemetry/feedback_loop.py +10 -2
  76. attune/tools.py +1 -0
  77. attune/workflow_commands.py +1 -3
  78. attune/workflows/__init__.py +53 -10
  79. attune/workflows/autonomous_test_gen.py +160 -104
  80. attune/workflows/base.py +48 -664
  81. attune/workflows/batch_processing.py +2 -4
  82. attune/workflows/compat.py +156 -0
  83. attune/workflows/cost_mixin.py +141 -0
  84. attune/workflows/data_classes.py +92 -0
  85. attune/workflows/document_gen/workflow.py +11 -14
  86. attune/workflows/history.py +62 -37
  87. attune/workflows/llm_base.py +2 -4
  88. attune/workflows/migration.py +422 -0
  89. attune/workflows/output.py +3 -9
  90. attune/workflows/parsing_mixin.py +427 -0
  91. attune/workflows/perf_audit.py +3 -1
  92. attune/workflows/progress.py +10 -13
  93. attune/workflows/release_prep.py +5 -1
  94. attune/workflows/routing.py +0 -2
  95. attune/workflows/secure_release.py +2 -1
  96. attune/workflows/security_audit.py +19 -14
  97. attune/workflows/security_audit_phase3.py +28 -22
  98. attune/workflows/seo_optimization.py +29 -29
  99. attune/workflows/test_gen/test_templates.py +1 -4
  100. attune/workflows/test_gen/workflow.py +0 -2
  101. attune/workflows/test_gen_behavioral.py +7 -20
  102. attune/workflows/test_gen_parallel.py +6 -4
  103. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
  104. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/RECORD +119 -94
  105. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
  106. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  107. attune_llm/agent_factory/__init__.py +6 -6
  108. attune_llm/commands/__init__.py +10 -10
  109. attune_llm/commands/models.py +3 -3
  110. attune_llm/config/__init__.py +8 -8
  111. attune_llm/learning/__init__.py +3 -3
  112. attune_llm/learning/extractor.py +5 -3
  113. attune_llm/learning/storage.py +5 -3
  114. attune_llm/security/__init__.py +17 -17
  115. attune_llm/utils/tokens.py +3 -1
  116. attune/cli_legacy.py +0 -3957
  117. attune/memory/short_term.py +0 -2192
  118. attune/workflows/manage_docs.py +0 -87
  119. attune/workflows/test5.py +0 -125
  120. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
  121. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
  122. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  123. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
attune/memory/short_term/pubsub.py (new file)
@@ -0,0 +1,286 @@
+ """Pub/Sub messaging for real-time agent communication.
+
+ This module provides publish/subscribe messaging:
+ - Publish: Send messages to channels
+ - Subscribe: Register handlers for channels
+ - Unsubscribe: Remove handlers
+ - Background listener thread management
+
+ Key Prefix: PREFIX_PUBSUB = "pubsub:"
+
+ Classes:
+     PubSubManager: Real-time publish/subscribe operations
+
+ Example:
+     >>> from attune.memory.short_term.pubsub import PubSubManager
+     >>> from attune.memory.types import AgentCredentials, AccessTier
+     >>> pubsub = PubSubManager(base_ops)
+     >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+     >>> def handler(msg): print(msg)
+     >>> pubsub.subscribe("signals", handler)
+     >>> pubsub.publish("signals", {"event": "done"}, creds)
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ import json
+ import threading
+ import time
+ from collections.abc import Callable
+ from datetime import datetime
+ from typing import TYPE_CHECKING
+
+ import structlog
+
+ from attune.memory.types import (
+     AgentCredentials,
+ )
+
+ if TYPE_CHECKING:
+     from redis.client import PubSub
+
+     from attune.memory.short_term.base import BaseOperations
+
+ logger = structlog.get_logger(__name__)
+
+
+ class PubSubManager:
+     """Real-time publish/subscribe operations.
+
+     Provides channel-based messaging for agent communication.
+     Uses Redis Pub/Sub for real-time message delivery with
+     background listener threads.
+
+     The class manages its own state for subscriptions and
+     background threads, composed with BaseOperations for
+     Redis client access.
+
+     Attributes:
+         PREFIX_PUBSUB: Key prefix for pubsub channels
+
+     Example:
+         >>> pubsub = PubSubManager(base_ops)
+         >>> def on_signal(msg):
+         ...     print(f"Signal: {msg['data']}")
+         >>> pubsub.subscribe("agent_signals", on_signal)
+         >>> pubsub.publish("agent_signals", {"type": "heartbeat"}, creds)
+     """
+
+     PREFIX_PUBSUB = "pubsub:"
+
+     def __init__(self, base: BaseOperations) -> None:
+         """Initialize pub/sub manager.
+
+         Args:
+             base: BaseOperations instance for Redis client access
+         """
+         self._base = base
+         self._pubsub: PubSub | None = None
+         self._pubsub_thread: threading.Thread | None = None
+         self._subscriptions: dict[str, list[Callable[[dict], None]]] = {}
+         self._pubsub_running: bool = False
+         self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}
+
+     def publish(
+         self,
+         channel: str,
+         message: dict,
+         credentials: AgentCredentials,
+     ) -> int:
+         """Publish a message to a channel for real-time notifications.
+
+         Args:
+             channel: Channel name (will be prefixed)
+             message: Message payload (dict)
+             credentials: Agent credentials (must be CONTRIBUTOR+)
+
+         Returns:
+             Number of subscribers that received the message
+
+         Raises:
+             PermissionError: If credentials lack publish access
+
+         Example:
+             >>> pubsub.publish(
+             ...     "agent_signals",
+             ...     {"event": "task_complete", "task_id": "123"},
+             ...     creds
+             ... )
+             2
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot publish. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         start_time = time.perf_counter()
+         full_channel = f"{self.PREFIX_PUBSUB}{channel}"
+
+         payload = {
+             "channel": channel,
+             "from_agent": credentials.agent_id,
+             "timestamp": datetime.now().isoformat(),
+             "data": message,
+         }
+
+         # Handle mock mode
+         if self._base.use_mock:
+             handlers = self._mock_pubsub_handlers.get(full_channel, [])
+             for handler in handlers:
+                 try:
+                     handler(payload)
+                 except Exception as e:
+                     logger.warning("pubsub_handler_error", channel=channel, error=str(e))
+             latency_ms = (time.perf_counter() - start_time) * 1000
+             self._base._metrics.record_operation("publish", latency_ms)
+             return len(handlers)
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return 0
+
+         count = self._base._client.publish(full_channel, json.dumps(payload))
+         latency_ms = (time.perf_counter() - start_time) * 1000
+         self._base._metrics.record_operation("publish", latency_ms)
+
+         logger.debug("pubsub_published", channel=channel, subscribers=count)
+         return int(count)
+
+     def subscribe(
+         self,
+         channel: str,
+         handler: Callable[[dict], None],
+         credentials: AgentCredentials | None = None,
+     ) -> bool:
+         """Subscribe to a channel for real-time notifications.
+
+         Args:
+             channel: Channel name to subscribe to
+             handler: Callback function receiving message dict
+             credentials: Optional credentials (any tier can subscribe)
+
+         Returns:
+             True if subscribed successfully
+
+         Example:
+             >>> def on_message(msg):
+             ...     print(f"Received: {msg['data']}")
+             >>> pubsub.subscribe("agent_signals", on_message)
+             True
+         """
+         full_channel = f"{self.PREFIX_PUBSUB}{channel}"
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_channel not in self._mock_pubsub_handlers:
+                 self._mock_pubsub_handlers[full_channel] = []
+             self._mock_pubsub_handlers[full_channel].append(handler)
+             logger.info("pubsub_subscribed_mock", channel=channel)
+             return True
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return False
+
+         # Store handler
+         if full_channel not in self._subscriptions:
+             self._subscriptions[full_channel] = []
+         self._subscriptions[full_channel].append(handler)
+
+         # Create pubsub if needed
+         if self._pubsub is None:
+             self._pubsub = self._base._client.pubsub()
+
+         # Subscribe with internal handler
+         self._pubsub.subscribe(**{full_channel: self._pubsub_message_handler})
+
+         # Start listener thread if not running
+         if not self._pubsub_running:
+             self._pubsub_running = True
+             self._pubsub_thread = threading.Thread(
+                 target=self._pubsub_listener,
+                 daemon=True,
+                 name="redis-pubsub-listener",
+             )
+             self._pubsub_thread.start()
+
+         logger.info("pubsub_subscribed", channel=channel)
+         return True
+
+     def _pubsub_message_handler(self, message: dict) -> None:
+         """Internal handler for pubsub messages."""
+         if message["type"] != "message":
+             return
+
+         channel = message["channel"]
+         if isinstance(channel, bytes):
+             channel = channel.decode()
+
+         try:
+             payload = json.loads(message["data"])
+         except json.JSONDecodeError:
+             payload = {"raw": message["data"]}
+
+         handlers = self._subscriptions.get(channel, [])
+         for handler in handlers:
+             try:
+                 handler(payload)
+             except Exception as e:
+                 logger.warning("pubsub_handler_error", channel=channel, error=str(e))
+
+     def _pubsub_listener(self) -> None:
+         """Background thread for listening to pubsub messages."""
+         while self._pubsub_running and self._pubsub:
+             try:
+                 self._pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
+             except Exception as e:
+                 logger.warning("pubsub_listener_error", error=str(e))
+                 time.sleep(1)
+
+     def unsubscribe(self, channel: str) -> bool:
+         """Unsubscribe from a channel.
+
+         Args:
+             channel: Channel name to unsubscribe from
+
+         Returns:
+             True if unsubscribed successfully
+
+         Example:
+             >>> pubsub.unsubscribe("agent_signals")
+             True
+         """
+         full_channel = f"{self.PREFIX_PUBSUB}{channel}"
+
+         # Handle mock mode
+         if self._base.use_mock:
+             self._mock_pubsub_handlers.pop(full_channel, None)
+             return True
+
+         # Handle real Redis client
+         if self._pubsub is None:
+             return False
+
+         self._pubsub.unsubscribe(full_channel)
+         self._subscriptions.pop(full_channel, None)
+         return True
+
+     def close(self) -> None:
+         """Close pubsub connection and stop listener thread.
+
+         Should be called when the manager is no longer needed
+         to clean up resources and stop background threads.
+
+         Example:
+             >>> pubsub.close()
+         """
+         self._pubsub_running = False
+         if self._pubsub:
+             self._pubsub.close()
+             self._pubsub = None
+         self._subscriptions.clear()
+         self._mock_pubsub_handlers.clear()
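
Taken together, the new pub/sub module gives agents a subscribe-then-publish flow: handlers registered with subscribe() receive a payload dict containing "channel", "from_agent", "timestamp", and "data". A minimal usage sketch in the same doctest style as the module's docstrings, assuming base_ops is a BaseOperations instance (constructed from attune/memory/short_term/base.py, which is not shown in this hunk) running with the mock backend (base_ops.use_mock is True):

    >>> from attune.memory.short_term.pubsub import PubSubManager
    >>> from attune.memory.types import AgentCredentials, AccessTier
    >>> pubsub = PubSubManager(base_ops)  # base_ops: assumed mock-mode BaseOperations
    >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
    >>> received = []
    >>> pubsub.subscribe("agent_signals", received.append)
    True
    >>> pubsub.publish("agent_signals", {"event": "task_complete"}, creds)
    1
    >>> received[0]["data"]
    {'event': 'task_complete'}
    >>> pubsub.close()

In mock mode publish() calls handlers synchronously and returns how many ran; with a real Redis client it returns the subscriber count reported by Redis PUBLISH, and handlers run on the "redis-pubsub-listener" daemon thread instead.
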
attune/memory/short_term/queues.py (new file)
@@ -0,0 +1,244 @@
+ """Task queue operations using Redis lists.
+
+ This module provides queue operations for task processing:
+ - Push: Add task to queue (LPUSH/RPUSH)
+ - Pop: Remove and return task (LPOP/BLPOP)
+ - Length: Get queue size
+ - Peek: View task without removing
+
+ Key Prefix: PREFIX_QUEUE = "queue:"
+
+ Use Cases:
+ - Background job queues
+ - Task distribution
+ - Work stealing patterns
+
+ Classes:
+     QueueManager: Redis list operations for task queues
+
+ Example:
+     >>> from attune.memory.short_term.queues import QueueManager
+     >>> from attune.memory.types import AgentCredentials, AccessTier
+     >>> queues = QueueManager(base_ops)
+     >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+     >>> queues.push("tasks", {"type": "analyze", "file": "main.py"}, creds)
+     >>> task = queues.pop("tasks", creds)
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ import json
+ from datetime import datetime
+ from typing import TYPE_CHECKING
+
+ import structlog
+
+ from attune.memory.types import (
+     AgentCredentials,
+ )
+
+ if TYPE_CHECKING:
+     from attune.memory.short_term.base import BaseOperations
+
+ logger = structlog.get_logger(__name__)
+
+
+ class QueueManager:
+     """Redis list operations for task queues.
+
+     Provides FIFO queue operations using Redis lists for task
+     distribution and background job processing.
+
+     The class manages its own mock list storage for testing,
+     composed with BaseOperations for Redis client access.
+
+     Attributes:
+         PREFIX_QUEUE: Key prefix for queue names
+
+     Example:
+         >>> queues = QueueManager(base_ops)
+         >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+         >>> queues.push("analysis_tasks", {"file": "main.py"}, creds)
+         >>> task = queues.pop("analysis_tasks", creds, timeout=5)
+     """
+
+     PREFIX_QUEUE = "queue:"
+
+     def __init__(self, base: BaseOperations) -> None:
+         """Initialize queue manager.
+
+         Args:
+             base: BaseOperations instance for Redis client access
+         """
+         self._base = base
+         self._mock_lists: dict[str, list[str]] = {}
+
+     def push(
+         self,
+         queue_name: str,
+         task: dict,
+         credentials: AgentCredentials,
+         priority: bool = False,
+     ) -> int:
+         """Push a task to a queue.
+
+         Args:
+             queue_name: Name of the queue
+             task: Task data
+             credentials: Agent credentials (must be CONTRIBUTOR+)
+             priority: If True, push to front (high priority)
+
+         Returns:
+             New queue length
+
+         Raises:
+             PermissionError: If credentials lack write access
+
+         Example:
+             >>> task = {"type": "analyze", "file": "main.py"}
+             >>> queues.push("agent_tasks", task, creds)
+             1
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot push to queue. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
+         payload = json.dumps(
+             {
+                 "task": task,
+                 "queued_by": credentials.agent_id,
+                 "queued_at": datetime.now().isoformat(),
+             },
+         )
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_queue not in self._mock_lists:
+                 self._mock_lists[full_queue] = []
+             if priority:
+                 self._mock_lists[full_queue].insert(0, payload)
+             else:
+                 self._mock_lists[full_queue].append(payload)
+             return len(self._mock_lists[full_queue])
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return 0
+
+         if priority:
+             return int(self._base._client.lpush(full_queue, payload))
+         return int(self._base._client.rpush(full_queue, payload))
+
+     def pop(
+         self,
+         queue_name: str,
+         credentials: AgentCredentials,
+         timeout: int = 0,
+     ) -> dict | None:
+         """Pop a task from a queue.
+
+         Args:
+             queue_name: Name of the queue
+             credentials: Agent credentials
+             timeout: Seconds to block waiting (0 = no block)
+
+         Returns:
+             Task data or None if queue empty
+
+         Example:
+             >>> task = queues.pop("agent_tasks", creds, timeout=5)
+             >>> if task:
+             ...     process(task["task"])
+         """
+         full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
+
+         # Handle mock mode
+         if self._base.use_mock:
+             if full_queue not in self._mock_lists or not self._mock_lists[full_queue]:
+                 return None
+             payload = self._mock_lists[full_queue].pop(0)
+             data: dict = json.loads(payload)
+             return data
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return None
+
+         if timeout > 0:
+             result = self._base._client.blpop(full_queue, timeout=timeout)
+             if result:
+                 data = json.loads(result[1])
+                 return data
+             return None
+
+         result = self._base._client.lpop(full_queue)
+         if result:
+             data = json.loads(result)
+             return data
+         return None
+
+     def length(self, queue_name: str) -> int:
+         """Get the length of a queue.
+
+         Args:
+             queue_name: Name of the queue
+
+         Returns:
+             Number of items in the queue
+
+         Example:
+             >>> count = queues.length("agent_tasks")
+             >>> print(f"Tasks pending: {count}")
+         """
+         full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
+
+         # Handle mock mode
+         if self._base.use_mock:
+             return len(self._mock_lists.get(full_queue, []))
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return 0
+
+         return int(self._base._client.llen(full_queue))
+
+     def peek(
+         self,
+         queue_name: str,
+         credentials: AgentCredentials,
+         count: int = 1,
+     ) -> list[dict]:
+         """Peek at tasks in a queue without removing them.
+
+         Args:
+             queue_name: Name of the queue
+             credentials: Agent credentials
+             count: Number of items to peek
+
+         Returns:
+             List of task data
+
+         Example:
+             >>> tasks = queues.peek("agent_tasks", creds, count=5)
+             >>> for task in tasks:
+             ...     print(task["task"]["type"])
+         """
+         full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
+
+         # Handle mock mode
+         if self._base.use_mock:
+             items = self._mock_lists.get(full_queue, [])[:count]
+             return [json.loads(item) for item in items]
+
+         # Handle real Redis client
+         if self._base._client is None:
+             return []
+
+         items = self._base._client.lrange(full_queue, 0, count - 1)
+         return [json.loads(item) for item in items]
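
The queue module follows the same mock/real split: push() wraps each task with "queued_by" and "queued_at" metadata and JSON-encodes it, pop() decodes it back, priority=True pushes to the front of the list, and timeout > 0 maps to a blocking BLPOP on a real client. A minimal sketch under the same assumption of a mock-mode base_ops (BaseOperations construction is outside this hunk); note that in the code shown only push() enforces the CONTRIBUTOR tier:

    >>> from attune.memory.short_term.queues import QueueManager
    >>> from attune.memory.types import AgentCredentials, AccessTier
    >>> queues = QueueManager(base_ops)  # base_ops: assumed mock-mode BaseOperations
    >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
    >>> queues.push("agent_tasks", {"type": "analyze", "file": "main.py"}, creds)
    1
    >>> queues.push("agent_tasks", {"type": "hotfix"}, creds, priority=True)
    2
    >>> queues.length("agent_tasks")
    2
    >>> queues.pop("agent_tasks", creds)["task"]
    {'type': 'hotfix'}
    >>> queues.peek("agent_tasks", creds)[0]["task"]
    {'type': 'analyze', 'file': 'main.py'}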