empathy-framework 5.0.1-py3-none-any.whl → 5.0.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/METADATA +53 -9
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/RECORD +28 -31
- empathy_llm_toolkit/providers.py +175 -35
- empathy_llm_toolkit/utils/tokens.py +150 -30
- empathy_os/__init__.py +1 -1
- empathy_os/cli/commands/batch.py +256 -0
- empathy_os/cli/commands/cache.py +248 -0
- empathy_os/cli/commands/inspect.py +1 -2
- empathy_os/cli/commands/metrics.py +1 -1
- empathy_os/cli/commands/routing.py +285 -0
- empathy_os/cli/commands/workflow.py +2 -2
- empathy_os/cli/parsers/__init__.py +6 -0
- empathy_os/cli/parsers/batch.py +118 -0
- empathy_os/cli/parsers/cache.py +65 -0
- empathy_os/cli/parsers/routing.py +110 -0
- empathy_os/dashboard/standalone_server.py +22 -11
- empathy_os/metrics/collector.py +31 -0
- empathy_os/models/token_estimator.py +21 -13
- empathy_os/telemetry/agent_coordination.py +12 -14
- empathy_os/telemetry/agent_tracking.py +18 -19
- empathy_os/telemetry/approval_gates.py +27 -39
- empathy_os/telemetry/event_streaming.py +19 -19
- empathy_os/telemetry/feedback_loop.py +13 -16
- empathy_os/workflows/batch_processing.py +56 -10
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/README 2.md +0 -454
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/WHEEL +0 -0
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/top_level.txt +0 -0
empathy_os/cli/parsers/batch.py
@@ -0,0 +1,118 @@
+"""Argument parser for batch processing commands.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+
+def register_parsers(subparsers):
+    """Register batch command parsers.
+
+    Args:
+        subparsers: Subparser object from main argument parser
+
+    Returns:
+        None: Adds batch subparser with submit, status, results, wait subcommands
+    """
+    from ..commands.batch import (
+        cmd_batch_results,
+        cmd_batch_status,
+        cmd_batch_submit,
+        cmd_batch_wait,
+    )
+
+    # Main batch command
+    batch_parser = subparsers.add_parser(
+        "batch",
+        help="Batch processing via Anthropic Batch API (50% cost savings)",
+        description="Submit and manage batch processing jobs for non-urgent tasks",
+    )
+
+    # Batch subcommands
+    batch_subparsers = batch_parser.add_subparsers(dest="batch_command", required=True)
+
+    # batch submit command
+    submit_parser = batch_subparsers.add_parser(
+        "submit",
+        help="Submit a batch processing job from JSON file",
+        description="Submit batch requests for asynchronous processing (50% cost savings)",
+    )
+
+    submit_parser.add_argument(
+        "input_file",
+        help='JSON file with batch requests. Format: [{"task_id": "...", "task_type": "...", "input_data": {...}}]',
+    )
+
+    submit_parser.set_defaults(func=cmd_batch_submit)
+
+    # batch status command
+    status_parser = batch_subparsers.add_parser(
+        "status",
+        help="Check status of a batch processing job",
+        description="Display current status and request counts for a batch",
+    )
+
+    status_parser.add_argument(
+        "batch_id",
+        help="Batch ID (e.g., msgbatch_abc123)",
+    )
+
+    status_parser.add_argument(
+        "--json",
+        action="store_true",
+        help="Output raw JSON status",
+    )
+
+    status_parser.set_defaults(func=cmd_batch_status)
+
+    # batch results command
+    results_parser = batch_subparsers.add_parser(
+        "results",
+        help="Retrieve results from completed batch",
+        description="Download and save batch results to JSON file",
+    )
+
+    results_parser.add_argument(
+        "batch_id",
+        help="Batch ID (e.g., msgbatch_abc123)",
+    )
+
+    results_parser.add_argument(
+        "output_file",
+        help="Path to output JSON file",
+    )
+
+    results_parser.set_defaults(func=cmd_batch_results)
+
+    # batch wait command
+    wait_parser = batch_subparsers.add_parser(
+        "wait",
+        help="Wait for batch to complete and retrieve results",
+        description="Poll batch status until completion, then download results",
+    )
+
+    wait_parser.add_argument(
+        "batch_id",
+        help="Batch ID (e.g., msgbatch_abc123)",
+    )
+
+    wait_parser.add_argument(
+        "output_file",
+        help="Path to output JSON file",
+    )
+
+    wait_parser.add_argument(
+        "--poll-interval",
+        type=int,
+        default=300,
+        help="Seconds between status checks (default: 300 = 5 minutes)",
+    )
+
+    wait_parser.add_argument(
+        "--timeout",
+        type=int,
+        default=86400,
+        help="Maximum wait time in seconds (default: 86400 = 24 hours)",
+    )
+
+    wait_parser.set_defaults(func=cmd_batch_wait)
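For illustration, the input_file expected by `batch submit` is a JSON array in the format shown in the help text above. A minimal sketch of producing one — the task_type value and input_data keys are hypothetical, since the accepted values are defined by the handlers in empathy_os/cli/commands/batch.py, which this diff does not show:

import json

# Hypothetical batch input; only the list-of-objects shape with task_id,
# task_type, and input_data comes from the documented format above.
requests = [
    {"task_id": "review-001", "task_type": "code-review", "input_data": {"diff": "..."}},
    {"task_id": "review-002", "task_type": "code-review", "input_data": {"diff": "..."}},
]

with open("requests.json", "w", encoding="utf-8") as f:
    json.dump(requests, f, indent=2)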
empathy_os/cli/parsers/cache.py
@@ -0,0 +1,65 @@
+"""Argument parser for cache commands.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+
+def register_parsers(subparsers):
+    """Register cache command parsers.
+
+    Args:
+        subparsers: Subparser object from main argument parser
+
+    Returns:
+        None: Adds cache subparser with stats and clear subcommands
+    """
+    from ..commands.cache import cmd_cache_clear, cmd_cache_stats
+    # Main cache command
+    cache_parser = subparsers.add_parser(
+        "cache",
+        help="Cache monitoring and management",
+        description="Monitor prompt caching performance and cost savings",
+    )
+
+    # Cache subcommands
+    cache_subparsers = cache_parser.add_subparsers(dest="cache_command", required=True)
+
+    # cache stats command
+    stats_parser = cache_subparsers.add_parser(
+        "stats",
+        help="Show cache performance statistics",
+        description="Display prompt caching metrics including hit rate and cost savings",
+    )
+
+    stats_parser.add_argument(
+        "--days",
+        type=int,
+        default=7,
+        help="Number of days to analyze (default: 7)",
+    )
+
+    stats_parser.add_argument(
+        "--format",
+        choices=["table", "json"],
+        default="table",
+        help="Output format (default: table)",
+    )
+
+    stats_parser.add_argument(
+        "--verbose",
+        "-v",
+        action="store_true",
+        help="Show detailed token metrics",
+    )
+
+    stats_parser.set_defaults(func=cmd_cache_stats)
+
+    # cache clear command (placeholder)
+    clear_parser = cache_subparsers.add_parser(
+        "clear",
+        help="Clear cache (note: Anthropic cache is server-side with 5min TTL)",
+        description="Information about cache clearing",
+    )
+
+    clear_parser.set_defaults(func=cmd_cache_clear)
empathy_os/cli/parsers/routing.py
@@ -0,0 +1,110 @@
+"""Argument parser for adaptive routing commands.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+
+def register_parsers(subparsers):
+    """Register routing command parsers.
+
+    Args:
+        subparsers: Subparser object from main argument parser
+
+    Returns:
+        None: Adds routing subparser with stats, check, models subcommands
+    """
+    from ..commands.routing import (
+        cmd_routing_check,
+        cmd_routing_models,
+        cmd_routing_stats,
+    )
+
+    # Main routing command
+    routing_parser = subparsers.add_parser(
+        "routing",
+        help="Adaptive model routing statistics and recommendations",
+        description="Analyze model routing performance based on historical telemetry",
+    )
+
+    # Routing subcommands
+    routing_subparsers = routing_parser.add_subparsers(
+        dest="routing_command", required=True
+    )
+
+    # routing stats command
+    stats_parser = routing_subparsers.add_parser(
+        "stats",
+        help="Show routing statistics for a workflow",
+        description="Display model performance metrics and recommendations",
+    )
+
+    stats_parser.add_argument("workflow", help="Workflow name (e.g., 'code-review')")
+
+    stats_parser.add_argument(
+        "--stage",
+        help="Stage name (optional, shows all stages if not specified)",
+    )
+
+    stats_parser.add_argument(
+        "--days",
+        type=int,
+        default=7,
+        help="Number of days to analyze (default: 7)",
+    )
+
+    stats_parser.set_defaults(func=cmd_routing_stats)
+
+    # routing check command
+    check_parser = routing_subparsers.add_parser(
+        "check",
+        help="Check for tier upgrade recommendations",
+        description="Analyze failure rates and recommend tier upgrades",
+    )
+
+    check_parser.add_argument(
+        "--workflow",
+        help="Workflow name (required unless --all is used)",
+    )
+
+    check_parser.add_argument(
+        "--stage",
+        help="Stage name (optional)",
+    )
+
+    check_parser.add_argument(
+        "--all",
+        action="store_true",
+        help="Check all workflows",
+    )
+
+    check_parser.add_argument(
+        "--days",
+        type=int,
+        default=7,
+        help="Number of days to analyze (default: 7)",
+    )
+
+    check_parser.set_defaults(func=cmd_routing_check)
+
+    # routing models command
+    models_parser = routing_subparsers.add_parser(
+        "models",
+        help="Compare model performance",
+        description="Show performance metrics for all models from a provider",
+    )
+
+    models_parser.add_argument(
+        "--provider",
+        default="anthropic",
+        help="Provider name (default: anthropic)",
+    )
+
+    models_parser.add_argument(
+        "--days",
+        type=int,
+        default=7,
+        help="Number of days to analyze (default: 7)",
+    )
+
+    models_parser.set_defaults(func=cmd_routing_models)
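All three new parser modules (batch, cache, routing) expose the same register_parsers(subparsers) hook, which the parsers/__init__.py change presumably wires into the main CLI. A minimal, self-contained sketch of that pattern — the stub handler stands in for the real cmd_* functions, and the program name is an assumption:

import argparse


def register_parsers(subparsers):
    # Stand-in for parsers/batch.py, parsers/cache.py, parsers/routing.py:
    # each adds a subparser and binds its handler via set_defaults(func=...).
    demo = subparsers.add_parser("demo", help="Stand-in subcommand")
    demo.add_argument("--days", type=int, default=7)
    demo.set_defaults(func=lambda args: print(f"analyzing {args.days} days"))


def main(argv=None):
    parser = argparse.ArgumentParser(prog="empathy")  # program name is an assumption
    subparsers = parser.add_subparsers(dest="command", required=True)
    register_parsers(subparsers)  # called once per parser module in the real CLI
    args = parser.parse_args(argv)
    args.func(args)  # dispatch to whichever handler set_defaults bound


if __name__ == "__main__":
    main(["demo", "--days", "3"])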
empathy_os/dashboard/standalone_server.py
@@ -290,17 +290,28 @@ class StandaloneDashboardHandler(BaseHTTPRequestHandler):
                 # Get last N entries from stream
                 entries = r.xrevrange(stream_key, count=limit)
                 for entry_id, fields in entries:
+                    # Parse event structure: top-level fields + data payload
+                    event_type = fields.get(b"event_type", b"unknown").decode("utf-8")
+                    timestamp = fields.get(b"timestamp", b"").decode("utf-8")
+                    source = fields.get(b"source", b"empathy_os").decode("utf-8")
+
+                    # Parse the data payload (JSON-encoded)
+                    data = {}
                     if b"data" in fields:
-                        … (10 removed lines not shown in this view)
+                        try:
+                            data = json.loads(fields[b"data"].decode("utf-8"))
+                        except json.JSONDecodeError:
+                            data = {}
+
+                    result.append(
+                        {
+                            "event_id": entry_id.decode("utf-8") if isinstance(entry_id, bytes) else entry_id,
+                            "event_type": event_type,
+                            "timestamp": timestamp,
+                            "data": data,
+                            "source": source,
+                        }
+                    )
             except Exception as e:
                 logger.debug(f"Stream {stream_key} not found or empty: {e}")
 
@@ -320,7 +331,7 @@ class StandaloneDashboardHandler(BaseHTTPRequestHandler):
             return
 
         result = []
-        for key in r.keys(b"
+        for key in r.keys(b"approval_request:*"):
             try:
                 data = r.get(key)
                 if data:
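The new event-decoding block can be exercised in isolation. A self-contained sketch that feeds it a fields dict shaped like redis-py's xrevrange() output — the sample entry ID and field values are made up:

import json

entry_id = b"1700000000000-0"
fields = {
    b"event_type": b"workflow_started",
    b"timestamp": b"2025-01-01T00:00:00Z",
    b"source": b"empathy_os",
    b"data": json.dumps({"workflow": "code-review"}).encode("utf-8"),
}

# Same decoding logic as the added hunk above
event_type = fields.get(b"event_type", b"unknown").decode("utf-8")
timestamp = fields.get(b"timestamp", b"").decode("utf-8")
source = fields.get(b"source", b"empathy_os").decode("utf-8")

data = {}
if b"data" in fields:
    try:
        data = json.loads(fields[b"data"].decode("utf-8"))
    except json.JSONDecodeError:
        data = {}

event = {
    "event_id": entry_id.decode("utf-8") if isinstance(entry_id, bytes) else entry_id,
    "event_type": event_type,
    "timestamp": timestamp,
    "data": data,
    "source": source,
}
print(event)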
empathy_os/metrics/collector.py
@@ -0,0 +1,31 @@
+"""Metrics collector stub (deprecated).
+
+This module is a placeholder for legacy code compatibility.
+The functionality has been moved to other modules.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+
+class MetricsCollector:
+    """Deprecated metrics collector class.
+
+    This class is maintained for backward compatibility but is deprecated.
+    """
+
+    def __init__(self, db_path: str | None = None):
+        """Initialize metrics collector.
+
+        Args:
+            db_path: Path to database (deprecated parameter)
+        """
+        self.db_path = db_path
+
+    def collect(self):
+        """Collect metrics (deprecated)."""
+        return {}
+
+    def get_stats(self):
+        """Get statistics (deprecated)."""
+        return {}
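The stub's contract is simply that legacy imports keep working and both methods return empty dicts:

from empathy_os.metrics.collector import MetricsCollector

collector = MetricsCollector(db_path="unused.db")  # parameter accepted but unused
assert collector.collect() == {}
assert collector.get_stats() == {}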
empathy_os/models/token_estimator.py
@@ -44,16 +44,17 @@ def _get_encoding(model_id: str) -> Any:
 
 
 def estimate_tokens(text: str, model_id: str = "claude-sonnet-4-5-20250514") -> int:
-    """Estimate token count for text.
+    """Estimate token count for text using accurate token counting.
 
-    Uses
+    Uses empathy_llm_toolkit's token counter which leverages tiktoken for fast,
+    accurate local counting (~98% accurate). Falls back to heuristic if unavailable.
 
     Args:
         text: The text to count tokens for
         model_id: The model ID to use for encoding selection
 
     Returns:
-        … (content not shown in this view)
+        Accurate token count
 
     Raises:
         ValueError: If model_id is empty
 
@@ -66,16 +67,23 @@ def estimate_tokens(text: str, model_id: str = "claude-sonnet-4-5-20250514") ->
     if not text:
         return 0
 
-    … (10 removed lines not shown in this view)
+    # Use new accurate token counting from empathy_llm_toolkit
+    try:
+        from empathy_llm_toolkit.utils.tokens import count_tokens
+
+        return count_tokens(text, model=model_id, use_api=False)
+    except ImportError:
+        # Fallback to tiktoken if toolkit not available
+        if TIKTOKEN_AVAILABLE:
+            try:
+                encoding = _get_encoding(model_id)
+                if encoding:
+                    return len(encoding.encode(text))
+            except Exception:
+                pass  # Fall through to heuristic
+
+        # Last resort: heuristic fallback
+        return max(1, int(len(text) * TOKENS_PER_CHAR_HEURISTIC))
 
 
 def estimate_workflow_cost(
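The rewritten estimate_tokens() is a three-tier fallback: the toolkit counter, then tiktoken, then a character heuristic. A standalone sketch of the same chain — the heuristic constant's real value is defined earlier in token_estimator.py and is not visible in this diff, so 0.25 is an assumed stand-in, and tier 2 uses a fixed cl100k_base encoding instead of the module's _get_encoding():

TOKENS_PER_CHAR_HEURISTIC = 0.25  # assumption (~4 chars/token), not the package's constant


def estimate_tokens_sketch(text: str, model_id: str = "claude-sonnet-4-5-20250514") -> int:
    if not text:
        return 0
    try:
        # Tier 1: accurate local counting via the toolkit (tiktoken-backed)
        from empathy_llm_toolkit.utils.tokens import count_tokens
        return count_tokens(text, model=model_id, use_api=False)
    except ImportError:
        pass
    try:
        # Tier 2: tiktoken directly, if installed
        import tiktoken
        return len(tiktoken.get_encoding("cl100k_base").encode(text))
    except Exception:
        # Tier 3: character-count heuristic
        return max(1, int(len(text) * TOKENS_PER_CHAR_HEURISTIC))


print(estimate_tokens_sketch("Batch processing saves 50% on token costs."))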
empathy_os/telemetry/agent_coordination.py
@@ -222,15 +222,13 @@ class CoordinationSignals:
         key = f"{self.KEY_PREFIX}{target_key}:{signal_type}:{signal_id}"
 
         try:
-            … (2 removed lines not shown in this view)
-                self.memory.stash(key=key, data=signal.to_dict(), credentials=credentials, ttl_seconds=ttl)
-            elif hasattr(self.memory, "_redis"):
+            # Use direct Redis access for custom TTL
+            if hasattr(self.memory, "_client") and self.memory._client:
                 import json
 
-                self.memory.
+                self.memory._client.setex(key, ttl, json.dumps(signal.to_dict()))
             else:
-                logger.warning(f"Cannot send signal:
+                logger.warning(f"Cannot send signal: no Redis backend available")
         except Exception as e:
             logger.error(f"Failed to send signal {signal_id}: {e}")
 
@@ -341,8 +339,8 @@ class CoordinationSignals:
         ]
 
         for pattern in patterns:
-            if hasattr(self.memory, "
-                keys = self.memory.
+            if hasattr(self.memory, "_client"):
+                keys = self.memory._client.keys(pattern)
             else:
                 continue
 
@@ -393,8 +391,8 @@ class CoordinationSignals:
 
         signals = []
         for pattern in patterns:
-            if hasattr(self.memory, "
-                keys = self.memory.
+            if hasattr(self.memory, "_client"):
+                keys = self.memory._client.keys(pattern)
             else:
                 continue
 
@@ -451,10 +449,10 @@ class CoordinationSignals:
         try:
             if hasattr(self.memory, "retrieve"):
                 return self.memory.retrieve(key, credentials=None)
-            elif hasattr(self.memory, "
+            elif hasattr(self.memory, "_client"):
                 import json
 
-                data = self.memory.
+                data = self.memory._client.get(key)
                 if data:
                     if isinstance(data, bytes):
                         data = data.decode("utf-8")
@@ -470,8 +468,8 @@ class CoordinationSignals:
             return False
 
         try:
-            if hasattr(self.memory, "
-                return self.memory.
+            if hasattr(self.memory, "_client"):
+                return self.memory._client.delete(key) > 0
             return False
         except Exception as e:
             logger.debug(f"Failed to delete signal {key}: {e}")
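The recurring change across these hunks swaps memory-backend calls for direct Redis access through self.memory._client, because setex lets the caller pick a TTL per key. A hedged sketch of that primitive with a plain redis-py client — the key layout and values are illustrative, not the real KEY_PREFIX:

import json

import redis

r = redis.Redis()  # assumes a local Redis instance
signal = {"signal_id": "sig-1", "type": "pause", "payload": {}}
key = "signal:agent-42:pause:sig-1"  # illustrative key layout

r.setex(key, 60, json.dumps(signal))  # value expires after 60 seconds
print(r.ttl(key))                     # ~60
print(json.loads(r.get(key)))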
empathy_os/telemetry/agent_tracking.py
@@ -211,18 +211,14 @@ class HeartbeatCoordinator:
         # Store in Redis with TTL (Pattern 1)
         key = f"heartbeat:{self.agent_id}"
         try:
-            # Use
-            if hasattr(self.memory, "
-                … (1 removed line not shown in this view)
-                    key=key, data=heartbeat.to_dict(), credentials=None, ttl_seconds=self.HEARTBEAT_TTL
-                )
-            elif hasattr(self.memory, "_redis"):
-                # Direct Redis access for ShortTermMemory
+            # Use direct Redis access for heartbeats (need custom 30s TTL)
+            if hasattr(self.memory, "_client") and self.memory._client:
+                # Direct Redis access with setex for custom TTL
                 import json
 
-                self.memory.
+                self.memory._client.setex(key, self.HEARTBEAT_TTL, json.dumps(heartbeat.to_dict()))
             else:
-                logger.warning(f"Cannot publish heartbeat:
+                logger.warning(f"Cannot publish heartbeat: no Redis backend available")
         except Exception as e:
             logger.warning(f"Failed to publish heartbeat for {self.agent_id}: {e}")
 
@@ -249,8 +245,8 @@ class HeartbeatCoordinator:
 
         try:
             # Scan for heartbeat:* keys
-            if hasattr(self.memory, "
-                keys = self.memory.
+            if hasattr(self.memory, "_client") and self.memory._client:
+                keys = self.memory._client.keys("heartbeat:*")
             else:
                 logger.warning("Cannot scan for heartbeats: no Redis access")
                 return []
@@ -305,23 +301,26 @@ class HeartbeatCoordinator:
         return None
 
     def _retrieve_heartbeat(self, key: str) -> dict[str, Any] | None:
-        """Retrieve heartbeat data from memory.
+        """Retrieve heartbeat data from memory.
+
+        Heartbeat keys are stored directly as 'heartbeat:{agent_id}' and must be
+        retrieved via direct Redis access, not through the standard retrieve() method
+        which expects keys with 'working:{agent_id}:{key}' format.
+        """
         if not self.memory:
             return None
 
         try:
-            #
-            if hasattr(self.memory, "
-                return self.memory.retrieve(key, credentials=None)
-            # Try direct Redis access
-            elif hasattr(self.memory, "_redis"):
+            # Use direct Redis access for heartbeat keys
+            if hasattr(self.memory, "_client") and self.memory._client:
                 import json
 
-                data = self.memory.
+                data = self.memory._client.get(key)
                 if data:
                     if isinstance(data, bytes):
                         data = data.decode("utf-8")
-                    … (1 removed line not shown in this view)
+                    result = json.loads(data)
+                    return result if isinstance(result, dict) else None
             return None
         except Exception as e:
             logger.debug(f"Failed to retrieve heartbeat {key}: {e}")
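Per the new docstring, heartbeats bypass retrieve() because they live at flat 'heartbeat:{agent_id}' keys rather than 'working:{agent_id}:{key}'. A small sketch of the write/read round trip with plain redis-py — the 30-second TTL comes from the diff's comment, and the field names are illustrative:

import json

import redis

r = redis.Redis()  # assumes a local Redis instance
agent_id = "agent-42"

# Heartbeat: flat key, short TTL, read back with a direct GET
r.setex(f"heartbeat:{agent_id}", 30, json.dumps({"agent_id": agent_id, "status": "alive"}))

raw = r.get(f"heartbeat:{agent_id}")
if raw is not None:
    if isinstance(raw, bytes):
        raw = raw.decode("utf-8")
    heartbeat = json.loads(raw)
    print(heartbeat if isinstance(heartbeat, dict) else None)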