empathy-framework 3.5.3-py3-none-any.whl → 3.5.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/METADATA +11 -3
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/RECORD +13 -13
- empathy_llm_toolkit/__init__.py +2 -1
- empathy_llm_toolkit/core.py +9 -1
- empathy_llm_toolkit/providers.py +177 -0
- empathy_os/cli.py +181 -14
- empathy_os/cli_unified.py +1 -1
- empathy_os/redis_config.py +132 -45
- empathy_os/workflow_commands.py +2 -2
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/WHEEL +0 -0
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/top_level.txt +0 -0
{empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: empathy-framework
-Version: 3.5.3
+Version: 3.5.6
 Summary: AI collaboration framework with persistent memory, anticipatory intelligence, code inspection, and multi-agent orchestration
 Author-email: Patrick Roebuck <patrick.roebuck@smartAImemory.com>
 Maintainer-email: Smart-AI-Memory <patrick.roebuck@smartAImemory.com>
@@ -225,6 +225,8 @@ Requires-Dist: ruff<1.0,>=0.1; extra == "dev"
 Requires-Dist: coverage<8.0,>=7.0; extra == "dev"
 Requires-Dist: bandit<2.0,>=1.7; extra == "dev"
 Requires-Dist: pre-commit<4.0,>=3.0; extra == "dev"
+Requires-Dist: httpx<1.0.0,>=0.27.0; extra == "dev"
+Requires-Dist: fastapi<1.0.0,>=0.109.1; extra == "dev"
 Provides-Extra: full
 Requires-Dist: anthropic<1.0.0,>=0.8.0; extra == "full"
 Requires-Dist: openai<2.0.0,>=1.6.0; extra == "full"
@@ -276,8 +278,8 @@ Dynamic: license-file
 **The AI collaboration framework that predicts problems before they happen.**

 [](https://pypi.org/project/empathy-framework/)
-[](https://github.com/Smart-AI-Memory/empathy-framework/actions)
+[](https://github.com/Smart-AI-Memory/empathy-framework)
 [](LICENSE)
 [](https://www.python.org)

@@ -287,6 +289,12 @@ pip install empathy-framework[full]

 ## What's New in v3.5.x

+### Project Indexing & Test Suite Expansion (v3.5.4)
+
+- **Project Indexing System** — JSON-based file tracking with automatic structure scanning, metadata tracking, and CrewAI integration
+- **5,603 Tests** — Comprehensive test coverage at 64% with 30+ new test modules
+- **BaselineManager Fix** — Resolved test isolation bug affecting suppression system
+
 ### Memory API Security Hardening (v3.5.0)

 - **Input Validation** — Pattern IDs, agent IDs, and classifications validated to prevent path traversal and injection attacks
{empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/RECORD CHANGED

@@ -32,29 +32,29 @@ coach_wizards/refactoring_wizard.py,sha256=1AuRyX45KI63n_-fvvbRXamqvPbrB-O1B7TPP
 coach_wizards/scaling_wizard.py,sha256=yLULCkflLoBKS4hOSBPQuKKGBGHgKExnuEp5WLTIY-8,2596
 coach_wizards/security_wizard.py,sha256=tr1iq0egAMLCM-wOFhTDN5dHQRFuhSshXSkv17Jm7eM,2603
 coach_wizards/testing_wizard.py,sha256=M2RtaTa1WHsk42svJAEZpLySU3PXJJZn2jigouMJrG0,2561
-empathy_framework-3.5.
+empathy_framework-3.5.6.dist-info/licenses/LICENSE,sha256=IJ9eeI5KSrD5P7alsn7sI_6_1bDihxBA5S4Sen4jf2k,4937
 empathy_healthcare_plugin/__init__.py,sha256=FvVcD7WQTlmCCLgSPfM-FPT2l-ma1oAACBZWhtYFAUA,296
 empathy_healthcare_plugin/protocols/cardiac.json,sha256=uShOvI2RQJYLZacLT2R_aHfsjvJdyCu_gYfpMfK3N74,2088
 empathy_healthcare_plugin/protocols/post_operative.json,sha256=nqh3ydPY8FNSLv-Q3QmH8Dsyc1c4LvQxUSP84B8W6xk,2021
 empathy_healthcare_plugin/protocols/respiratory.json,sha256=wNDprggFDGRxxHNwchC19N8aoyaN74RnhYN7lNookDI,2136
 empathy_healthcare_plugin/protocols/sepsis.json,sha256=yXKt8QmDaAeTgHitqJJ-N9J9pkHRqGxZM_jJl_wDG6A,3631
 empathy_llm_toolkit/README.md,sha256=wKfp80nOvQkyU2qkBMAdF9cPPR3iaHuia_2AfiXVaFM,12273
-empathy_llm_toolkit/__init__.py,sha256=
+empathy_llm_toolkit/__init__.py,sha256=hpDQmnshhXdcTyvdGRDNv6hqLSzj0RBPGixFHvmAjcQ,705
 empathy_llm_toolkit/claude_memory.py,sha256=L4XaIDR_5yugYz4ITJw3ofWBxYQWeI3W3Cfs09TB2_Y,14872
 empathy_llm_toolkit/code_health.py,sha256=hc0dRN00xb4An3KPXAbNp3Tp076a2GnJ1MlPGH7HHM0,42438
 empathy_llm_toolkit/contextual_patterns.py,sha256=pC2LU4z8dNRcCj0TWZB_LSyXeAdt7me5WKmdt2dfXFk,12056
-empathy_llm_toolkit/core.py,sha256=
+empathy_llm_toolkit/core.py,sha256=Ts8OUASBLjNxiRvDbkvCiXo5aN-lmYRXUVQO_OW4b7w,34499
 empathy_llm_toolkit/git_pattern_extractor.py,sha256=L_BFi5ZLOhKbXZqLon4bJpHRrZk4dt-ICQ_R3YQftZg,14756
 empathy_llm_toolkit/levels.py,sha256=8iH_mPRh72yFZ0wJgSB6K20XZTdfnw4gBanX6_4P6n8,7178
 empathy_llm_toolkit/pattern_confidence.py,sha256=M9w37N621c7gA21U0cI0ApaV9TFKoQtP4dhUfjmzf7I,14207
 empathy_llm_toolkit/pattern_resolver.py,sha256=uvrRZfROMQkaghTLHr7b6OtB6MlW-mgAV3_Il0LWBMk,9330
 empathy_llm_toolkit/pattern_summary.py,sha256=q3gPMZtk5TIG9hs61mEZzaBtpry0qVfbu2lXryunhQs,12265
-empathy_llm_toolkit/providers.py,sha256=
+empathy_llm_toolkit/providers.py,sha256=vfN5u_9e4BVUj8hR4HSZTGOLPfiH4k7ffk_Q7Zqf57o,19811
 empathy_llm_toolkit/session_status.py,sha256=pJwqHwbVwR2Q6coRkB_34CWRCMoF-r4-YBtQWEO1Mj8,25724
 empathy_llm_toolkit/state.py,sha256=oi8bPqUHkmfgkfT4_4eD1ndIGH_THyLQDYlIWZLUx5s,8051
 empathy_os/__init__.py,sha256=pvaca4oCfdL4MG5WO-RKJeXBOk0oj02Mhh_E0h7zSyY,5896
-empathy_os/cli.py,sha256=
-empathy_os/cli_unified.py,sha256=
+empathy_os/cli.py,sha256=BKZHQeOLcocO6FIHn5wgnEK84KY4APlGulWwL2tpKOE,93218
+empathy_os/cli_unified.py,sha256=YLNjgZRaeaJn72_4vUpkDK9g70as0upfyDQTmcC7aeY,14029
 empathy_os/config.py,sha256=itgEYHR3QOxtjObHqnYGoQ48863Mf16UoGPyc_l8gNE,14803
 empathy_os/coordination.py,sha256=0jKt2DzzJmFjpXJs4pMXBcUktCFHsa9i3rkXzXxykGk,28656
 empathy_os/core.py,sha256=kL_37DajqIV1_b0ldee8rGG0xUTrSzAqYuQ4dowSxuw,53229
@@ -70,11 +70,11 @@ empathy_os/monitoring.py,sha256=76Fiwqd8prqi6H_mMX79_yEPbfbPdx58E9ZfLld6fvw,1343
 empathy_os/pattern_library.py,sha256=jUeWRnRHbhB05Rm9kL-OFdMajRCOqOzOb9ow_23JdY0,14040
 empathy_os/persistence.py,sha256=2jNqPmW6TrCH2quYph2SVMQnAnhBDDVk9DqNuEhLhGE,17637
 empathy_os/platform_utils.py,sha256=8R35nql5f1cuMwWz9JKM_Nx_Gf9rGhCiAleEmIk8WVY,7343
-empathy_os/redis_config.py,sha256=
+empathy_os/redis_config.py,sha256=sX7EAXxRd8pL3r1E-Oa5yke_j-wYIQ1PI9jzaNZjlrs,9778
 empathy_os/redis_memory.py,sha256=lWS_F4FeDkmEI-jIgkPTzs3D8TTDB0627WsOxYMT-XM,23276
 empathy_os/templates.py,sha256=ap4u9i5O9KA83wWLfoUCS7phDHKb6wj8M1Zcm218lN0,17069
 empathy_os/trust_building.py,sha256=8ZvNwJmeDyKeUIkk_331M9jwKcqrsn6K43gnGtnIXbM,18790
-empathy_os/workflow_commands.py,sha256=
+empathy_os/workflow_commands.py,sha256=Kqyr8ICTsx4S-_ThXB52SYzH1mYFsHGVDYiG2KgkQGo,21904
 empathy_software_plugin/SOFTWARE_PLUGIN_README.md,sha256=RXIOB9Mt-8JrfGAA3ZUuRPT34sThubrwUgg5iNcSKIc,22591
 empathy_software_plugin/__init__.py,sha256=Ylyj95pSsoN9Zasam96DH61uBHoMJh3kbhO7k_VaCWo,310
 empathy_software_plugin/cli.py,sha256=GrZWpnFJ9allM9sYrh8rSxSlVDU6RZVnEy4FYg-dSG8,22366
@@ -96,8 +96,8 @@ wizards/sbar_wizard.py,sha256=CJ63JAXwcfBf6C3aYyxY2LODbARP9GPl0ZGJWLbx88E,21790
 wizards/shift_handoff_wizard.py,sha256=SkoNB0nLQGg92yz4j1j3NBR2mGVe_rw1pTjOFDy-JH0,19092
 wizards/soap_note_wizard.py,sha256=DBzuuuOvIONhwdfn8jaE4PCuGeKsFwM65XTb6gKFIy4,23572
 wizards/treatment_plan.py,sha256=t2Qk5eCa1gobEUaBztnwem_p9OuJK5BKqJ-Po8vXuns,512
-empathy_framework-3.5.
-empathy_framework-3.5.
-empathy_framework-3.5.
-empathy_framework-3.5.
-empathy_framework-3.5.
+empathy_framework-3.5.6.dist-info/METADATA,sha256=Ay9yi3cPZaMwrSBVgh8V8syC3YzRy8mF76AEQcEBrVw,30238
+empathy_framework-3.5.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+empathy_framework-3.5.6.dist-info/entry_points.txt,sha256=zMu7sKCiLndbEEXjTecltS-1P_JZoEUKrifuRBBbroc,1268
+empathy_framework-3.5.6.dist-info/top_level.txt,sha256=8zHB-_f0MI2K55LIEjCeaFNcog3_KgLBa_dDfzE8ESI,110
+empathy_framework-3.5.6.dist-info/RECORD,,
empathy_llm_toolkit/__init__.py CHANGED

@@ -12,7 +12,7 @@ Licensed under Fair Source 0.9

 from .core import EmpathyLLM
 from .levels import EmpathyLevel
-from .providers import AnthropicProvider, LocalProvider, OpenAIProvider
+from .providers import AnthropicProvider, GeminiProvider, LocalProvider, OpenAIProvider
 from .state import CollaborationState, UserPattern

 __version__ = "1.9.5"
@@ -21,6 +21,7 @@ __all__ = [
     "EmpathyLLM",
     "OpenAIProvider",
     "AnthropicProvider",
+    "GeminiProvider",
     "LocalProvider",
     "CollaborationState",
     "UserPattern",
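With the export added to `__all__`, the new provider is importable from the package root like the others:

```python
# Both names are now part of the public API surface of empathy_llm_toolkit.
from empathy_llm_toolkit import EmpathyLLM, GeminiProvider
```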
empathy_llm_toolkit/core.py CHANGED

@@ -23,7 +23,13 @@ from empathy_os.memory import (
 )

 from .levels import EmpathyLevel
-from .providers import
+from .providers import (
+    AnthropicProvider,
+    BaseLLMProvider,
+    GeminiProvider,
+    LocalProvider,
+    OpenAIProvider,
+)
 from .routing import ModelRouter
 from .state import CollaborationState, PatternType, UserPattern

@@ -218,6 +224,8 @@ class EmpathyLLM:
             )
         elif provider == "openai":
             return OpenAIProvider(api_key=api_key, model=model or "gpt-4-turbo-preview", **kwargs)
+        elif provider in ("google", "gemini"):
+            return GeminiProvider(api_key=api_key, model=model or "gemini-1.5-pro", **kwargs)
         elif provider == "local":
             return LocalProvider(
                 endpoint=kwargs.get("endpoint", "http://localhost:11434"),
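The new branch accepts either spelling of the provider name. A minimal sketch of the call path, assuming `EmpathyLLM` forwards `provider` and `api_key` to the dispatch above (the constructor signature itself is outside this hunk):

```python
# Minimal sketch, assuming EmpathyLLM forwards these keywords to the
# provider dispatch shown above; the constructor is not in this diff.
from empathy_llm_toolkit import EmpathyLLM

llm = EmpathyLLM(provider="gemini", api_key="<GOOGLE_API_KEY>")
# "gemini" and "google" both resolve to GeminiProvider, defaulting to gemini-1.5-pro.
```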
empathy_llm_toolkit/providers.py CHANGED

@@ -372,6 +372,183 @@ class OpenAIProvider(BaseLLMProvider):
 )


+class GeminiProvider(BaseLLMProvider):
+    """
+    Google Gemini provider with cost tracking integration.
+
+    Supports Gemini models:
+    - gemini-2.0-flash-exp: Fast, cheap tier (1M context)
+    - gemini-1.5-pro: Balanced, capable tier (2M context)
+    - gemini-2.5-pro: Premium reasoning tier
+    """
+
+    def __init__(
+        self,
+        api_key: str | None = None,
+        model: str = "gemini-1.5-pro",
+        **kwargs,
+    ):
+        super().__init__(api_key, **kwargs)
+        self.model = model
+
+        # Validate API key is provided
+        if not api_key or not api_key.strip():
+            raise ValueError(
+                "API key is required for Gemini provider. "
+                "Provide via api_key parameter or GOOGLE_API_KEY environment variable"
+            )
+
+        # Lazy import to avoid requiring google-generativeai if not used
+        try:
+            import google.generativeai as genai
+
+            genai.configure(api_key=api_key)
+            self.genai = genai
+            self.client = genai.GenerativeModel(model)
+        except ImportError as e:
+            raise ImportError(
+                "google-generativeai package required. Install with: pip install google-generativeai"
+            ) from e
+
+    async def generate(
+        self,
+        messages: list[dict[str, str]],
+        system_prompt: str | None = None,
+        temperature: float = 0.7,
+        max_tokens: int = 1024,
+        **kwargs,
+    ) -> LLMResponse:
+        """
+        Generate response using Google Gemini API.
+
+        Gemini-specific features:
+        - Large context windows (1M-2M tokens)
+        - Multimodal support
+        - Grounding with Google Search
+        """
+        import asyncio
+
+        # Convert messages to Gemini format
+        gemini_messages = []
+        for msg in messages:
+            role = "user" if msg["role"] == "user" else "model"
+            gemini_messages.append({"role": role, "parts": [msg["content"]]})
+
+        # Build generation config
+        generation_config = self.genai.GenerationConfig(
+            temperature=temperature,
+            max_output_tokens=max_tokens,
+        )
+
+        # Create model with system instruction if provided
+        if system_prompt:
+            model = self.genai.GenerativeModel(
+                self.model,
+                system_instruction=system_prompt,
+            )
+        else:
+            model = self.client
+
+        # Call Gemini API (run sync in thread pool for async compatibility)
+        loop = asyncio.get_event_loop()
+        response = await loop.run_in_executor(
+            None,
+            lambda: model.generate_content(
+                gemini_messages,
+                generation_config=generation_config,
+            ),
+        )
+
+        # Extract token counts from usage metadata
+        input_tokens = 0
+        output_tokens = 0
+        if hasattr(response, "usage_metadata"):
+            input_tokens = getattr(response.usage_metadata, "prompt_token_count", 0)
+            output_tokens = getattr(response.usage_metadata, "candidates_token_count", 0)
+
+        # Log to cost tracker
+        try:
+            from empathy_os.cost_tracker import log_request
+
+            tier = self._get_tier()
+            log_request(
+                model=self.model,
+                input_tokens=input_tokens,
+                output_tokens=output_tokens,
+                task_type=kwargs.get("task_type", "gemini_generate"),
+                tier=tier,
+            )
+        except ImportError:
+            pass  # Cost tracking not available
+
+        # Convert to standardized format
+        content = ""
+        if response.candidates:
+            content = response.candidates[0].content.parts[0].text
+
+        finish_reason = "stop"
+        if response.candidates and hasattr(response.candidates[0], "finish_reason"):
+            finish_reason = str(response.candidates[0].finish_reason.name).lower()
+
+        return LLMResponse(
+            content=content,
+            model=self.model,
+            tokens_used=input_tokens + output_tokens,
+            finish_reason=finish_reason,
+            metadata={
+                "input_tokens": input_tokens,
+                "output_tokens": output_tokens,
+                "provider": "google",
+                "model_family": "gemini",
+            },
+        )
+
+    def _get_tier(self) -> str:
+        """Determine tier from model name."""
+        if "flash" in self.model.lower():
+            return "cheap"
+        elif "2.5" in self.model or "ultra" in self.model.lower():
+            return "premium"
+        else:
+            return "capable"
+
+    def get_model_info(self) -> dict[str, Any]:
+        """Get Gemini model information"""
+        model_info = {
+            "gemini-2.0-flash-exp": {
+                "max_tokens": 1000000,
+                "cost_per_1m_input": 0.075,
+                "cost_per_1m_output": 0.30,
+                "supports_vision": True,
+                "ideal_for": "Fast responses, simple tasks, large context",
+            },
+            "gemini-1.5-pro": {
+                "max_tokens": 2000000,
+                "cost_per_1m_input": 1.25,
+                "cost_per_1m_output": 5.00,
+                "supports_vision": True,
+                "ideal_for": "Complex reasoning, large codebases",
+            },
+            "gemini-2.5-pro": {
+                "max_tokens": 1000000,
+                "cost_per_1m_input": 2.50,
+                "cost_per_1m_output": 10.00,
+                "supports_vision": True,
+                "ideal_for": "Advanced reasoning, complex tasks",
+            },
+        }
+
+        return model_info.get(
+            self.model,
+            {
+                "max_tokens": 1000000,
+                "cost_per_1m_input": 1.25,
+                "cost_per_1m_output": 5.00,
+                "supports_vision": True,
+            },
+        )
+
+
 class LocalProvider(BaseLLMProvider):
     """
     Local model provider (Ollama, LM Studio, etc.).
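Driving the class directly looks like the sketch below; everything used here (`generate`'s signature, `LLMResponse.content`, `tokens_used`) appears in the added code, while the model choice and the environment-variable lookup are the caller's responsibility:

```python
import asyncio
import os

from empathy_llm_toolkit.providers import GeminiProvider


async def main() -> None:
    # Requires the google-generativeai package; the provider raises
    # ValueError if no API key is supplied.
    provider = GeminiProvider(
        api_key=os.environ["GOOGLE_API_KEY"],
        model="gemini-2.0-flash-exp",  # maps to the "cheap" tier per _get_tier()
    )
    response = await provider.generate(
        messages=[{"role": "user", "content": "Say hello in five words."}],
        system_prompt="Be concise.",
        max_tokens=64,
    )
    print(response.content, response.tokens_used)


asyncio.run(main())
```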
empathy_os/cli.py CHANGED

@@ -28,7 +28,6 @@ from empathy_os.persistence import MetricsCollector, PatternPersistence, StateMa
 from empathy_os.platform_utils import setup_asyncio_policy
 from empathy_os.templates import cmd_new
 from empathy_os.workflows import (
-    WorkflowConfig,
     cmd_fix_all,
     cmd_learn,
     cmd_morning,
@@ -1414,13 +1413,29 @@ def cmd_wizard(args):
     print("\n3. Which LLM provider will you use?")
     print(" [1] Anthropic Claude ⭐ Recommended")
     print(" [2] OpenAI GPT-4")
-    print(" [3]
-    print(" [4]
-
-
-
+    print(" [3] Google Gemini (2M context)")
+    print(" [4] Local (Ollama)")
+    print(" [5] Hybrid (mix best models from each provider)")
+    print(" [6] Skip (configure later)")
+
+    llm_choice = input("\nYour choice (1-6) [1]: ").strip() or "1"
+    llm_map = {
+        "1": "anthropic",
+        "2": "openai",
+        "3": "google",
+        "4": "ollama",
+        "5": "hybrid",
+        "6": None,
+    }
     llm_provider = llm_map.get(llm_choice, "anthropic")

+    # If hybrid selected, launch interactive tier selection
+    if llm_provider == "hybrid":
+        from empathy_os.models.provider_config import configure_hybrid_interactive
+
+        configure_hybrid_interactive()
+        llm_provider = None  # Already saved by hybrid config
+
     # Step 4: User ID
     print("\n4. What user ID should we use?")
     user_id = input("User ID [default_user]: ").strip() or "default_user"
@@ -1481,14 +1496,94 @@ llm_provider: "{llm_provider}"
     print("\nNext steps:")
     print(f" 1. Edit {output_file} to customize settings")

-    if llm_provider in ["anthropic", "openai"]:
-
+    if llm_provider in ["anthropic", "openai", "google"]:
+        env_var_map = {
+            "anthropic": "ANTHROPIC_API_KEY",
+            "openai": "OPENAI_API_KEY",
+            "google": "GOOGLE_API_KEY",
+        }
+        env_var = env_var_map.get(llm_provider, "API_KEY")
         print(f" 2. Set {env_var} environment variable")

     print(" 3. Run: empathy-framework run --config empathy.config.yml")
     print("\nHappy empathizing! 🧠✨\n")


+def cmd_provider_hybrid(args):
+    """Configure hybrid mode - pick best models for each tier."""
+    from empathy_os.models.provider_config import configure_hybrid_interactive
+
+    configure_hybrid_interactive()
+
+
+def cmd_provider_show(args):
+    """Show current provider configuration."""
+    from empathy_os.models.provider_config import ProviderConfig
+    from empathy_os.workflows.config import WorkflowConfig
+
+    print("\n" + "=" * 60)
+    print("Provider Configuration")
+    print("=" * 60)
+
+    # Detect available providers
+    config = ProviderConfig.auto_detect()
+    print(
+        f"\nDetected API keys for: {', '.join(config.available_providers) if config.available_providers else 'None'}"
+    )
+
+    # Load workflow config
+    wf_config = WorkflowConfig.load()
+    print(f"\nDefault provider: {wf_config.default_provider}")
+
+    # Show effective models
+    print("\nEffective model mapping:")
+    if wf_config.custom_models and "hybrid" in wf_config.custom_models:
+        hybrid = wf_config.custom_models["hybrid"]
+        for tier in ["cheap", "capable", "premium"]:
+            model = hybrid.get(tier, "not configured")
+            print(f"  {tier:8} → {model}")
+    else:
+        from empathy_os.models import MODEL_REGISTRY
+
+        provider = wf_config.default_provider
+        if provider in MODEL_REGISTRY:
+            for tier in ["cheap", "capable", "premium"]:
+                model_info = MODEL_REGISTRY[provider].get(tier)
+                if model_info:
+                    print(f"  {tier:8} → {model_info.id} ({provider})")
+
+    print()
+
+
+def cmd_provider_set(args):
+    """Set default provider."""
+    from pathlib import Path
+
+    import yaml
+
+    provider = args.name
+    workflows_path = Path(".empathy/workflows.yaml")
+
+    # Load existing config or create new
+    if workflows_path.exists():
+        with open(workflows_path) as f:
+            config = yaml.safe_load(f) or {}
+    else:
+        config = {}
+        workflows_path.parent.mkdir(parents=True, exist_ok=True)
+
+    config["default_provider"] = provider
+
+    with open(workflows_path, "w") as f:
+        yaml.dump(config, f, default_flow_style=False, sort_keys=False)
+
+    print(f"✓ Default provider set to: {provider}")
+    print(f"  Saved to: {workflows_path}")
+
+    if provider == "hybrid":
+        print("\n  Tip: Run 'empathy provider hybrid' to customize tier models")
+
+
 def cmd_sync_claude(args):
     """Sync patterns to Claude Code rules directory."""
     import json as json_mod
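Since `cmd_provider_set` only touches the `default_provider` key before dumping the mapping back out, the written file is easy to verify:

```python
# Sketch of inspecting what cmd_provider_set writes after e.g.
# `provider set google`; keys already present in the file are preserved.
import yaml

with open(".empathy/workflows.yaml") as f:
    print(yaml.safe_load(f))  # -> {'default_provider': 'google', ...}
```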
@@ -1764,8 +1859,14 @@ def cmd_workflow(args):
     try:
         workflow_cls = get_workflow(name)

-        # Get provider
-
+        # Get provider from CLI arg, or fall back to config's default_provider
+        if args.provider:
+            provider = args.provider
+        else:
+            from empathy_os.workflows.config import WorkflowConfig
+
+            wf_config = WorkflowConfig.load()
+            provider = wf_config.default_provider
         workflow = workflow_cls(provider=provider)

         # Parse input
@@ -1773,8 +1874,17 @@
         if args.input:
             input_data = json_mod.loads(args.input)

-
-
+        # Add test-gen specific flags to input_data (only for test-gen workflow)
+        if name == "test-gen":
+            if getattr(args, "write_tests", False):
+                input_data["write_tests"] = True
+            if getattr(args, "output_dir", None):
+                input_data["output_dir"] = args.output_dir
+
+        # Only print header when not in JSON mode
+        if not args.json:
+            print(f"\n Running workflow: {name} (provider: {provider})")
+            print("=" * 50)

         # Execute workflow
         result = asyncio.run(workflow.execute(**input_data))
@@ -1819,9 +1929,29 @@
         error = metadata.get("error") if isinstance(metadata, dict) else None

         # JSON output includes both content and metadata
+        # Include final_output for programmatic access (VSCode panels, etc.)
+        raw_final_output = getattr(result, "final_output", None)
+        if raw_final_output and isinstance(raw_final_output, dict):
+            # Make a copy to avoid modifying the original
+            final_output_serializable = {}
+            for k, v in raw_final_output.items():
+                # Skip non-serializable items
+                if isinstance(v, set):
+                    final_output_serializable[k] = list(v)
+                elif v is None or isinstance(v, str | int | float | bool | list | dict):
+                    final_output_serializable[k] = v
+                else:
+                    try:
+                        final_output_serializable[k] = str(v)
+                    except Exception:
+                        pass
+        else:
+            final_output_serializable = None
+
         output = {
             "success": result.success,
             "output": output_content,
+            "final_output": final_output_serializable,
             "cost": total_cost,
             "savings": savings,
             "duration_ms": duration_ms or 0,
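The sanitization pass keeps the `--json` payload `json.dumps`-safe: sets become lists, unknown objects are stringified or dropped. An illustrative payload, with hypothetical values and only the fields visible in this hunk:

```python
# Hypothetical --json payload; values invented for illustration, and the
# dict continues with fields defined outside this hunk.
{
    "success": True,
    "output": "…workflow report…",
    "final_output": {"files_written": ["tests/generated/test_core.py"]},
    "cost": 0.0123,
    "savings": 0.0045,
    "duration_ms": 8421,
}
```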
@@ -2135,6 +2265,33 @@ def main():
     )
     parser_wizard.set_defaults(func=cmd_wizard)

+    # Provider command (Model provider configuration)
+    parser_provider = subparsers.add_parser(
+        "provider", help="Configure model providers and hybrid mode"
+    )
+    provider_subparsers = parser_provider.add_subparsers(dest="provider_cmd")
+
+    # provider hybrid - Interactive hybrid configuration
+    parser_provider_hybrid = provider_subparsers.add_parser(
+        "hybrid", help="Configure hybrid mode - pick best models for each tier"
+    )
+    parser_provider_hybrid.set_defaults(func=cmd_provider_hybrid)
+
+    # provider show - Show current configuration
+    parser_provider_show = provider_subparsers.add_parser(
+        "show", help="Show current provider configuration"
+    )
+    parser_provider_show.set_defaults(func=cmd_provider_show)
+
+    # provider set - Quick set single provider
+    parser_provider_set = provider_subparsers.add_parser(
+        "set", help="Set default provider (anthropic, openai, google, ollama)"
+    )
+    parser_provider_set.add_argument(
+        "name", choices=["anthropic", "openai", "google", "ollama", "hybrid"], help="Provider name"
+    )
+    parser_provider_set.set_defaults(func=cmd_provider_set)
+
     # Status command (Session status assistant)
     parser_status = subparsers.add_parser(
         "status", help="Session status - prioritized project status report"
@@ -2338,9 +2495,9 @@
     parser_workflow.add_argument(
         "--provider",
         "-p",
-        choices=["anthropic", "openai", "ollama", "hybrid"],
+        choices=["anthropic", "openai", "google", "ollama", "hybrid"],
         default=None,  # None means use config
-        help="Model provider: anthropic, openai, ollama, or hybrid (mix of best models)",
+        help="Model provider: anthropic, openai, google, ollama, or hybrid (mix of best models)",
     )
     parser_workflow.add_argument(
         "--force",
@@ -2348,6 +2505,16 @@
         help="Force overwrite existing config file",
     )
     parser_workflow.add_argument("--json", action="store_true", help="Output as JSON")
+    parser_workflow.add_argument(
+        "--write-tests",
+        action="store_true",
+        help="(test-gen workflow) Write generated tests to disk",
+    )
+    parser_workflow.add_argument(
+        "--output-dir",
+        default="tests/generated",
+        help="(test-gen workflow) Output directory for generated tests",
+    )
     parser_workflow.set_defaults(func=cmd_workflow)

     # Sync-claude command (sync patterns to Claude Code)
empathy_os/cli_unified.py CHANGED

@@ -136,7 +136,7 @@ app.add_typer(provider_app, name="provider")
 def provider_show(
     ctx: typer.Context,
     set_provider: str | None = typer.Option(
-        None, "--set", "-s", help="Set provider (anthropic, openai, ollama, hybrid)"
+        None, "--set", "-s", help="Set provider (anthropic, openai, google, ollama, hybrid)"
     ),
     interactive: bool = typer.Option(False, "--interactive", "-i", help="Interactive setup wizard"),
     format_out: str = typer.Option("table", "--format", "-f", help="Output format (table, json)"),
empathy_os/redis_config.py CHANGED

@@ -2,7 +2,7 @@
 Redis Configuration for Empathy Framework

 Handles connection to Redis from environment variables.
-Supports Railway, redis.com, local Docker, or mock mode.
+Supports Railway, redis.com, local Docker, managed Redis, or mock mode.

 Environment Variables:
     REDIS_URL: Full Redis URL (redis://user:pass@host:port)
@@ -12,8 +12,29 @@ Environment Variables:
     REDIS_DB: Redis database number (default: 0)
     EMPATHY_REDIS_MOCK: Set to "true" to use mock mode

+    # SSL/TLS (for managed Redis services)
+    REDIS_SSL: Set to "true" to enable SSL
+    REDIS_SSL_CERT_REQS: Certificate requirement ("required", "optional", "none")
+    REDIS_SSL_CA_CERTS: Path to CA certificate file
+    REDIS_SSL_CERTFILE: Path to client certificate
+    REDIS_SSL_KEYFILE: Path to client key
+
+    # Connection settings
+    REDIS_SOCKET_TIMEOUT: Socket timeout in seconds (default: 5.0)
+    REDIS_MAX_CONNECTIONS: Connection pool size (default: 10)
+
+    # Retry settings
+    REDIS_RETRY_MAX_ATTEMPTS: Max retry attempts (default: 3)
+    REDIS_RETRY_BASE_DELAY: Base retry delay in seconds (default: 0.1)
+    REDIS_RETRY_MAX_DELAY: Max retry delay in seconds (default: 2.0)
+
+    # Sentinel (for high availability)
+    REDIS_SENTINEL_HOSTS: Comma-separated host:port pairs
+    REDIS_SENTINEL_MASTER: Sentinel master name
+
 Railway Auto-Detection:
     When deployed on Railway, REDIS_URL is automatically set.
+    For Railway Redis with SSL, the URL starts with "rediss://"

 Usage:
     from empathy_os.redis_config import get_redis_memory
@@ -21,8 +42,13 @@ Usage:
     # Automatically uses environment variables
     memory = get_redis_memory()

-    # Or with explicit URL
-    memory = get_redis_memory(url="
+    # Or with explicit URL (SSL auto-detected from rediss://)
+    memory = get_redis_memory(url="rediss://user:pass@managed-redis.com:6379")
+
+    # Or with explicit config
+    from empathy_os.memory.short_term import RedisConfig
+    config = RedisConfig(host="localhost", ssl=True)
+    memory = get_redis_memory(config=config)

 Copyright 2025 Smart AI Memory, LLC
 Licensed under Fair Source 0.9
@@ -31,65 +57,119 @@ Licensed under Fair Source 0.9
 import os
 from urllib.parse import urlparse

-from .
+from .memory.short_term import RedisConfig, RedisShortTermMemory


 def parse_redis_url(url: str) -> dict:
     """
     Parse Redis URL into connection parameters.

+    Supports:
+    - redis://user:pass@host:port/db (standard)
+    - rediss://user:pass@host:port/db (SSL enabled)
+
     Args:
-        url: Redis URL (redis://
+        url: Redis URL (redis:// or rediss://)

     Returns:
-        Dict with host, port, password, db
+        Dict with host, port, password, db, ssl
     """
     parsed = urlparse(url)
+
+    # Detect SSL from scheme
+    ssl = parsed.scheme == "rediss"
+
     return {
         "host": parsed.hostname or "localhost",
         "port": parsed.port or 6379,
         "password": parsed.password,
         "db": int(parsed.path.lstrip("/") or 0) if parsed.path else 0,
+        "ssl": ssl,
     }


-def get_redis_config() ->
+def get_redis_config() -> RedisConfig:
     """
     Get Redis configuration from environment variables.

     Priority:
-    1. REDIS_URL (full URL, used by Railway)
+    1. REDIS_URL (full URL, used by Railway/Heroku/managed services)
     2. Individual env vars (REDIS_HOST, REDIS_PORT, etc.)
     3. Defaults (localhost:6379)

     Returns:
-
+        RedisConfig with all connection parameters
     """
     # Check for mock mode
     if os.getenv("EMPATHY_REDIS_MOCK", "").lower() == "true":
-        return
+        return RedisConfig(use_mock=True)

-    # Check for full URL (Railway, Heroku,
+    # Check for full URL (Railway, Heroku, managed services)
     redis_url = os.getenv("REDIS_URL") or os.getenv("REDIS_PRIVATE_URL")
     if redis_url:
-
-
-
+        url_config = parse_redis_url(redis_url)
+        return RedisConfig(
+            host=url_config["host"],
+            port=url_config["port"],
+            password=url_config["password"],
+            db=url_config["db"],
+            ssl=url_config.get("ssl", False),
+            use_mock=False,
+            # Apply additional env var overrides
+            socket_timeout=float(os.getenv("REDIS_SOCKET_TIMEOUT", "5.0")),
+            max_connections=int(os.getenv("REDIS_MAX_CONNECTIONS", "10")),
+            retry_max_attempts=int(os.getenv("REDIS_RETRY_MAX_ATTEMPTS", "3")),
+            retry_base_delay=float(os.getenv("REDIS_RETRY_BASE_DELAY", "0.1")),
+            retry_max_delay=float(os.getenv("REDIS_RETRY_MAX_DELAY", "2.0")),
+        )

-    #
+    # Build config from individual env vars
+    return RedisConfig(
+        host=os.getenv("REDIS_HOST", "localhost"),
+        port=int(os.getenv("REDIS_PORT", "6379")),
+        password=os.getenv("REDIS_PASSWORD"),
+        db=int(os.getenv("REDIS_DB", "0")),
+        use_mock=False,
+        # SSL settings
+        ssl=os.getenv("REDIS_SSL", "").lower() == "true",
+        ssl_cert_reqs=os.getenv("REDIS_SSL_CERT_REQS"),
+        ssl_ca_certs=os.getenv("REDIS_SSL_CA_CERTS"),
+        ssl_certfile=os.getenv("REDIS_SSL_CERTFILE"),
+        ssl_keyfile=os.getenv("REDIS_SSL_KEYFILE"),
+        # Connection settings
+        socket_timeout=float(os.getenv("REDIS_SOCKET_TIMEOUT", "5.0")),
+        socket_connect_timeout=float(os.getenv("REDIS_SOCKET_CONNECT_TIMEOUT", "5.0")),
+        max_connections=int(os.getenv("REDIS_MAX_CONNECTIONS", "10")),
+        # Retry settings
+        retry_on_timeout=os.getenv("REDIS_RETRY_ON_TIMEOUT", "true").lower() == "true",
+        retry_max_attempts=int(os.getenv("REDIS_RETRY_MAX_ATTEMPTS", "3")),
+        retry_base_delay=float(os.getenv("REDIS_RETRY_BASE_DELAY", "0.1")),
+        retry_max_delay=float(os.getenv("REDIS_RETRY_MAX_DELAY", "2.0")),
+    )
+
+
+def get_redis_config_dict() -> dict:
+    """
+    Get Redis configuration as a dictionary (legacy compatibility).
+
+    Returns:
+        Dict with connection parameters
+    """
+    config = get_redis_config()
     return {
-        "host":
-        "port":
-        "password":
-        "db":
-        "use_mock":
+        "host": config.host,
+        "port": config.port,
+        "password": config.password,
+        "db": config.db,
+        "use_mock": config.use_mock,
+        "ssl": config.ssl,
     }


 def get_redis_memory(
     url: str | None = None,
     use_mock: bool | None = None,
+    config: RedisConfig | None = None,
 ) -> RedisShortTermMemory:
     """
     Create a RedisShortTermMemory instance with environment-based config.
@@ -97,6 +177,7 @@ def get_redis_memory(
     Args:
         url: Optional explicit Redis URL (overrides env vars)
         use_mock: Optional explicit mock mode (overrides env vars)
+        config: Optional explicit RedisConfig (overrides all other options)

     Returns:
         Configured RedisShortTermMemory instance
@@ -105,40 +186,46 @@ def get_redis_memory(
     # Auto-configure from environment
     memory = get_redis_memory()

-    # Explicit URL
-    memory = get_redis_memory(url="
+    # Explicit URL (SSL auto-detected from rediss://)
+    memory = get_redis_memory(url="rediss://user:pass@managed-redis.com:6379")

     # Force mock mode
     memory = get_redis_memory(use_mock=True)
+
+    # Explicit config with all options
+    from empathy_os.memory.short_term import RedisConfig
+    config = RedisConfig(
+        host="redis.example.com",
+        port=6379,
+        ssl=True,
+        retry_max_attempts=5,
+    )
+    memory = get_redis_memory(config=config)
     """
+    # Explicit config takes highest priority
+    if config is not None:
+        return RedisShortTermMemory(config=config)
+
     # Explicit mock mode
     if use_mock is True:
         return RedisShortTermMemory(use_mock=True)

     # Explicit URL
     if url:
-
-
-        host=
-        port=
-        password=
-        db=
+        url_config = parse_redis_url(url)
+        redis_config = RedisConfig(
+            host=url_config["host"],
+            port=url_config["port"],
+            password=url_config["password"],
+            db=url_config["db"],
+            ssl=url_config.get("ssl", False),
             use_mock=False,
         )
+        return RedisShortTermMemory(config=redis_config)

     # Environment-based config
-
-
-    if config.get("use_mock"):
-        return RedisShortTermMemory(use_mock=True)
-
-    return RedisShortTermMemory(
-        host=config["host"],
-        port=config["port"],
-        password=config.get("password"),
-        db=config.get("db", 0),
-        use_mock=False,
-    )
+    env_config = get_redis_config()
+    return RedisShortTermMemory(config=env_config)


 def check_redis_connection() -> dict:
@@ -157,11 +244,11 @@ def check_redis_connection() -> dict:

     result = {
         "config_source": "environment",
-        "use_mock": config.
-        "host": config.
-        "port": config.
-        "has_password": bool(config.
-        "db": config.
+        "use_mock": config.use_mock,
+        "host": config.host,
+        "port": config.port,
+        "has_password": bool(config.password),
+        "db": config.db,
         "connected": False,
         "error": None,
     }
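The `ssl` flag now rides along with every parsed URL, so a `rediss://` endpoint configures TLS without any extra env vars; reading the behavior straight off `parse_redis_url` above:

```python
from empathy_os.redis_config import parse_redis_url

print(parse_redis_url("rediss://default:s3cret@redis.example.com:6380/2"))
# -> {'host': 'redis.example.com', 'port': 6380, 'password': 's3cret', 'db': 2, 'ssl': True}
```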
empathy_os/workflow_commands.py CHANGED

@@ -347,8 +347,8 @@ def ship_workflow(
     # Import here to avoid circular imports
     try:
         from empathy_llm_toolkit.cli.sync_claude import (
-            sync_patterns_to_claude,
-        )
+            sync_patterns_to_claude,
+        )  # type: ignore[attr-defined]

         result = sync_patterns_to_claude(
             patterns_dir=patterns_dir, output_dir=".claude/rules/empathy"
Files without changes:
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/WHEEL
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/entry_points.txt
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/licenses/LICENSE
- {empathy_framework-3.5.3.dist-info → empathy_framework-3.5.6.dist-info}/top_level.txt
|