empathy-framework 4.7.0__py3-none-any.whl → 4.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86) hide show
  1. empathy_framework-4.8.0.dist-info/METADATA +753 -0
  2. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +83 -37
  3. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. empathy_os/__init__.py +2 -0
  6. empathy_os/cache/hash_only.py +6 -3
  7. empathy_os/cache/hybrid.py +6 -3
  8. empathy_os/cli/__init__.py +128 -238
  9. empathy_os/cli/__main__.py +5 -33
  10. empathy_os/cli/commands/__init__.py +1 -8
  11. empathy_os/cli/commands/help.py +331 -0
  12. empathy_os/cli/commands/info.py +140 -0
  13. empathy_os/cli/commands/inspect.py +437 -0
  14. empathy_os/cli/commands/metrics.py +92 -0
  15. empathy_os/cli/commands/orchestrate.py +184 -0
  16. empathy_os/cli/commands/patterns.py +207 -0
  17. empathy_os/cli/commands/provider.py +93 -81
  18. empathy_os/cli/commands/setup.py +96 -0
  19. empathy_os/cli/commands/status.py +235 -0
  20. empathy_os/cli/commands/sync.py +166 -0
  21. empathy_os/cli/commands/tier.py +121 -0
  22. empathy_os/cli/commands/workflow.py +574 -0
  23. empathy_os/cli/parsers/__init__.py +62 -0
  24. empathy_os/cli/parsers/help.py +41 -0
  25. empathy_os/cli/parsers/info.py +26 -0
  26. empathy_os/cli/parsers/inspect.py +66 -0
  27. empathy_os/cli/parsers/metrics.py +42 -0
  28. empathy_os/cli/parsers/orchestrate.py +61 -0
  29. empathy_os/cli/parsers/patterns.py +54 -0
  30. empathy_os/cli/parsers/provider.py +40 -0
  31. empathy_os/cli/parsers/setup.py +42 -0
  32. empathy_os/cli/parsers/status.py +47 -0
  33. empathy_os/cli/parsers/sync.py +31 -0
  34. empathy_os/cli/parsers/tier.py +33 -0
  35. empathy_os/cli/parsers/workflow.py +77 -0
  36. empathy_os/cli/utils/__init__.py +1 -0
  37. empathy_os/cli/utils/data.py +242 -0
  38. empathy_os/cli/utils/helpers.py +68 -0
  39. empathy_os/{cli.py → cli_legacy.py} +27 -27
  40. empathy_os/cli_minimal.py +662 -0
  41. empathy_os/cli_router.py +384 -0
  42. empathy_os/cli_unified.py +38 -2
  43. empathy_os/memory/__init__.py +19 -5
  44. empathy_os/memory/short_term.py +14 -404
  45. empathy_os/memory/types.py +437 -0
  46. empathy_os/memory/unified.py +61 -48
  47. empathy_os/models/fallback.py +1 -1
  48. empathy_os/models/provider_config.py +59 -344
  49. empathy_os/models/registry.py +31 -180
  50. empathy_os/monitoring/alerts.py +14 -20
  51. empathy_os/monitoring/alerts_cli.py +24 -7
  52. empathy_os/project_index/__init__.py +2 -0
  53. empathy_os/project_index/index.py +210 -5
  54. empathy_os/project_index/scanner.py +45 -14
  55. empathy_os/project_index/scanner_parallel.py +291 -0
  56. empathy_os/socratic/ab_testing.py +1 -1
  57. empathy_os/vscode_bridge 2.py +173 -0
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/progressive/README 2.md +454 -0
  69. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  70. empathy_os/workflows/progressive/cli 2.py +242 -0
  71. empathy_os/workflows/progressive/core 2.py +488 -0
  72. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  73. empathy_os/workflows/progressive/reports 2.py +528 -0
  74. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  75. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  76. empathy_os/workflows/progressive/workflow 2.py +628 -0
  77. empathy_os/workflows/routing.py +168 -0
  78. empathy_os/workflows/secure_release.py +1 -0
  79. empathy_os/workflows/security_audit.py +190 -0
  80. empathy_os/workflows/security_audit_phase3.py +328 -0
  81. empathy_os/workflows/telemetry_mixin.py +269 -0
  82. empathy_framework-4.7.0.dist-info/METADATA +0 -1598
  83. empathy_os/dashboard/__init__.py +0 -15
  84. empathy_os/dashboard/server.py +0 -941
  85. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
  86. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,574 @@
1
+ """Workflow commands for multi-model execution.
2
+
3
+ Copyright 2025 Smart-AI-Memory
4
+ Licensed under Fair Source License 0.9
5
+ """
6
+
7
+ import asyncio
8
+ import inspect
9
+ import json as json_mod
10
+ from pathlib import Path
11
+
12
+ from empathy_os.config import _validate_file_path
13
+ from empathy_os.logging_config import get_logger
14
+ from empathy_os.workflows import list_workflows as get_workflow_list
15
+ from empathy_os.workflows.config import WorkflowConfig, create_example_config, get_workflow
16
+
17
+ logger = get_logger(__name__)
18
+
19
+
20
+ def _extract_workflow_content(final_output):
21
+ """Extract readable content from workflow final_output.
22
+
23
+ Workflows return their results in various formats - this extracts
24
+ the actual content users want to see.
25
+ """
26
+ if final_output is None:
27
+ return None
28
+
29
+ # If it's already a string, return it
30
+ if isinstance(final_output, str):
31
+ return final_output
32
+
33
+ # If it's a dict, try to extract meaningful content
34
+ if isinstance(final_output, dict):
35
+ # Common keys that contain the main output
36
+ # formatted_report is first - preferred for security-audit and other formatted outputs
37
+ content_keys = [
38
+ "formatted_report", # Human-readable formatted output (security-audit, etc.)
39
+ "answer",
40
+ "synthesis",
41
+ "result",
42
+ "output",
43
+ "content",
44
+ "report",
45
+ "summary",
46
+ "analysis",
47
+ "review",
48
+ "documentation",
49
+ "response",
50
+ "recommendations",
51
+ "findings",
52
+ "tests",
53
+ "plan",
54
+ ]
55
+ for key in content_keys:
56
+ if final_output.get(key):
57
+ val = final_output[key]
58
+ if isinstance(val, str):
59
+ return val
60
+ if isinstance(val, dict):
61
+ # Recursively extract
62
+ return _extract_workflow_content(val)
63
+
64
+ # If no common key found, try to format the dict nicely
65
+ # Look for any string value that's substantial
66
+ for _key, val in final_output.items():
67
+ if isinstance(val, str) and len(val) > 100:
68
+ return val
69
+
70
+ # Last resort: return a formatted version
71
+ return json_mod.dumps(final_output, indent=2)
72
+
73
+ # For lists or other types, convert to string
74
+ return str(final_output)
75
+
76
+
77
def cmd_workflow(args):
    """Multi-model workflow management and execution.

    Supports listing, describing, and running workflows with tier-based
    models, plus generating a workflow configuration file.

    Args:
        args: Namespace object from argparse with attributes:
            - action (str): Action to perform ('list', 'describe', 'run', 'config').
            - name (str | None): Workflow name (for describe/run).
            - input (str | None): JSON input for workflow execution.
            - provider (str | None): LLM provider override.
            - json (bool): If True, output as JSON format.
            - use_recommended_tier (bool): Enable tier fallback.
            - write_tests (bool): For test-gen, write tests to files.
            - output_dir (str | None): For test-gen, output directory.

    Returns:
        int: 0 on success, 1 on failure (missing name, unknown workflow,
        bad input JSON, or unknown action).
    """
    action = args.action

    if action == "list":
        # List available workflows
        workflows = get_workflow_list()

        if args.json:
            print(json_mod.dumps(workflows, indent=2))
        else:
            print("\n" + "=" * 60)
            print(" MULTI-MODEL WORKFLOWS")
            print("=" * 60 + "\n")

            for wf in workflows:
                print(f" {wf['name']:15} {wf['description']}")
                stages = " → ".join(f"{s}({wf['tier_map'][s]})" for s in wf["stages"])
                print(f" Stages: {stages}")
                print()

            print("-" * 60)
            print(" Use: empathy workflow describe <name>")
            print(" Use: empathy workflow run <name> [--input JSON]")
            print("=" * 60 + "\n")

    elif action == "describe":
        # Describe a specific workflow
        name = args.name
        if not name:
            print("Error: workflow name required")
            print("Usage: empathy workflow describe <name>")
            return 1

        try:
            workflow_cls = get_workflow(name)
            provider = getattr(args, "provider", None)
            workflow = workflow_cls(provider=provider)

            # Get actual provider from workflow (may come from config)
            actual_provider = getattr(workflow, "_provider_str", provider or "anthropic")

            if args.json:
                info = {
                    "name": workflow.name,
                    "description": workflow.description,
                    "provider": actual_provider,
                    "stages": workflow.stages,
                    # tier_map values are enum members; serialize via .value
                    "tier_map": {k: v.value for k, v in workflow.tier_map.items()},
                    "models": {
                        stage: workflow.get_model_for_tier(workflow.tier_map[stage])
                        for stage in workflow.stages
                    },
                }
                print(json_mod.dumps(info, indent=2))
            else:
                print(f"Provider: {actual_provider}")
                print(workflow.describe())

        # get_workflow raises KeyError for unknown workflow names
        except KeyError as e:
            print(f"Error: {e}")
            return 1

    elif action == "run":
        # Run a workflow
        name = args.name
        if not name:
            print("Error: workflow name required")
            print('Usage: empathy workflow run <name> --input \'{"key": "value"}\'')
            return 1

        try:
            workflow_cls = get_workflow(name)

            # Get provider from CLI arg, or fall back to config's default_provider
            if args.provider:
                provider = args.provider
            else:
                wf_config = WorkflowConfig.load()
                provider = wf_config.default_provider

            # Initialize workflow with provider and optional tier fallback
            # Note: Not all workflows support enable_tier_fallback, so we check first
            use_tier_fallback = getattr(args, "use_recommended_tier", False)

            # Get the workflow's __init__ signature to know what params it accepts
            init_sig = inspect.signature(workflow_cls.__init__)
            init_params = set(init_sig.parameters.keys())

            workflow_kwargs = {}

            # Add provider if supported
            if "provider" in init_params:
                workflow_kwargs["provider"] = provider

            # Add enable_tier_fallback only if the workflow supports it
            if "enable_tier_fallback" in init_params and use_tier_fallback:
                workflow_kwargs["enable_tier_fallback"] = use_tier_fallback

            # Add health-check specific parameters
            if name == "health-check" and "health_score_threshold" in init_params:
                health_score_threshold = getattr(args, "health_score_threshold", 100)
                workflow_kwargs["health_score_threshold"] = health_score_threshold

            workflow = workflow_cls(**workflow_kwargs)

            # Parse input (raises json.JSONDecodeError on malformed input,
            # handled below)
            input_data = {}
            if args.input:
                input_data = json_mod.loads(args.input)

            # Add test-gen specific flags to input_data (only for test-gen workflow)
            if name == "test-gen":
                if getattr(args, "write_tests", False):
                    input_data["write_tests"] = True
                if getattr(args, "output_dir", None):
                    input_data["output_dir"] = args.output_dir

            # Only print header when not in JSON mode
            if not args.json:
                print(f"\n Running workflow: {name} (provider: {provider})")
                print("=" * 50)

            # Execute workflow (workflow.execute is async; run to completion)
            result = asyncio.run(workflow.execute(**input_data))

            # Extract the actual content - handle different result types
            if hasattr(result, "final_output"):
                output_content = _extract_workflow_content(result.final_output)
            elif hasattr(result, "metadata") and isinstance(result.metadata, dict):
                # Check for formatted_report in metadata (e.g., HealthCheckResult)
                output_content = result.metadata.get("formatted_report")
                if not output_content and hasattr(result, "summary"):
                    output_content = result.summary
            elif hasattr(result, "summary"):
                output_content = result.summary
            else:
                output_content = str(result)

            # Get timing - handle different attribute names
            duration_ms = getattr(result, "total_duration_ms", None)
            if duration_ms is None and hasattr(result, "duration_seconds"):
                duration_ms = int(result.duration_seconds * 1000)

            # Get cost info if available (check cost_report first, then direct cost attribute)
            cost_report = getattr(result, "cost_report", None)
            if cost_report and hasattr(cost_report, "total_cost"):
                total_cost = cost_report.total_cost
                savings = getattr(cost_report, "savings", 0.0)
            else:
                # Fall back to direct cost attribute (e.g., CodeReviewPipelineResult)
                total_cost = getattr(result, "cost", 0.0)
                savings = 0.0

            if args.json:
                # Extract error from various result types
                error = getattr(result, "error", None)
                # Some result types expose 'success', others 'approved';
                # default to True when neither exists.
                is_successful = getattr(result, "success", getattr(result, "approved", True))
                if not error and not is_successful:
                    blockers = getattr(result, "blockers", [])
                    if blockers:
                        error = "; ".join(blockers)
                    else:
                        metadata = getattr(result, "metadata", {})
                        error = metadata.get("error") if isinstance(metadata, dict) else None

                # JSON output includes both content and metadata
                # Include final_output for programmatic access (VSCode panels, etc.)
                raw_final_output = getattr(result, "final_output", None)
                if raw_final_output and isinstance(raw_final_output, dict):
                    # Make a copy to avoid modifying the original
                    final_output_serializable = {}
                    for k, v in raw_final_output.items():
                        # Skip non-serializable items
                        if isinstance(v, set):
                            final_output_serializable[k] = list(v)
                        elif v is None or isinstance(v, str | int | float | bool | list | dict):
                            final_output_serializable[k] = v
                        else:
                            try:
                                final_output_serializable[k] = str(v)
                            except Exception as e:  # noqa: BLE001
                                # INTENTIONAL: Silently skip any non-serializable objects
                                # This is a best-effort serialization for JSON output
                                # We cannot predict all possible object types users might return
                                logger.debug(f"Cannot serialize field {k}: {e}")
                                pass
                else:
                    final_output_serializable = None

                output = {
                    "success": is_successful,
                    "output": output_content,
                    "final_output": final_output_serializable,
                    "cost": total_cost,
                    "savings": savings,
                    "duration_ms": duration_ms or 0,
                    "error": error,
                }
                print(json_mod.dumps(output, indent=2))
            # Display the actual results - this is what users want to see
            else:
                # Show tier progression if tier fallback was used
                if use_tier_fallback and hasattr(workflow, "_tier_progression"):
                    tier_progression = workflow._tier_progression
                    if tier_progression:
                        print("\n" + "=" * 60)
                        print(" TIER PROGRESSION (Intelligent Fallback)")
                        print("=" * 60)

                        # Group by stage
                        stage_tiers: dict[str, list[tuple[str, bool]]] = {}
                        for stage, tier, success in tier_progression:
                            if stage not in stage_tiers:
                                stage_tiers[stage] = []
                            stage_tiers[stage].append((tier, success))

                        # Display progression for each stage
                        for stage, attempts in stage_tiers.items():
                            status = "✓" if any(success for _, success in attempts) else "✗"
                            print(f"\n{status} Stage: {stage}")

                            for idx, (tier, success) in enumerate(attempts, 1):
                                attempt_status = "✓ SUCCESS" if success else "✗ FAILED"
                                if idx == 1:
                                    print(f" Attempt {idx}: {tier.upper():8} → {attempt_status}")
                                else:
                                    # attempts is 1-indexed via enumerate, so the
                                    # previous attempt lives at list index idx - 2
                                    prev_tier = attempts[idx - 2][0]
                                    print(
                                        f" Attempt {idx}: {tier.upper():8} → {attempt_status} "
                                        f"(upgraded from {prev_tier.upper()})"
                                    )

                        # Calculate cost savings (only if result has stages attribute)
                        if hasattr(result, "stages") and result.stages:
                            actual_cost = sum(stage.cost for stage in result.stages if stage.cost)
                            # Estimate what cost would be if all stages used PREMIUM
                            premium_cost = actual_cost * 3  # Conservative estimate

                            # NOTE: this deliberately recomputes 'savings' for
                            # the tier-progression display, replacing the
                            # cost_report-derived value above.
                            savings = premium_cost - actual_cost
                            savings_pct = (savings / premium_cost * 100) if premium_cost > 0 else 0

                            print("\n" + "-" * 60)
                            print("💰 Cost Savings:")
                            print(f" Actual cost: ${actual_cost:.4f}")
                            print(f" Premium cost: ${premium_cost:.4f} (if all PREMIUM)")
                            print(f" Savings: ${savings:.4f} ({savings_pct:.1f}%)")
                            print("=" * 60 + "\n")

                # Display workflow result
                # Handle different result types (success, approved, etc.)
                is_successful = getattr(result, "success", getattr(result, "approved", True))
                if is_successful:
                    if output_content:
                        print(f"\n{output_content}\n")
                    else:
                        print("\n✓ Workflow completed successfully.\n")
                else:
                    # Extract error from various result types
                    error_msg = getattr(result, "error", None)
                    if not error_msg:
                        # Check for blockers (CodeReviewPipelineResult)
                        blockers = getattr(result, "blockers", [])
                        if blockers:
                            error_msg = "; ".join(blockers)
                        else:
                            # Check metadata for error
                            metadata = getattr(result, "metadata", {})
                            error_msg = (
                                metadata.get("error") if isinstance(metadata, dict) else None
                            )
                    error_msg = error_msg or "Unknown error"
                    print(f"\n✗ Workflow failed: {error_msg}\n")

        except KeyError as e:
            print(f"Error: {e}")
            return 1
        except json_mod.JSONDecodeError as e:
            print(f"Error parsing input JSON: {e}")
            return 1

    elif action == "config":
        # Generate or show workflow configuration
        config_path = Path(".empathy/workflows.yaml")

        if config_path.exists() and not getattr(args, "force", False):
            print(f"Config already exists: {config_path}")
            print("Use --force to overwrite")
            print("\nCurrent configuration:")
            print("-" * 40)
            config = WorkflowConfig.load()
            print(f" Default provider: {config.default_provider}")
            if config.workflow_providers:
                print(" Workflow providers:")
                for wf, prov in config.workflow_providers.items():
                    print(f" {wf}: {prov}")
            if config.custom_models:
                print(" Custom models configured")
            return 0

        # Create config directory and file
        config_path.parent.mkdir(parents=True, exist_ok=True)
        # _validate_file_path guards against writing outside allowed paths
        validated_config_path = _validate_file_path(str(config_path))
        validated_config_path.write_text(create_example_config())
        print(f"✓ Created workflow config: {validated_config_path}")
        print("\nEdit this file to customize:")
        print(" - Default provider (anthropic, openai, ollama)")
        print(" - Per-workflow provider overrides")
        print(" - Custom model mappings")
        print(" - Model pricing")
        print("\nOr use environment variables:")
        print(" EMPATHY_WORKFLOW_PROVIDER=openai")
        print(" EMPATHY_MODEL_PREMIUM=gpt-5.2")

    else:
        print(f"Unknown action: {action}")
        print("Available: list, describe, run, config")
        return 1

    return 0
414
+
415
+
416
def cmd_workflow_legacy(args):
    """Interactive setup workflow (DEPRECATED).

    DEPRECATED: This command is deprecated in favor of 'empathy init'.
    It will be removed in version 5.0.

    Guides user through initial framework configuration step by step and
    writes the choices to empathy.config.yml in the current directory.

    Args:
        args: Namespace object from argparse (no additional attributes used).

    Returns:
        None: Creates empathy.config.yml with user's choices.
    """
    import warnings

    warnings.warn(
        "The 'workflow-setup' command is deprecated. "
        "Use 'empathy init' instead. "
        "This command will be removed in version 5.0.",
        DeprecationWarning,
        stacklevel=2,
    )

    print("⚠️ DEPRECATED: This command is being replaced by 'empathy init'")
    print(" Please use 'empathy init' for interactive setup.")
    print(" This command will be removed in version 5.0.\n")
    print("=" * 60)

    print("🧙 Empathy Framework Setup Workflow")
    print("=" * 50)
    print("\nI'll help you set up your Empathy Framework configuration.\n")

    # Step 1: Use case
    print("1. What's your primary use case?")
    print(" [1] Software development")
    print(" [2] Healthcare applications")
    print(" [3] Customer support")
    print(" [4] Other")

    use_case_choice = input("\nYour choice (1-4): ").strip()
    # Any unrecognized answer falls back to "general"
    use_case_map = {
        "1": "software_development",
        "2": "healthcare",
        "3": "customer_support",
        "4": "general",
    }
    use_case = use_case_map.get(use_case_choice, "general")

    # Step 2: Empathy level
    print("\n2. What empathy level do you want to target?")
    print(" [1] Level 1 - Reactive (basic Q&A)")
    print(" [2] Level 2 - Guided (asks clarifying questions)")
    print(" [3] Level 3 - Proactive (offers improvements)")
    print(" [4] Level 4 - Anticipatory (predicts problems) ⭐ Recommended")
    print(" [5] Level 5 - Transformative (reshapes workflows)")

    # Empty input defaults to "4"; invalid input also resolves to level 4
    level_choice = input("\nYour choice (1-5) [4]: ").strip() or "4"
    target_level = int(level_choice) if level_choice in ["1", "2", "3", "4", "5"] else 4

    # Step 3: LLM provider
    print("\n3. Which LLM provider will you use?")
    print(" [1] Anthropic Claude ⭐ Recommended")
    print(" [2] OpenAI GPT-4")
    print(" [3] Google Gemini (2M context)")
    print(" [4] Local (Ollama)")
    print(" [5] Hybrid (mix best models from each provider)")
    print(" [6] Skip (configure later)")

    llm_choice = input("\nYour choice (1-6) [1]: ").strip() or "1"
    llm_map = {
        "1": "anthropic",
        "2": "openai",
        "3": "google",
        "4": "ollama",
        "5": "hybrid",
        "6": None,  # skip: no llm_provider key written to config
    }
    llm_provider = llm_map.get(llm_choice, "anthropic")

    # If hybrid selected, launch interactive tier selection
    if llm_provider == "hybrid":
        # Local import keeps hybrid config machinery out of the common path
        from empathy_os.models.provider_config import configure_hybrid_interactive

        configure_hybrid_interactive()
        llm_provider = None  # Already saved by hybrid config

    # Step 4: User ID
    print("\n4. What user ID should we use?")
    user_id = input("User ID [default_user]: ").strip() or "default_user"

    # Generate configuration
    config = {
        "user_id": user_id,
        "target_level": target_level,
        "confidence_threshold": 0.75,
        "persistence_enabled": True,
        "persistence_backend": "sqlite",
        "persistence_path": ".empathy",
        "metrics_enabled": True,
        "use_case": use_case,
    }

    if llm_provider:
        config["llm_provider"] = llm_provider

    # Save configuration
    output_file = "empathy.config.yml"
    print(f"\n5. Creating configuration file: {output_file}")

    # Write YAML config (keys at column 0 so the emitted YAML is valid)
    yaml_content = f"""# Empathy Framework Configuration
# Generated by setup workflow

# Core settings
user_id: "{config["user_id"]}"
target_level: {config["target_level"]}
confidence_threshold: {config["confidence_threshold"]}

# Use case
use_case: "{config["use_case"]}"

# Persistence
persistence_enabled: {str(config["persistence_enabled"]).lower()}
persistence_backend: "{config["persistence_backend"]}"
persistence_path: "{config["persistence_path"]}"

# Metrics
metrics_enabled: {str(config["metrics_enabled"]).lower()}
"""

    if llm_provider:
        yaml_content += f"""
# LLM Provider
llm_provider: "{llm_provider}"
"""

    # _validate_file_path guards against writing outside allowed paths
    validated_output = _validate_file_path(output_file)
    with open(validated_output, "w") as f:
        f.write(yaml_content)

    print(f" ✓ Created {validated_output}")

    print("\n" + "=" * 50)
    print("✅ Setup complete!")
    print("\nNext steps:")
    print(f" 1. Edit {output_file} to customize settings")

    if llm_provider in ["anthropic", "openai", "google"]:
        env_var_map = {
            "anthropic": "ANTHROPIC_API_KEY",
            "openai": "OPENAI_API_KEY",
            "google": "GOOGLE_API_KEY",
        }
        env_var = env_var_map.get(llm_provider, "API_KEY")
        print(f" 2. Set {env_var} environment variable")

    print(" 3. Run: empathy-framework run --config empathy.config.yml")
    print("\nHappy empathizing! 🧠✨\n")
@@ -0,0 +1,62 @@
1
+ """CLI parser registration.
2
+
3
+ This module coordinates parser registration for all CLI commands.
4
+
5
+ Copyright 2025 Smart-AI-Memory
6
+ Licensed under Fair Source License 0.9
7
+ """
8
+
9
+ from . import (
10
+ help,
11
+ info,
12
+ inspect,
13
+ metrics,
14
+ orchestrate,
15
+ patterns,
16
+ provider,
17
+ setup,
18
+ status,
19
+ sync,
20
+ tier,
21
+ workflow,
22
+ )
23
+
24
+
25
def register_all_parsers(subparsers):
    """Register all command parsers.

    This function is called from the main CLI entry point to set up
    all subcommands and their argument parsers.

    Args:
        subparsers: ArgumentParser subparsers object from main parser

    Note:
        All 30 commands have been extracted from the monolithic cli.py
        and organized into focused modules.
    """
    # Registration order is preserved; groups are annotated inline.
    command_modules = (
        # Core commands
        help,
        tier,
        info,
        # Pattern and state management
        patterns,
        status,
        # Workflow and execution
        workflow,
        inspect,
        # Provider configuration
        provider,
        # Orchestration and sync
        orchestrate,
        sync,
        # Metrics and state
        metrics,
        # Setup and initialization
        setup,
    )
    for module in command_modules:
        module.register_parsers(subparsers)
@@ -0,0 +1,41 @@
1
+ """Parser definitions for help commands.
2
+
3
+ Copyright 2025 Smart-AI-Memory
4
+ Licensed under Fair Source License 0.9
5
+ """
6
+
7
+ from ..commands import help as help_commands
8
+
9
+
10
def register_parsers(subparsers):
    """Register help command parsers.

    Wires up the version, cheatsheet, onboard, explain, and achievements
    subcommands, each dispatching to its handler in the commands module.

    Args:
        subparsers: ArgumentParser subparsers object
    """
    # version command
    version_parser = subparsers.add_parser("version", help="Display version information")
    version_parser.set_defaults(func=help_commands.cmd_version)

    # cheatsheet command
    cheatsheet_parser = subparsers.add_parser("cheatsheet", help="Quick reference guide")
    cheatsheet_parser.add_argument("--category", help="Specific category to show")
    cheatsheet_parser.add_argument("--compact", action="store_true", help="Show commands only")
    cheatsheet_parser.set_defaults(func=help_commands.cmd_cheatsheet)

    # onboard command
    onboard_parser = subparsers.add_parser("onboard", help="Interactive tutorial")
    onboard_parser.add_argument("--step", type=int, help="Jump to specific step")
    onboard_parser.add_argument("--reset", action="store_true", help="Reset progress")
    onboard_parser.set_defaults(func=help_commands.cmd_onboard)

    # explain command
    explain_parser = subparsers.add_parser("explain", help="Explain a command in detail")
    explain_parser.add_argument("command", help="Command to explain")
    explain_parser.set_defaults(func=help_commands.cmd_explain)

    # achievements command
    achievements_parser = subparsers.add_parser("achievements", help="Show user progress")
    achievements_parser.set_defaults(func=help_commands.cmd_achievements)