empathy-framework 4.7.0__py3-none-any.whl → 4.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. empathy_framework-4.8.0.dist-info/METADATA +753 -0
  2. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +83 -37
  3. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. empathy_os/__init__.py +2 -0
  6. empathy_os/cache/hash_only.py +6 -3
  7. empathy_os/cache/hybrid.py +6 -3
  8. empathy_os/cli/__init__.py +128 -238
  9. empathy_os/cli/__main__.py +5 -33
  10. empathy_os/cli/commands/__init__.py +1 -8
  11. empathy_os/cli/commands/help.py +331 -0
  12. empathy_os/cli/commands/info.py +140 -0
  13. empathy_os/cli/commands/inspect.py +437 -0
  14. empathy_os/cli/commands/metrics.py +92 -0
  15. empathy_os/cli/commands/orchestrate.py +184 -0
  16. empathy_os/cli/commands/patterns.py +207 -0
  17. empathy_os/cli/commands/provider.py +93 -81
  18. empathy_os/cli/commands/setup.py +96 -0
  19. empathy_os/cli/commands/status.py +235 -0
  20. empathy_os/cli/commands/sync.py +166 -0
  21. empathy_os/cli/commands/tier.py +121 -0
  22. empathy_os/cli/commands/workflow.py +574 -0
  23. empathy_os/cli/parsers/__init__.py +62 -0
  24. empathy_os/cli/parsers/help.py +41 -0
  25. empathy_os/cli/parsers/info.py +26 -0
  26. empathy_os/cli/parsers/inspect.py +66 -0
  27. empathy_os/cli/parsers/metrics.py +42 -0
  28. empathy_os/cli/parsers/orchestrate.py +61 -0
  29. empathy_os/cli/parsers/patterns.py +54 -0
  30. empathy_os/cli/parsers/provider.py +40 -0
  31. empathy_os/cli/parsers/setup.py +42 -0
  32. empathy_os/cli/parsers/status.py +47 -0
  33. empathy_os/cli/parsers/sync.py +31 -0
  34. empathy_os/cli/parsers/tier.py +33 -0
  35. empathy_os/cli/parsers/workflow.py +77 -0
  36. empathy_os/cli/utils/__init__.py +1 -0
  37. empathy_os/cli/utils/data.py +242 -0
  38. empathy_os/cli/utils/helpers.py +68 -0
  39. empathy_os/{cli.py → cli_legacy.py} +27 -27
  40. empathy_os/cli_minimal.py +662 -0
  41. empathy_os/cli_router.py +384 -0
  42. empathy_os/cli_unified.py +38 -2
  43. empathy_os/memory/__init__.py +19 -5
  44. empathy_os/memory/short_term.py +14 -404
  45. empathy_os/memory/types.py +437 -0
  46. empathy_os/memory/unified.py +61 -48
  47. empathy_os/models/fallback.py +1 -1
  48. empathy_os/models/provider_config.py +59 -344
  49. empathy_os/models/registry.py +31 -180
  50. empathy_os/monitoring/alerts.py +14 -20
  51. empathy_os/monitoring/alerts_cli.py +24 -7
  52. empathy_os/project_index/__init__.py +2 -0
  53. empathy_os/project_index/index.py +210 -5
  54. empathy_os/project_index/scanner.py +45 -14
  55. empathy_os/project_index/scanner_parallel.py +291 -0
  56. empathy_os/socratic/ab_testing.py +1 -1
  57. empathy_os/vscode_bridge 2.py +173 -0
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/progressive/README 2.md +454 -0
  69. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  70. empathy_os/workflows/progressive/cli 2.py +242 -0
  71. empathy_os/workflows/progressive/core 2.py +488 -0
  72. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  73. empathy_os/workflows/progressive/reports 2.py +528 -0
  74. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  75. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  76. empathy_os/workflows/progressive/workflow 2.py +628 -0
  77. empathy_os/workflows/routing.py +168 -0
  78. empathy_os/workflows/secure_release.py +1 -0
  79. empathy_os/workflows/security_audit.py +190 -0
  80. empathy_os/workflows/security_audit_phase3.py +328 -0
  81. empathy_os/workflows/telemetry_mixin.py +269 -0
  82. empathy_framework-4.7.0.dist-info/METADATA +0 -1598
  83. empathy_os/dashboard/__init__.py +0 -15
  84. empathy_os/dashboard/server.py +0 -941
  85. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
  86. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -0
@@ -19,11 +19,9 @@ from .registry import MODEL_REGISTRY, ModelInfo, ModelTier
19
19
 
20
20
 
21
21
  class ProviderMode(str, Enum):
22
- """How the system selects models across providers."""
22
+ """Provider selection mode (Anthropic-only as of v5.0.0)."""
23
23
 
24
- SINGLE = "single" # Use one provider for all tiers
25
- HYBRID = "hybrid" # Best-of across providers (requires multiple API keys)
26
- CUSTOM = "custom" # User-defined per-tier mapping
24
+ SINGLE = "single" # Anthropic for all tiers
27
25
 
28
26
 
29
27
  @dataclass
@@ -43,32 +41,20 @@ class ProviderConfig:
43
41
  available_providers: list[str] = field(default_factory=list)
44
42
 
45
43
  # User preferences
46
- prefer_local: bool = False # Prefer Ollama when available
44
+ prefer_local: bool = False # Deprecated (v5.0.0)
47
45
  cost_optimization: bool = True # Use cheaper tiers when appropriate
48
46
 
49
47
  @classmethod
50
48
  def detect_available_providers(cls) -> list[str]:
51
- """Detect which providers have API keys configured."""
49
+ """Detect if Anthropic API key is configured (Anthropic-only as of v5.0.0)."""
52
50
  available = []
53
51
 
54
52
  # Load .env files if they exist (project root and home)
55
53
  env_keys = cls._load_env_files()
56
54
 
57
- # Check environment variables for API keys
58
- provider_env_vars = {
59
- "anthropic": ["ANTHROPIC_API_KEY"],
60
- "openai": ["OPENAI_API_KEY"],
61
- "google": ["GOOGLE_API_KEY", "GEMINI_API_KEY"],
62
- "ollama": [], # Ollama is local, check if running
63
- }
64
-
65
- for provider, env_vars in provider_env_vars.items():
66
- if provider == "ollama":
67
- # Check if Ollama is available (local)
68
- if cls._check_ollama_available():
69
- available.append(provider)
70
- elif any(os.getenv(var) or env_keys.get(var) for var in env_vars):
71
- available.append(provider)
55
+ # Check for ANTHROPIC_API_KEY
56
+ if os.getenv("ANTHROPIC_API_KEY") or env_keys.get("ANTHROPIC_API_KEY"):
57
+ available.append("anthropic")
72
58
 
73
59
  return available
74
60
 
@@ -101,68 +87,32 @@ class ProviderConfig:
101
87
 
102
88
  return env_keys
103
89
 
104
- @staticmethod
105
- def _check_ollama_available() -> bool:
106
- """Check if Ollama is running locally."""
107
- try:
108
- import socket
109
-
110
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
111
- sock.settimeout(1)
112
- result = sock.connect_ex(("localhost", 11434))
113
- sock.close()
114
- return result == 0
115
- except Exception:
116
- return False
117
-
118
90
  @classmethod
119
91
  def auto_detect(cls) -> ProviderConfig:
120
- """Auto-detect the best configuration based on available API keys.
92
+ """Auto-detect configuration (Anthropic-only as of v5.0.0).
121
93
 
122
- Logic:
123
- - If only one provider available → SINGLE mode with that provider
124
- - If multiple providers available → SINGLE mode with first cloud provider
125
- - If no providers available → SINGLE mode with anthropic (will prompt for key)
94
+ Returns:
95
+ ProviderConfig with Anthropic as primary provider
126
96
  """
127
97
  available = cls.detect_available_providers()
128
98
 
129
- if len(available) == 0:
130
- # No providers detected, default to anthropic
131
- return cls(
132
- mode=ProviderMode.SINGLE,
133
- primary_provider="anthropic",
134
- available_providers=[],
135
- )
136
- if len(available) == 1:
137
- # Single provider available, use it
138
- return cls(
139
- mode=ProviderMode.SINGLE,
140
- primary_provider=available[0],
141
- available_providers=available,
142
- )
143
- # Multiple providers available
144
- # Default to first cloud provider (prefer anthropic > openai > google > ollama)
145
- priority = ["anthropic", "openai", "google", "ollama"]
146
- primary = next((p for p in priority if p in available), available[0])
147
99
  return cls(
148
100
  mode=ProviderMode.SINGLE,
149
- primary_provider=primary,
101
+ primary_provider="anthropic",
150
102
  available_providers=available,
151
103
  )
152
104
 
153
105
  def get_model_for_tier(self, tier: str | ModelTier) -> ModelInfo | None:
154
- """Get the model to use for a given tier based on current config."""
155
- tier_str = tier.value if isinstance(tier, ModelTier) else tier
106
+ """Get the model to use for a given tier (Anthropic-only as of v5.0.0).
156
107
 
157
- if self.mode == ProviderMode.HYBRID:
158
- # Use hybrid provider from registry
159
- return MODEL_REGISTRY.get("hybrid", {}).get(tier_str)
160
- if self.mode == ProviderMode.CUSTOM:
161
- # Use per-tier provider mapping
162
- provider = self.tier_providers.get(tier_str, self.primary_provider)
163
- return MODEL_REGISTRY.get(provider, {}).get(tier_str)
164
- # SINGLE mode: use primary provider for all tiers
165
- return MODEL_REGISTRY.get(self.primary_provider, {}).get(tier_str)
108
+ Args:
109
+ tier: Tier level (cheap, capable, premium)
110
+
111
+ Returns:
112
+ ModelInfo for the Anthropic model at the specified tier
113
+ """
114
+ tier_str = tier.value if isinstance(tier, ModelTier) else tier
115
+ return MODEL_REGISTRY.get("anthropic", {}).get(tier_str)
166
116
 
167
117
  def get_effective_registry(self) -> dict[str, ModelInfo]:
168
118
  """Get the effective model registry based on current config."""
@@ -224,108 +174,52 @@ class ProviderConfig:
224
174
 
225
175
  # Interactive configuration for install/update
226
176
  def configure_provider_interactive() -> ProviderConfig:
227
- """Interactive provider configuration for install/update.
177
+ """Interactive provider configuration for install/update (Anthropic-only as of v5.0.0).
228
178
 
229
- Returns configured ProviderConfig after user selection.
179
+ Returns:
180
+ ProviderConfig configured for Anthropic
230
181
  """
231
182
  print("\n" + "=" * 60)
232
- print("Empathy Framework - Provider Configuration")
183
+ print("Empathy Framework - Provider Configuration (Claude-Native v5.0.0)")
233
184
  print("=" * 60)
234
185
 
235
- # Detect available providers
186
+ # Check for Anthropic API key
236
187
  config = ProviderConfig.auto_detect()
237
188
  available = config.available_providers
238
189
 
239
- print(f"\nDetected API keys for: {', '.join(available) if available else 'None'}")
240
-
241
190
  if not available:
242
- print("\n⚠️ No API keys detected. Please set one of:")
243
- print(" - ANTHROPIC_API_KEY (recommended)")
244
- print(" - OPENAI_API_KEY")
245
- print(" - GOOGLE_API_KEY or GEMINI_API_KEY (2M context window)")
246
- print(" - Or run Ollama locally")
247
- print("\nDefaulting to Anthropic. You'll need to set ANTHROPIC_API_KEY.")
191
+ print("\n⚠️ ANTHROPIC_API_KEY not detected.")
192
+ print("\nPlease set your Anthropic API key:")
193
+ print(" export ANTHROPIC_API_KEY='your-key-here'")
194
+ print("\nGet your API key at: https://console.anthropic.com/settings/keys")
195
+ print("\nDefaulting to Anthropic configuration.")
196
+ print("You'll need to set ANTHROPIC_API_KEY before running workflows.")
248
197
  return ProviderConfig(
249
198
  mode=ProviderMode.SINGLE,
250
199
  primary_provider="anthropic",
251
200
  available_providers=[],
252
201
  )
253
202
 
254
- # Show options
255
- print("\nSelect your provider configuration:")
256
- print("-" * 40)
257
-
258
- options = []
259
-
260
- # Option 1: Single provider (for each available)
261
- for i, provider in enumerate(available, 1):
262
- provider_name = provider.capitalize()
263
- if provider == "anthropic":
264
- desc = "Claude models (Haiku/Sonnet/Opus)"
265
- elif provider == "openai":
266
- desc = "GPT models (GPT-4o-mini/GPT-4o/o1)"
267
- elif provider == "google":
268
- desc = "Gemini models (Flash/Pro - 2M context window)"
269
- elif provider == "ollama":
270
- desc = "Local models (Llama 3.2)"
271
- else:
272
- desc = "Unknown provider"
273
- options.append((provider, ProviderMode.SINGLE))
274
- print(f" [{i}] {provider_name} only - {desc}")
275
-
276
- # Option: Hybrid (if multiple providers available)
277
- if len(available) > 1:
278
- options.append(("hybrid", ProviderMode.HYBRID))
279
- print(f" [{len(options)}] Hybrid - Best model from each provider per tier")
280
- print(" (Recommended if you have multiple API keys)")
281
-
282
- # Default selection
283
- default_idx = 0
284
- if len(available) == 1:
285
- default_idx = 0
286
- elif "anthropic" in available:
287
- default_idx = available.index("anthropic")
288
-
289
- print(f"\nDefault: [{default_idx + 1}]")
290
-
291
- # Get user input
292
- try:
293
- choice = input(f"\nYour choice [1-{len(options)}]: ").strip()
294
- if not choice:
295
- choice = str(default_idx + 1)
296
- idx = int(choice) - 1
297
- if idx < 0 or idx >= len(options):
298
- idx = default_idx
299
- except (ValueError, EOFError):
300
- idx = default_idx
301
-
302
- selected_provider, selected_mode = options[idx]
303
-
304
- if selected_mode == ProviderMode.HYBRID:
305
- config = ProviderConfig(
306
- mode=ProviderMode.HYBRID,
307
- primary_provider="hybrid",
308
- available_providers=available,
309
- )
310
- print("\n✓ Configured: Hybrid mode (best-of across providers)")
311
- else:
312
- config = ProviderConfig(
313
- mode=ProviderMode.SINGLE,
314
- primary_provider=selected_provider,
315
- available_providers=available,
316
- )
317
- print(f"\n✓ Configured: {selected_provider.capitalize()} as primary provider")
203
+ # Anthropic API key detected
204
+ print("\n✓ ANTHROPIC_API_KEY detected")
205
+ print("\nConfiguring Anthropic as provider...")
206
+
207
+ config = ProviderConfig(
208
+ mode=ProviderMode.SINGLE,
209
+ primary_provider="anthropic",
210
+ available_providers=available,
211
+ )
318
212
 
319
213
  # Show effective models
320
214
  print("\nEffective model mapping:")
321
215
  effective = config.get_effective_registry()
322
216
  for tier, model in effective.items():
323
217
  if model:
324
- print(f" {tier:8} → {model.id} ({model.provider})")
218
+ print(f" {tier:8} → {model.id}")
325
219
 
326
220
  # Save configuration
327
221
  config.save()
328
- print("\nConfiguration saved to ~/.empathy/provider_config.json")
222
+ print("\n✓ Configuration saved to ~/.empathy/provider_config.json")
329
223
 
330
224
  return config
331
225
 
@@ -334,33 +228,33 @@ def configure_provider_cli(
334
228
  provider: str | None = None,
335
229
  mode: str | None = None,
336
230
  ) -> ProviderConfig:
337
- """CLI-based provider configuration (non-interactive).
231
+ """CLI-based provider configuration (Anthropic-only as of v5.0.0).
338
232
 
339
233
  Args:
340
- provider: Provider name (anthropic, openai, google, ollama, hybrid)
341
- mode: Mode (single, hybrid, custom)
234
+ provider: Provider name (must be 'anthropic' or None)
235
+ mode: Mode (must be 'single' or None)
342
236
 
343
237
  Returns:
344
- Configured ProviderConfig
238
+ Configured ProviderConfig for Anthropic
345
239
 
240
+ Raises:
241
+ ValueError: If provider is not 'anthropic'
346
242
  """
347
- available = ProviderConfig.detect_available_providers()
348
-
349
- if provider == "hybrid" or mode == "hybrid":
350
- return ProviderConfig(
351
- mode=ProviderMode.HYBRID,
352
- primary_provider="hybrid",
353
- available_providers=available,
243
+ if provider and provider.lower() != "anthropic":
244
+ raise ValueError(
245
+ f"Provider '{provider}' is not supported. "
246
+ f"Empathy Framework is now Claude-native (v5.0.0). "
247
+ f"Only 'anthropic' provider is available. "
248
+ f"See docs/CLAUDE_NATIVE.md for migration guide."
354
249
  )
355
250
 
356
- if provider:
357
- return ProviderConfig(
358
- mode=ProviderMode.SINGLE,
359
- primary_provider=provider,
360
- available_providers=available,
251
+ if mode and mode.lower() != "single":
252
+ raise ValueError(
253
+ f"Mode '{mode}' is not supported. "
254
+ f"Only 'single' mode is available in v5.0.0 (Anthropic-only)."
361
255
  )
362
256
 
363
- # Auto-detect
257
+ # Always return Anthropic configuration
364
258
  return ProviderConfig.auto_detect()
365
259
 
366
260
 
@@ -386,182 +280,3 @@ def reset_provider_config() -> None:
386
280
  """Reset the global provider configuration (forces reload)."""
387
281
  global _global_config
388
282
  _global_config = None
389
-
390
-
391
- def configure_hybrid_interactive() -> ProviderConfig:
392
- """Interactive hybrid configuration - let users pick models for each tier.
393
-
394
- Shows available models from all providers with detected API keys,
395
- allowing users to mix and match the best models for their workflow.
396
-
397
- Returns:
398
- ProviderConfig with custom tier mappings
399
-
400
- """
401
- print("\n" + "=" * 60)
402
- print("🔀 Hybrid Model Configuration")
403
- print("=" * 60)
404
- print("\nSelect the best model for each tier from available providers.")
405
- print("This creates a custom mix optimized for your workflow.\n")
406
-
407
- # Detect available providers
408
- available = ProviderConfig.detect_available_providers()
409
-
410
- if not available:
411
- print("⚠️ No API keys detected. Please set at least one of:")
412
- print(" - ANTHROPIC_API_KEY")
413
- print(" - OPENAI_API_KEY")
414
- print(" - GOOGLE_API_KEY")
415
- print(" - Or run Ollama locally")
416
- return ProviderConfig.auto_detect()
417
-
418
- print(f"✓ Available providers: {', '.join(available)}\n")
419
-
420
- # Collect models for each tier from available providers
421
- tier_selections: dict[str, str] = {}
422
-
423
- for tier in ["cheap", "capable", "premium"]:
424
- tier_upper = tier.upper()
425
- print("-" * 60)
426
- print(f" {tier_upper} TIER - Select a model:")
427
- print("-" * 60)
428
-
429
- # Build options from available providers
430
- options: list[tuple[str, ModelInfo]] = []
431
- for provider in available:
432
- model_info = MODEL_REGISTRY.get(provider, {}).get(tier)
433
- if model_info:
434
- options.append((provider, model_info))
435
-
436
- if not options:
437
- print(f" No models available for {tier} tier")
438
- continue
439
-
440
- # Display options with pricing info
441
- for i, (provider, info) in enumerate(options, 1):
442
- provider_label = provider.capitalize()
443
- cost_info = f"${info.input_cost_per_million:.2f}/${info.output_cost_per_million:.2f} per M tokens"
444
- if provider == "ollama":
445
- cost_info = "FREE (local)"
446
-
447
- # Add feature badges
448
- features = []
449
- if info.supports_vision:
450
- features.append("👁 vision")
451
- if info.supports_tools:
452
- features.append("🔧 tools")
453
- if provider == "google":
454
- features.append("📚 2M context")
455
-
456
- features_str = f" [{', '.join(features)}]" if features else ""
457
-
458
- print(f" [{i}] {info.id}")
459
- print(f" Provider: {provider_label} | {cost_info}{features_str}")
460
-
461
- # Get user choice
462
- default_idx = 0
463
- # Set smart defaults based on tier
464
- if tier == "cheap":
465
- # Prefer cheapest: ollama > google > openai > anthropic
466
- for pref in ["ollama", "google", "openai", "anthropic"]:
467
- for i, (p, _) in enumerate(options):
468
- if p == pref:
469
- default_idx = i
470
- break
471
- else:
472
- continue
473
- break
474
- elif tier == "capable":
475
- # Prefer best reasoning: anthropic > openai > google > ollama
476
- for pref in ["anthropic", "openai", "google", "ollama"]:
477
- for i, (p, _) in enumerate(options):
478
- if p == pref:
479
- default_idx = i
480
- break
481
- else:
482
- continue
483
- break
484
- elif tier == "premium":
485
- # Prefer most capable: anthropic > openai > google > ollama
486
- for pref in ["anthropic", "openai", "google", "ollama"]:
487
- for i, (p, _) in enumerate(options):
488
- if p == pref:
489
- default_idx = i
490
- break
491
- else:
492
- continue
493
- break
494
-
495
- print(f"\n Recommended: [{default_idx + 1}] {options[default_idx][1].id}")
496
-
497
- try:
498
- choice = input(f" Your choice [1-{len(options)}]: ").strip()
499
- if not choice:
500
- idx = default_idx
501
- else:
502
- idx = int(choice) - 1
503
- if idx < 0 or idx >= len(options):
504
- idx = default_idx
505
- except (ValueError, EOFError):
506
- idx = default_idx
507
-
508
- selected_provider, selected_model = options[idx]
509
- tier_selections[tier] = selected_model.id
510
- print(f" ✓ Selected: {selected_model.id} ({selected_provider})\n")
511
-
512
- # Create custom config
513
- config = ProviderConfig(
514
- mode=ProviderMode.CUSTOM,
515
- primary_provider="custom",
516
- tier_providers={}, # Not used in CUSTOM mode
517
- available_providers=available,
518
- )
519
-
520
- # Store the custom tier->model mapping
521
- # We'll save this to workflows.yaml custom_models section
522
- print("\n" + "=" * 60)
523
- print("✅ Hybrid Configuration Complete!")
524
- print("=" * 60)
525
- print("\nYour custom model mapping:")
526
- for tier, model_id in tier_selections.items():
527
- print(f" {tier:8} → {model_id}")
528
-
529
- # Save to workflows.yaml
530
- _save_hybrid_to_workflows_yaml(tier_selections)
531
-
532
- print("\n✓ Configuration saved to .empathy/workflows.yaml")
533
- print(" Run workflows with: python -m empathy_os.cli workflow run <name>")
534
-
535
- return config
536
-
537
-
538
- def _save_hybrid_to_workflows_yaml(tier_selections: dict[str, str]) -> None:
539
- """Save hybrid tier selections to workflows.yaml."""
540
- from pathlib import Path
541
-
542
- import yaml
543
-
544
- workflows_path = Path(".empathy/workflows.yaml")
545
-
546
- # Load existing config or create new
547
- if workflows_path.exists():
548
- with open(workflows_path) as f:
549
- config = yaml.safe_load(f) or {}
550
- else:
551
- config = {}
552
- workflows_path.parent.mkdir(parents=True, exist_ok=True)
553
-
554
- # Update config
555
- config["default_provider"] = "hybrid"
556
-
557
- # Ensure custom_models exists
558
- if "custom_models" not in config or config["custom_models"] is None:
559
- config["custom_models"] = {}
560
-
561
- # Set hybrid model mapping
562
- config["custom_models"]["hybrid"] = tier_selections
563
-
564
- # Write back
565
- validated_workflows_path = _validate_file_path(str(workflows_path))
566
- with open(validated_workflows_path, "w") as f:
567
- yaml.dump(config, f, default_flow_style=False, sort_keys=False)