empathy-framework 4.6.2__py3-none-any.whl → 4.6.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. {empathy_framework-4.6.2.dist-info → empathy_framework-4.6.3.dist-info}/METADATA +1 -1
  2. {empathy_framework-4.6.2.dist-info → empathy_framework-4.6.3.dist-info}/RECORD +53 -20
  3. {empathy_framework-4.6.2.dist-info → empathy_framework-4.6.3.dist-info}/WHEEL +1 -1
  4. empathy_os/__init__.py +1 -1
  5. empathy_os/cli.py +361 -32
  6. empathy_os/config/xml_config.py +8 -3
  7. empathy_os/core.py +37 -4
  8. empathy_os/leverage_points.py +2 -1
  9. empathy_os/memory/short_term.py +45 -1
  10. empathy_os/meta_workflows/agent_creator 2.py +254 -0
  11. empathy_os/meta_workflows/builtin_templates 2.py +567 -0
  12. empathy_os/meta_workflows/cli_meta_workflows 2.py +1551 -0
  13. empathy_os/meta_workflows/form_engine 2.py +304 -0
  14. empathy_os/meta_workflows/intent_detector 2.py +298 -0
  15. empathy_os/meta_workflows/pattern_learner 2.py +754 -0
  16. empathy_os/meta_workflows/session_context 2.py +398 -0
  17. empathy_os/meta_workflows/template_registry 2.py +229 -0
  18. empathy_os/meta_workflows/workflow 2.py +980 -0
  19. empathy_os/models/token_estimator.py +16 -9
  20. empathy_os/models/validation.py +7 -1
  21. empathy_os/orchestration/pattern_learner 2.py +699 -0
  22. empathy_os/orchestration/real_tools 2.py +938 -0
  23. empathy_os/orchestration/real_tools.py +4 -2
  24. empathy_os/socratic/__init__ 2.py +273 -0
  25. empathy_os/socratic/ab_testing 2.py +969 -0
  26. empathy_os/socratic/blueprint 2.py +532 -0
  27. empathy_os/socratic/cli 2.py +689 -0
  28. empathy_os/socratic/collaboration 2.py +1112 -0
  29. empathy_os/socratic/domain_templates 2.py +916 -0
  30. empathy_os/socratic/embeddings 2.py +734 -0
  31. empathy_os/socratic/engine 2.py +729 -0
  32. empathy_os/socratic/explainer 2.py +663 -0
  33. empathy_os/socratic/feedback 2.py +767 -0
  34. empathy_os/socratic/forms 2.py +624 -0
  35. empathy_os/socratic/generator 2.py +716 -0
  36. empathy_os/socratic/llm_analyzer 2.py +635 -0
  37. empathy_os/socratic/mcp_server 2.py +751 -0
  38. empathy_os/socratic/session 2.py +306 -0
  39. empathy_os/socratic/storage 2.py +635 -0
  40. empathy_os/socratic/storage.py +2 -1
  41. empathy_os/socratic/success 2.py +719 -0
  42. empathy_os/socratic/visual_editor 2.py +812 -0
  43. empathy_os/socratic/web_ui 2.py +925 -0
  44. empathy_os/tier_recommender.py +5 -2
  45. empathy_os/workflow_commands.py +11 -6
  46. empathy_os/workflows/base.py +1 -1
  47. empathy_os/workflows/batch_processing 2.py +310 -0
  48. empathy_os/workflows/release_prep_crew 2.py +968 -0
  49. empathy_os/workflows/test_coverage_boost_crew 2.py +848 -0
  50. empathy_os/workflows/test_maintenance.py +3 -2
  51. {empathy_framework-4.6.2.dist-info → empathy_framework-4.6.3.dist-info}/entry_points.txt +0 -0
  52. {empathy_framework-4.6.2.dist-info → empathy_framework-4.6.3.dist-info}/licenses/LICENSE +0 -0
  53. {empathy_framework-4.6.2.dist-info → empathy_framework-4.6.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,699 @@
1
+ """Pattern Learning System - Grammar that evolves from experience.
2
+
3
+ This module implements the learning grammar that tracks pattern success
4
+ and recommends optimal compositions based on historical data.
5
+
6
+ Features:
7
+ - Track success metrics for each pattern execution
8
+ - Memory + file storage for fast access and persistence
9
+ - Hybrid recommendation: similarity matching → statistical fallback
10
+
11
+ Security:
12
+ - No eval() or exec() usage
13
+ - File paths validated before writing
14
+ - JSON serialization only (no pickle)
15
+ """
16
+
17
+ import json
18
+ import logging
19
+ from collections import defaultdict
20
+ from dataclasses import asdict, dataclass, field
21
+ from datetime import datetime
22
+ from pathlib import Path
23
+ from typing import Any
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
+ # =============================================================================
29
+ # Data Models
30
+ # =============================================================================
31
+
32
+
33
@dataclass
class ExecutionRecord:
    """One observed pattern execution and its outcome metrics.

    This is the atomic unit of learning data: every run of a pattern
    produces one record, which later feeds aggregation and similarity
    matching.

    Attributes:
        pattern: Name of the pattern/strategy that ran
        success: True if the execution succeeded
        duration_seconds: Wall-clock runtime in seconds
        cost: Estimated cost of the run (tokens * rate)
        confidence: Aggregate confidence score for the run
        context_features: Salient features of the execution context
        timestamp: ISO-8601 time the record was created
    """

    pattern: str
    success: bool
    duration_seconds: float
    cost: float = 0.0
    confidence: float = 0.0
    context_features: dict[str, Any] = field(default_factory=dict)
    # Default timestamp is captured at record-creation time.
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "ExecutionRecord":
        """Rebuild a record from its serialized dictionary form."""
        return cls(**data)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the record to a plain dictionary (JSON-safe)."""
        return asdict(self)
65
+
66
+
67
@dataclass
class PatternStats:
    """Aggregate metrics accumulated over a pattern's executions.

    Attributes:
        pattern: Pattern/strategy name
        total_executions: How many runs have been recorded
        success_count: How many of those runs succeeded
        total_duration: Summed runtime across all runs (seconds)
        total_cost: Summed cost across all runs
        avg_confidence: Running mean of per-run confidence
    """

    pattern: str
    total_executions: int = 0
    success_count: int = 0
    total_duration: float = 0.0
    total_cost: float = 0.0
    avg_confidence: float = 0.0

    @property
    def success_rate(self) -> float:
        """Fraction of runs that succeeded (0.0 when nothing recorded)."""
        n = self.total_executions
        return self.success_count / n if n else 0.0

    @property
    def avg_duration(self) -> float:
        """Mean runtime per execution in seconds (0.0 when nothing recorded)."""
        n = self.total_executions
        return self.total_duration / n if n else 0.0

    @property
    def avg_cost(self) -> float:
        """Mean cost per execution (0.0 when nothing recorded)."""
        n = self.total_executions
        return self.total_cost / n if n else 0.0

    def update(self, record: "ExecutionRecord") -> None:
        """Fold one execution record into the aggregates.

        Args:
            record: Execution record to incorporate
        """
        self.total_executions += 1
        self.success_count += 1 if record.success else 0
        self.total_duration += record.duration_seconds
        self.total_cost += record.cost

        # Running (incremental) mean of confidence over all executions.
        n = self.total_executions
        self.avg_confidence = (
            (self.avg_confidence * (n - 1) + record.confidence) / n
        )

    def to_dict(self) -> dict[str, Any]:
        """Serialize stored fields plus the derived metrics."""
        return {
            "pattern": self.pattern,
            "total_executions": self.total_executions,
            "success_count": self.success_count,
            "total_duration": self.total_duration,
            "total_cost": self.total_cost,
            "avg_confidence": self.avg_confidence,
            # Derived values included for readers of the persisted JSON.
            "success_rate": self.success_rate,
            "avg_duration": self.avg_duration,
            "avg_cost": self.avg_cost,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "PatternStats":
        """Rebuild stats from a serialized dictionary.

        Derived keys ("success_rate", "avg_duration", "avg_cost") are
        ignored; they are recomputed from the stored fields.
        """
        return cls(
            pattern=data["pattern"],
            total_executions=data.get("total_executions", 0),
            success_count=data.get("success_count", 0),
            total_duration=data.get("total_duration", 0.0),
            total_cost=data.get("total_cost", 0.0),
            avg_confidence=data.get("avg_confidence", 0.0),
        )
152
+
153
+
154
@dataclass
class ContextSignature:
    """Compact feature signature of an execution context.

    Used to compare contexts for similarity-based recommendation.

    Attributes:
        task_type: Kind of task (e.g., "code_review", "test_gen")
        agent_count: How many agents the context involves
        has_conditions: True if conditional steps were present
        has_nesting: True if nested workflows were present
        priority: Task priority label
    """

    task_type: str = ""
    agent_count: int = 0
    has_conditions: bool = False
    has_nesting: bool = False
    priority: str = "normal"

    @classmethod
    def from_context(cls, context: dict[str, Any]) -> "ContextSignature":
        """Build a signature from a raw execution-context dictionary.

        Args:
            context: Execution context dictionary

        Returns:
            ContextSignature with extracted features
        """
        task = context.get("task_type", context.get("_task_type", ""))
        return cls(
            task_type=task,
            agent_count=len(context.get("agents", [])),
            has_conditions="_conditional" in context,
            has_nesting="_nesting" in context,
            priority=context.get("priority", "normal"),
        )

    def similarity(self, other: "ContextSignature") -> float:
        """Score how alike two signatures are.

        Args:
            other: Signature to compare with

        Returns:
            Similarity in [0.0, 1.0]; task type carries the most weight.
        """
        earned = 0.0
        # Total attainable weight: 3 (task) + 1 (agents) + 2 (flags) + 1 (priority).
        possible = 7.0

        # Task type: exact match scores full weight; a shared first
        # underscore-separated segment scores half.
        if self.task_type and other.task_type:
            if self.task_type == other.task_type:
                earned += 3.0
            elif self.task_type.split("_")[0] == other.task_type.split("_")[0]:
                earned += 1.5

        # Agent counts: ratio of the smaller count to the larger one.
        if self.agent_count > 0 and other.agent_count > 0:
            lo, hi = sorted((self.agent_count, other.agent_count))
            earned += lo / hi

        # One point per matching boolean flag.
        earned += 1.0 if self.has_conditions == other.has_conditions else 0.0
        earned += 1.0 if self.has_nesting == other.has_nesting else 0.0

        # Priority label match.
        if self.priority == other.priority:
            earned += 1.0

        return earned / possible

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dictionary."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "ContextSignature":
        """Rebuild a signature from a serialized dictionary."""
        return cls(**data)
242
+
243
+
244
+ # =============================================================================
245
+ # Storage Layer
246
+ # =============================================================================
247
+
248
+
249
class LearningStore:
    """Memory + file storage for learning data.

    Maintains an in-memory cache for fast access with
    periodic persistence to a JSON file (auto-save every 10 records,
    plus explicit ``save()``).

    Attributes:
        file_path: Path to persistence file
        _records: In-memory execution records
        _stats: In-memory pattern statistics keyed by pattern name
        _context_index: Maps task_type -> indices into _records
        _dirty: Whether in-memory data needs saving
    """

    # Relative path used when no explicit file_path is given.
    DEFAULT_FILE = "patterns/learning_memory.json"

    def __init__(self, file_path: str | None = None):
        """Initialize learning store.

        Args:
            file_path: Path to persistence file (default: patterns/learning_memory.json)
        """
        self.file_path = Path(file_path or self.DEFAULT_FILE)
        self._records: list[ExecutionRecord] = []
        self._stats: dict[str, PatternStats] = {}
        # NOTE(review): the index is maintained here and in add_record but
        # find_similar_records scans all records linearly and never consults
        # it — presumably kept for a future fast-lookup path; confirm.
        self._context_index: dict[str, list[int]] = defaultdict(list)
        self._dirty = False

        # Load existing data if available
        self._load()

    def _load(self) -> None:
        """Load data from file if it exists.

        Errors are logged, never raised: a corrupt or unreadable file
        leaves the store in its empty initial state.
        """
        if not self.file_path.exists():
            logger.info(f"No existing learning data at {self.file_path}")
            return

        try:
            with self.file_path.open("r") as f:
                data = json.load(f)

            # Load records
            self._records = [
                ExecutionRecord.from_dict(r) for r in data.get("records", [])
            ]

            # Load stats
            self._stats = {
                s["pattern"]: PatternStats.from_dict(s)
                for s in data.get("stats", [])
            }

            # Rebuild context index (keyed by each record's task_type)
            for i, record in enumerate(self._records):
                sig = ContextSignature(
                    task_type=record.context_features.get("task_type", "")
                )
                self._context_index[sig.task_type].append(i)

            logger.info(
                f"Loaded {len(self._records)} records, "
                f"{len(self._stats)} pattern stats from {self.file_path}"
            )
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse learning data: {e}")
        except Exception as e:
            logger.exception(f"Failed to load learning data: {e}")

    def save(self) -> None:
        """Save data to file.

        No-op unless in-memory data changed since the last save.
        Write failures are logged and leave the store marked dirty.
        """
        if not self._dirty:
            return

        # Ensure directory exists
        self.file_path.parent.mkdir(parents=True, exist_ok=True)

        data = {
            "records": [r.to_dict() for r in self._records],
            "stats": [s.to_dict() for s in self._stats.values()],
            "metadata": {
                "saved_at": datetime.now().isoformat(),
                "total_records": len(self._records),
                "patterns_tracked": len(self._stats),
            },
        }

        try:
            with self.file_path.open("w") as f:
                json.dump(data, f, indent=2)
            self._dirty = False
            logger.info(f"Saved learning data to {self.file_path}")
        except Exception as e:
            logger.exception(f"Failed to save learning data: {e}")

    def add_record(self, record: ExecutionRecord) -> None:
        """Add an execution record and update derived state.

        Args:
            record: Record to add
        """
        self._records.append(record)

        # Update per-pattern aggregate stats (create on first sighting)
        if record.pattern not in self._stats:
            self._stats[record.pattern] = PatternStats(pattern=record.pattern)
        self._stats[record.pattern].update(record)

        # Update context index
        task_type = record.context_features.get("task_type", "")
        self._context_index[task_type].append(len(self._records) - 1)

        self._dirty = True

        # Auto-save every 10th record to bound data loss on crash
        if len(self._records) % 10 == 0:
            self.save()

    def get_stats(self, pattern: str) -> PatternStats | None:
        """Get statistics for a pattern.

        Args:
            pattern: Pattern name

        Returns:
            PatternStats or None if not tracked
        """
        return self._stats.get(pattern)

    def get_all_stats(self) -> list[PatternStats]:
        """Get all pattern statistics.

        Returns:
            List of PatternStats sorted by success rate, best first
        """
        return sorted(
            self._stats.values(),
            key=lambda s: s.success_rate,
            reverse=True,
        )

    def find_similar_records(
        self, signature: ContextSignature, limit: int = 10
    ) -> list[tuple[ExecutionRecord, float]]:
        """Find records with similar context (linear scan over all records).

        Args:
            signature: Context signature to match
            limit: Maximum records to return

        Returns:
            List of (record, similarity_score) tuples, most similar first
        """
        scored: list[tuple[ExecutionRecord, float]] = []

        for record in self._records:
            # Reconstruct the record's signature from its stored features;
            # missing features fall back to the signature defaults.
            record_sig = ContextSignature(
                task_type=record.context_features.get("task_type", ""),
                agent_count=record.context_features.get("agent_count", 0),
                has_conditions=record.context_features.get("has_conditions", False),
                has_nesting=record.context_features.get("has_nesting", False),
                priority=record.context_features.get("priority", "normal"),
            )
            score = signature.similarity(record_sig)
            if score > 0.3:  # Minimum threshold: drop weak matches entirely
                scored.append((record, score))

        # Sort by similarity and return top results
        scored.sort(key=lambda x: x[1], reverse=True)
        return scored[:limit]
417
+
418
+
419
+ # =============================================================================
420
+ # Recommendation Engine
421
+ # =============================================================================
422
+
423
+
424
@dataclass
class PatternRecommendation:
    """A pattern recommendation.

    Attributes:
        pattern: Recommended pattern name
        confidence: Confidence in recommendation (0.0 - 1.0)
        reason: Why this pattern was recommended
        expected_success_rate: Predicted success rate
        expected_duration: Predicted duration
    """

    pattern: str
    confidence: float
    reason: str
    expected_success_rate: float = 0.0  # 0.0 when no stats exist for the pattern
    expected_duration: float = 0.0  # seconds; 0.0 when no stats exist
441
+
442
+
443
class PatternRecommender:
    """Hybrid recommendation engine for patterns.

    Uses similarity matching first, falls back to statistical ranking.
    The fallback never duplicates a pattern already recommended by the
    similarity phase.
    """

    def __init__(self, store: "LearningStore"):
        """Initialize recommender.

        Args:
            store: Learning store with historical data
        """
        self.store = store

    def recommend(
        self, context: dict[str, Any], top_k: int = 3
    ) -> "list[PatternRecommendation]":
        """Recommend patterns for a context.

        Hybrid approach:
        1. Find similar past contexts and rank the patterns that worked there.
        2. If fewer than top_k were found, top up from overall statistics,
           excluding patterns already recommended (avoids duplicate entries).

        Args:
            context: Current execution context
            top_k: Number of recommendations to return

        Returns:
            List of PatternRecommendation with no duplicate patterns
        """
        signature = ContextSignature.from_context(context)
        recommendations: list = []

        # Phase 1: Similarity matching against historical records
        similar = self.store.find_similar_records(signature, limit=20)
        if similar:
            recommendations = self._recommend_from_similar(similar, top_k)

        # Phase 2: Statistical fallback. Pass the patterns already chosen so
        # the fallback cannot re-recommend them (previously it could, yielding
        # duplicate patterns in the result list).
        if len(recommendations) < top_k:
            already = {r.pattern for r in recommendations}
            statistical = self._recommend_statistical(
                top_k - len(recommendations), exclude=already
            )
            recommendations.extend(statistical)

        return recommendations[:top_k]

    def _recommend_from_similar(
        self, similar: "list[tuple[ExecutionRecord, float]]", top_k: int
    ) -> "list[PatternRecommendation]":
        """Aggregate similar records into per-pattern recommendations.

        Confidence is the similarity-weighted success rate: every record
        contributes its similarity to the denominator, successful records
        also contribute it to the numerator.

        Args:
            similar: List of (record, similarity) tuples
            top_k: Number of recommendations

        Returns:
            Recommendations sorted by confidence, highest first
        """
        pattern_scores: dict[str, dict[str, Any]] = defaultdict(
            lambda: {"total_similarity": 0, "success_similarity": 0, "count": 0}
        )

        for record, similarity in similar:
            entry = pattern_scores[record.pattern]
            entry["count"] += 1
            entry["total_similarity"] += similarity
            if record.success:
                entry["success_similarity"] += similarity

        recommendations = []
        for pattern, scores in pattern_scores.items():
            if scores["total_similarity"] > 0:
                weighted_success = (
                    scores["success_similarity"] / scores["total_similarity"]
                )
                stats = self.store.get_stats(pattern)

                recommendations.append(
                    PatternRecommendation(
                        pattern=pattern,
                        # Cap so similarity evidence never claims certainty.
                        confidence=min(weighted_success, 0.95),
                        reason=f"Worked in {scores['count']} similar contexts",
                        expected_success_rate=stats.success_rate if stats else 0,
                        expected_duration=stats.avg_duration if stats else 0,
                    )
                )

        # Sort by confidence
        recommendations.sort(key=lambda r: r.confidence, reverse=True)
        return recommendations[:top_k]

    def _recommend_statistical(
        self, top_k: int, exclude: "set[str] | None" = None
    ) -> "list[PatternRecommendation]":
        """Generate recommendations from overall statistics.

        Scans all tracked patterns (best success rate first) until top_k
        qualified recommendations are collected. Previously this sliced the
        stats list to top_k *before* filtering by sample size, which could
        return fewer recommendations than available qualified patterns.

        Args:
            top_k: Number of recommendations
            exclude: Pattern names to skip (already recommended elsewhere)

        Returns:
            List of recommendations based on global stats
        """
        skip = exclude or set()
        recommendations: list = []

        for stats in self.store.get_all_stats():
            if len(recommendations) >= top_k:
                break
            if stats.pattern in skip:
                continue
            if stats.total_executions >= 3:  # Minimum sample size
                recommendations.append(
                    PatternRecommendation(
                        pattern=stats.pattern,
                        confidence=stats.success_rate * 0.8,  # Slight penalty
                        reason=f"High overall success rate ({stats.success_rate:.0%})",
                        expected_success_rate=stats.success_rate,
                        expected_duration=stats.avg_duration,
                    )
                )

        return recommendations
560
+
561
+
562
+ # =============================================================================
563
+ # Main Interface
564
+ # =============================================================================
565
+
566
+
567
+ class PatternLearner:
568
+ """Main interface for the learning grammar system.
569
+
570
+ Provides a simple API for recording executions and getting recommendations.
571
+
572
+ Example:
573
+ >>> learner = PatternLearner()
574
+ >>> # Record an execution
575
+ >>> learner.record(
576
+ ... pattern="sequential",
577
+ ... success=True,
578
+ ... duration=2.5,
579
+ ... cost=0.05,
580
+ ... context={"task_type": "code_review"}
581
+ ... )
582
+ >>> # Get recommendations
583
+ >>> recs = learner.recommend({"task_type": "code_review"})
584
+ >>> print(recs[0].pattern, recs[0].confidence)
585
+ """
586
+
587
+ def __init__(self, storage_path: str | None = None):
588
+ """Initialize pattern learner.
589
+
590
+ Args:
591
+ storage_path: Path for persistence (default: patterns/learning_memory.json)
592
+ """
593
+ self.store = LearningStore(storage_path)
594
+ self.recommender = PatternRecommender(self.store)
595
+
596
+ def record(
597
+ self,
598
+ pattern: str,
599
+ success: bool,
600
+ duration: float,
601
+ cost: float = 0.0,
602
+ confidence: float = 0.0,
603
+ context: dict[str, Any] | None = None,
604
+ ) -> None:
605
+ """Record a pattern execution.
606
+
607
+ Args:
608
+ pattern: Pattern/strategy name
609
+ success: Whether execution succeeded
610
+ duration: Execution duration in seconds
611
+ cost: Estimated cost
612
+ confidence: Aggregate confidence score
613
+ context: Execution context (for similarity matching)
614
+ """
615
+ record = ExecutionRecord(
616
+ pattern=pattern,
617
+ success=success,
618
+ duration_seconds=duration,
619
+ cost=cost,
620
+ confidence=confidence,
621
+ context_features=context or {},
622
+ )
623
+ self.store.add_record(record)
624
+ logger.debug(f"Recorded {pattern} execution: success={success}")
625
+
626
+ def recommend(
627
+ self, context: dict[str, Any], top_k: int = 3
628
+ ) -> list[PatternRecommendation]:
629
+ """Get pattern recommendations for a context.
630
+
631
+ Args:
632
+ context: Execution context
633
+ top_k: Number of recommendations
634
+
635
+ Returns:
636
+ List of PatternRecommendation
637
+ """
638
+ return self.recommender.recommend(context, top_k)
639
+
640
+ def get_stats(self, pattern: str) -> PatternStats | None:
641
+ """Get statistics for a specific pattern.
642
+
643
+ Args:
644
+ pattern: Pattern name
645
+
646
+ Returns:
647
+ PatternStats or None
648
+ """
649
+ return self.store.get_stats(pattern)
650
+
651
+ def get_all_stats(self) -> list[PatternStats]:
652
+ """Get statistics for all patterns.
653
+
654
+ Returns:
655
+ List of PatternStats sorted by success rate
656
+ """
657
+ return self.store.get_all_stats()
658
+
659
+ def save(self) -> None:
660
+ """Force save to disk."""
661
+ self.store.save()
662
+
663
+ def report(self) -> str:
664
+ """Generate a human-readable report of learning data.
665
+
666
+ Returns:
667
+ Formatted report string
668
+ """
669
+ stats = self.get_all_stats()
670
+ if not stats:
671
+ return "No learning data recorded yet."
672
+
673
+ lines = ["Pattern Learning Report", "=" * 50, ""]
674
+
675
+ for s in stats:
676
+ lines.append(f"Pattern: {s.pattern}")
677
+ lines.append(f" Executions: {s.total_executions}")
678
+ lines.append(f" Success Rate: {s.success_rate:.1%}")
679
+ lines.append(f" Avg Duration: {s.avg_duration:.2f}s")
680
+ lines.append(f" Avg Cost: ${s.avg_cost:.4f}")
681
+ lines.append("")
682
+
683
+ return "\n".join(lines)
684
+
685
+
686
# Lazily-created, process-wide learner shared by convenience callers.
_default_learner: "PatternLearner | None" = None


def get_learner() -> "PatternLearner":
    """Return the shared PatternLearner, creating it on first use.

    Returns:
        PatternLearner singleton
    """
    global _default_learner
    learner = _default_learner
    if learner is None:
        learner = PatternLearner()
        _default_learner = learner
    return learner