nc1709-1.15.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
nc1709/cognitive/anticipation.py
@@ -0,0 +1,594 @@
+ """
+ Layer 5: Anticipation Engine
+
+ Predicts user needs before they ask:
+ - Workflow prediction based on history
+ - Proactive suggestions (next files to edit, tests to run)
+ - Context pre-loading for faster responses
+ - Issue prediction (potential bugs, conflicts)
+ - Smart defaults based on patterns
+
+ This layer answers: "What will the user probably need next?"
+ """
+
+ import logging
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Any, Tuple
+ from datetime import datetime, timedelta
+ from enum import Enum
+ from collections import Counter, defaultdict
+ import threading
+ import heapq
+
+ logger = logging.getLogger(__name__)
+
+
+ class SuggestionType(Enum):
+     """Types of proactive suggestions"""
+     NEXT_FILE = "next_file"  # Files user might edit next
+     RUN_TESTS = "run_tests"  # Suggest running tests
+     GIT_COMMIT = "git_commit"  # Suggest committing changes
+     FIX_ERROR = "fix_error"  # Potential error to address
+     DOCUMENTATION = "documentation"  # Suggest adding docs
+     REFACTOR = "refactor"  # Suggest refactoring
+     SECURITY = "security"  # Security check suggestion
+     PERFORMANCE = "performance"  # Performance improvement
+     CONTEXT_PRELOAD = "context_preload"  # Pre-load relevant context
+     WORKFLOW_STEP = "workflow_step"  # Next step in workflow
+
+
+ @dataclass
+ class Suggestion:
+     """A proactive suggestion"""
+     suggestion_type: SuggestionType
+     title: str
+     description: str
+     confidence: float  # 0.0 to 1.0
+     priority: int = 0  # Higher = more important
+     action: Optional[str] = None  # Command or action to take
+     metadata: Dict[str, Any] = field(default_factory=dict)
+     created_at: datetime = field(default_factory=datetime.now)
+     expires_at: Optional[datetime] = None  # When suggestion becomes stale
+
+     def is_valid(self) -> bool:
+         """Check if suggestion is still valid"""
+         if self.expires_at and datetime.now() > self.expires_at:
+             return False
+         return True
+
+
+ @dataclass
+ class WorkflowPattern:
+     """A detected workflow pattern"""
+     name: str
+     steps: List[str]  # Sequence of actions
+     frequency: int  # How often this pattern occurs
+     last_seen: datetime = field(default_factory=datetime.now)
+     confidence: float = 0.5
+
+
+ @dataclass
+ class PredictionContext:
+     """Context for making predictions"""
+     current_file: Optional[str] = None
+     recent_files: List[str] = field(default_factory=list)
+     recent_tasks: List[str] = field(default_factory=list)
+     recent_errors: List[str] = field(default_factory=list)
+     uncommitted_changes: bool = False
+     tests_passing: Optional[bool] = None
+     time_of_day: int = 12  # Hour
+     session_duration_minutes: int = 0
+
+
+ class WorkflowPredictor:
+     """Predicts workflow patterns and next actions"""
+
+     def __init__(self, min_pattern_frequency: int = 2):
+         self.min_pattern_frequency = min_pattern_frequency
+         self._action_sequences: List[List[str]] = []
+         self._patterns: Dict[str, WorkflowPattern] = {}
+         self._current_sequence: List[str] = []
+         self._lock = threading.Lock()
+
+     def record_action(self, action: str) -> None:
+         """Record an action in the current sequence"""
+         with self._lock:
+             self._current_sequence.append(action)
+             # Keep sequence length manageable
+             if len(self._current_sequence) > 20:
+                 self._current_sequence = self._current_sequence[-20:]
+
+     def end_sequence(self) -> None:
+         """End the current action sequence"""
+         with self._lock:
+             if len(self._current_sequence) >= 2:
+                 self._action_sequences.append(self._current_sequence.copy())
+                 # Keep last 100 sequences
+                 if len(self._action_sequences) > 100:
+                     self._action_sequences = self._action_sequences[-100:]
+             self._current_sequence = []
+
+     def detect_patterns(self) -> List[WorkflowPattern]:
+         """Detect repeated patterns in action sequences"""
+         patterns = []
+
+         with self._lock:
+             # Find common subsequences
+             subsequence_counts: Dict[tuple, int] = defaultdict(int)
+
+             for sequence in self._action_sequences:
+                 # Look for patterns of length 2-5
+                 for length in range(2, min(6, len(sequence) + 1)):
+                     for i in range(len(sequence) - length + 1):
+                         subseq = tuple(sequence[i:i + length])
+                         subsequence_counts[subseq] += 1
+
+             # Create patterns from frequent subsequences
+             for subseq, count in subsequence_counts.items():
+                 if count >= self.min_pattern_frequency:
+                     pattern_name = f"pattern_{'_'.join(subseq[:2])}"
+                     patterns.append(WorkflowPattern(
+                         name=pattern_name,
+                         steps=list(subseq),
+                         frequency=count,
+                         confidence=min(1.0, count / 10),
+                     ))
+
+             # Store patterns
+             for pattern in patterns:
+                 self._patterns[pattern.name] = pattern
+
+         return patterns
+
+     def predict_next_action(self, current_action: str) -> List[Tuple[str, float]]:
+         """Predict the next likely action given the current one"""
+         predictions = []
+
+         with self._lock:
+             # Look at the current sequence + all patterns
+             next_action_counts: Counter = Counter()
+
+             # From patterns
+             for pattern in self._patterns.values():
+                 for i, step in enumerate(pattern.steps[:-1]):
+                     if step == current_action:
+                         next_action = pattern.steps[i + 1]
+                         next_action_counts[next_action] += pattern.frequency
+
+             # From recent sequences
+             for sequence in self._action_sequences[-20:]:
+                 for i, action in enumerate(sequence[:-1]):
+                     if action == current_action:
+                         next_action_counts[sequence[i + 1]] += 1
+
+             # Convert to probabilities
+             total = sum(next_action_counts.values())
+             if total > 0:
+                 predictions = [
+                     (action, count / total)
+                     for action, count in next_action_counts.most_common(5)
+                 ]
+
+         return predictions
+
+
+ class FilePredictor:
+     """Predicts which files user will work with next"""
+
+     def __init__(self):
+         self._file_transitions: Dict[str, Counter] = defaultdict(Counter)
+         self._file_access_times: Dict[str, datetime] = {}
+         self._file_clusters: Dict[str, set] = defaultdict(set)  # Files often edited together
+         self._lock = threading.Lock()
+
+     def record_file_access(self, file_path: str, previous_file: Optional[str] = None) -> None:
+         """Record a file access"""
+         with self._lock:
+             self._file_access_times[file_path] = datetime.now()
+
+             if previous_file:
+                 # Record transition
+                 self._file_transitions[previous_file][file_path] += 1
+                 # Record cluster (files edited in same session)
+                 self._file_clusters[previous_file].add(file_path)
+                 self._file_clusters[file_path].add(previous_file)
+
+     def predict_next_files(
+         self,
+         current_file: str,
+         limit: int = 5
+     ) -> List[Tuple[str, float]]:
+         """Predict next files to be accessed"""
+         predictions = []
+
+         with self._lock:
+             # From transitions
+             if current_file in self._file_transitions:
+                 transitions = self._file_transitions[current_file]
+                 total = sum(transitions.values())
+                 for file_path, count in transitions.most_common(limit):
+                     predictions.append((file_path, count / total))
+
+             # From clusters (if not enough from transitions)
+             if len(predictions) < limit and current_file in self._file_clusters:
+                 cluster = self._file_clusters[current_file]
+                 for file_path in list(cluster)[:limit - len(predictions)]:
+                     if file_path not in [p[0] for p in predictions]:
+                         predictions.append((file_path, 0.3))  # Lower confidence
+
+         return predictions[:limit]
+
+     def get_related_files(self, file_path: str) -> List[str]:
+         """Get files related to the given file"""
+         with self._lock:
+             related = set()
+
+             # From clusters
+             if file_path in self._file_clusters:
+                 related.update(self._file_clusters[file_path])
+
+             # From transitions (both directions)
+             if file_path in self._file_transitions:
+                 related.update(self._file_transitions[file_path].keys())
+
+             for source, targets in self._file_transitions.items():
+                 if file_path in targets:
+                     related.add(source)
+
+             return list(related)[:10]
+
+
+ class IssuePredictor:
+     """Predicts potential issues and problems"""
+
+     # Patterns that often lead to bugs
+     BUG_PATTERNS = [
+         ("missing_error_handling", ["try", "except", "error"]),
+         ("missing_null_check", ["None", "null", "undefined"]),
+         ("hardcoded_values", ["localhost", "127.0.0.1", "password"]),
+         ("missing_tests", ["def ", "class ", "test"]),
+     ]
+
+     def predict_issues(
+         self,
+         file_path: Optional[str] = None,
+         recent_changes: Optional[List[str]] = None,
+         context: Optional[PredictionContext] = None
+     ) -> List[Suggestion]:
+         """Predict potential issues"""
+         issues = []
+
+         # Check for uncommitted changes
+         if context and context.uncommitted_changes:
+             issues.append(Suggestion(
+                 suggestion_type=SuggestionType.GIT_COMMIT,
+                 title="Uncommitted changes detected",
+                 description="You have uncommitted changes. Consider committing or stashing them.",
+                 confidence=0.8,
+                 priority=2,
+                 action="/git commit",
+             ))
+
+         # Check for test suggestions
+         if context and context.recent_tasks:
+             code_tasks = [t for t in context.recent_tasks if "code" in t.lower() or "function" in t.lower()]
+             if code_tasks and context.tests_passing is None:
+                 issues.append(Suggestion(
+                     suggestion_type=SuggestionType.RUN_TESTS,
+                     title="Tests may need updating",
+                     description="You've made code changes. Consider running tests.",
+                     confidence=0.7,
+                     priority=3,
+                     action="/test",
+                 ))
+
+         # Check for documentation suggestions
+         if context and len(context.recent_files) > 3:
+             issues.append(Suggestion(
+                 suggestion_type=SuggestionType.DOCUMENTATION,
+                 title="Consider updating documentation",
+                 description=f"You've modified {len(context.recent_files)} files. Documentation may need updating.",
+                 confidence=0.5,
+                 priority=1,
+             ))
+
+         return issues
+
+
+ class SuggestionQueue:
+     """Priority queue for managing suggestions"""
+
+     def __init__(self, max_size: int = 50):
+         self.max_size = max_size
+         self._queue: List[Tuple[int, int, Suggestion]] = []  # (priority, id, suggestion)
+         self._counter = 0
+         self._lock = threading.Lock()
+
+     def add(self, suggestion: Suggestion) -> None:
+         """Add a suggestion to the queue"""
+         with self._lock:
+             # Use negative priority for max-heap behavior
+             heapq.heappush(
+                 self._queue,
+                 (-suggestion.priority, self._counter, suggestion)
+             )
+             self._counter += 1
+
+             # Keep queue size bounded
+             while len(self._queue) > self.max_size:
+                 heapq.heappop(self._queue)
+
+     def get_top(self, n: int = 5) -> List[Suggestion]:
+         """Get top n suggestions"""
+         with self._lock:
+             # Filter valid suggestions
+             valid = [
+                 (p, c, s) for p, c, s in self._queue
+                 if s.is_valid()
+             ]
+
+             # Sort and return top n
+             valid.sort()  # Already negative priority, so smallest first = highest priority
+             return [s for _, _, s in valid[:n]]
+
+     def clear_expired(self) -> int:
+         """Remove expired suggestions"""
+         with self._lock:
+             original_size = len(self._queue)
+             self._queue = [
+                 (p, c, s) for p, c, s in self._queue
+                 if s.is_valid()
+             ]
+             heapq.heapify(self._queue)
+             return original_size - len(self._queue)
+
+
+ class AnticipationEngine:
+     """
+     Layer 5: Anticipation Engine
+
+     Predicts user needs and provides proactive suggestions
+     to improve developer experience and productivity.
+     """
+
+     def __init__(
+         self,
+         learning_core: Optional[Any] = None,
+         context_engine: Optional[Any] = None
+     ):
+         """
+         Initialize the anticipation engine
+
+         Args:
+             learning_core: Reference to Layer 4 for pattern data
+             context_engine: Reference to Layer 2 for code context
+         """
+         self._learning_core = learning_core
+         self._context_engine = context_engine
+
+         # Predictors
+         self.workflow_predictor = WorkflowPredictor()
+         self.file_predictor = FilePredictor()
+         self.issue_predictor = IssuePredictor()
+
+         # Suggestion management
+         self.suggestion_queue = SuggestionQueue()
+
+         # State
+         self._current_context = PredictionContext()
+         self._lock = threading.Lock()
+
+     def update_context(
+         self,
+         current_file: Optional[str] = None,
+         recent_files: Optional[List[str]] = None,
+         current_task: Optional[str] = None,
+         error: Optional[str] = None,
+         uncommitted_changes: Optional[bool] = None,
+         tests_passing: Optional[bool] = None
+     ) -> None:
+         """Update the prediction context"""
+         with self._lock:
+             if current_file:
+                 # Record file access for predictions
+                 self.file_predictor.record_file_access(
+                     current_file,
+                     self._current_context.current_file
+                 )
+                 self._current_context.current_file = current_file
+
+             if recent_files is not None:
+                 self._current_context.recent_files = recent_files[-20:]
+
+             if current_task:
+                 self._current_context.recent_tasks.append(current_task)
+                 self._current_context.recent_tasks = self._current_context.recent_tasks[-20:]
+                 # Record for workflow prediction
+                 self.workflow_predictor.record_action(current_task)
+
+             if error:
+                 self._current_context.recent_errors.append(error)
+                 self._current_context.recent_errors = self._current_context.recent_errors[-10:]
+
+             if uncommitted_changes is not None:
+                 self._current_context.uncommitted_changes = uncommitted_changes
+
+             if tests_passing is not None:
+                 self._current_context.tests_passing = tests_passing
+
+             self._current_context.time_of_day = datetime.now().hour
+
+     def predict_next_files(self, limit: int = 5) -> List[Tuple[str, float]]:
+         """Predict which files user will work with next"""
+         if self._current_context.current_file:
+             return self.file_predictor.predict_next_files(
+                 self._current_context.current_file,
+                 limit=limit
+             )
+         return []
+
+     def predict_next_actions(self) -> List[Tuple[str, float]]:
+         """Predict next likely actions"""
+         if self._current_context.recent_tasks:
+             current_task = self._current_context.recent_tasks[-1]
+             return self.workflow_predictor.predict_next_action(current_task)
+         return []
+
+     def get_suggestions(self, limit: int = 5) -> List[Suggestion]:
+         """Get top proactive suggestions"""
+         # Generate new suggestions based on current context
+         self._generate_suggestions()
+
+         # Clean up expired suggestions
+         self.suggestion_queue.clear_expired()
+
+         return self.suggestion_queue.get_top(limit)
+
+     def _generate_suggestions(self) -> None:
+         """Generate new suggestions based on current context"""
+         # File suggestions
+         next_files = self.predict_next_files(limit=3)
+         for file_path, confidence in next_files:
+             if confidence > 0.3:
+                 self.suggestion_queue.add(Suggestion(
+                     suggestion_type=SuggestionType.NEXT_FILE,
+                     title=f"You might work on: {file_path.split('/')[-1]}",
+                     description=f"Based on your workflow, you often edit this file next.",
+                     confidence=confidence,
+                     priority=int(confidence * 5),
+                     action=f"/read {file_path}",
+                     metadata={"file_path": file_path},
+                     expires_at=datetime.now() + timedelta(minutes=30),
+                 ))
+
+         # Issue suggestions
+         issues = self.issue_predictor.predict_issues(context=self._current_context)
+         for issue in issues:
+             self.suggestion_queue.add(issue)
+
+         # Workflow suggestions
+         next_actions = self.predict_next_actions()
+         for action, confidence in next_actions[:2]:
+             if confidence > 0.4:
+                 self.suggestion_queue.add(Suggestion(
+                     suggestion_type=SuggestionType.WORKFLOW_STEP,
+                     title=f"Next step: {action}",
+                     description="Based on your usual workflow pattern",
+                     confidence=confidence,
+                     priority=int(confidence * 4),
+                     expires_at=datetime.now() + timedelta(minutes=15),
+                 ))
+
+         # Context preload suggestions
+         if self._context_engine and self._current_context.current_file:
+             related = self.file_predictor.get_related_files(
+                 self._current_context.current_file
+             )
+             if related:
+                 self.suggestion_queue.add(Suggestion(
+                     suggestion_type=SuggestionType.CONTEXT_PRELOAD,
+                     title="Related files loaded",
+                     description=f"Pre-loaded context for {len(related)} related files",
+                     confidence=0.7,
+                     priority=1,
+                     metadata={"files": related[:5]},
+                     expires_at=datetime.now() + timedelta(minutes=60),
+                 ))
+
+     def get_smart_defaults(self, task_type: str) -> Dict[str, Any]:
+         """Get smart defaults based on learned patterns"""
+         defaults = {}
+
+         # Default model based on task type and user preference
+         if self._learning_core:
+             recommended_model = self._learning_core.get_recommended_model(task_type)
+             if recommended_model:
+                 defaults["model"] = recommended_model
+
+         # Default based on time of day
+         hour = datetime.now().hour
+         if hour < 9 or hour > 21:
+             # Outside work hours - maybe quicker responses preferred
+             defaults["verbosity"] = "concise"
+         else:
+             defaults["verbosity"] = "normal"
+
+         # Task-specific defaults
+         if "debug" in task_type.lower():
+             defaults["show_reasoning"] = True
+             defaults["include_context"] = True
+         elif "test" in task_type.lower():
+             defaults["include_examples"] = True
+
+         return defaults
+
+     def preload_context(self) -> Dict[str, Any]:
+         """Preload context for faster responses"""
+         context = {
+             "files": [],
+             "patterns": [],
+             "suggestions": [],
+         }
+
+         # Get likely next files
+         next_files = self.predict_next_files(limit=3)
+         context["files"] = [f[0] for f in next_files]
+
+         # Get workflow patterns
+         patterns = self.workflow_predictor.detect_patterns()
+         context["patterns"] = [
+             {"name": p.name, "steps": p.steps, "confidence": p.confidence}
+             for p in patterns[:5]
+         ]
+
+         # Get current suggestions
+         context["suggestions"] = [
+             {
+                 "type": s.suggestion_type.value,
+                 "title": s.title,
+                 "confidence": s.confidence,
+             }
+             for s in self.get_suggestions(limit=3)
+         ]
+
+         return context
+
+     def end_session(self) -> None:
+         """End the current session"""
+         self.workflow_predictor.end_sequence()
+
+     def get_anticipation_summary(self) -> Dict[str, Any]:
+         """Get a summary of anticipation state"""
+         return {
+             "current_file": self._current_context.current_file,
+             "recent_files_count": len(self._current_context.recent_files),
+             "recent_tasks_count": len(self._current_context.recent_tasks),
+             "workflow_patterns": len(self.workflow_predictor._patterns),
+             "pending_suggestions": len(self.suggestion_queue._queue),
+             "next_file_predictions": self.predict_next_files(limit=3),
+             "next_action_predictions": self.predict_next_actions()[:3],
+         }
+
+
+ # Convenience functions
+ _anticipation_engine: Optional[AnticipationEngine] = None
+
+
+ def get_anticipation_engine(
+     learning_core: Optional[Any] = None,
+     context_engine: Optional[Any] = None
+ ) -> AnticipationEngine:
+     """Get or create the anticipation engine instance"""
+     global _anticipation_engine
+     if _anticipation_engine is None:
+         _anticipation_engine = AnticipationEngine(
+             learning_core=learning_core,
+             context_engine=context_engine
+         )
+     return _anticipation_engine
+
+
+ def suggest_next() -> List[Suggestion]:
+     """Get proactive suggestions for the user"""
+     engine = get_anticipation_engine()
+     return engine.get_suggestions(limit=5)
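
For reviewers who want to exercise the new module directly, the short Python sketch below drives only the public API visible in the diff above (get_anticipation_engine, update_context, get_suggestions, predict_next_files, get_smart_defaults, end_session). It is not part of the package; the file paths and task strings are hypothetical placeholders chosen for illustration.

# Minimal sketch of how the AnticipationEngine added in this version could be driven.
# Assumes nc1709 1.15.4 is installed; paths and task names below are made up.
from nc1709.cognitive.anticipation import get_anticipation_engine

engine = get_anticipation_engine()

# Feed the engine some context as the user works.
engine.update_context(current_file="app/models.py", current_task="edit code")
engine.update_context(
    current_file="tests/test_models.py",
    current_task="run tests",
    uncommitted_changes=True,
)

# Ask for proactive suggestions and predictions.
for suggestion in engine.get_suggestions(limit=5):
    print(f"[{suggestion.suggestion_type.value}] {suggestion.title} "
          f"(confidence={suggestion.confidence:.2f})")

print(engine.predict_next_files(limit=3))   # [(path, probability), ...]
print(engine.get_smart_defaults("debug"))   # e.g. verbosity and reasoning flags

engine.end_session()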