nc1709 1.15.4 (nc1709-1.15.4-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. nc1709/__init__.py +13 -0
  2. nc1709/agent/__init__.py +36 -0
  3. nc1709/agent/core.py +505 -0
  4. nc1709/agent/mcp_bridge.py +245 -0
  5. nc1709/agent/permissions.py +298 -0
  6. nc1709/agent/tools/__init__.py +21 -0
  7. nc1709/agent/tools/base.py +440 -0
  8. nc1709/agent/tools/bash_tool.py +367 -0
  9. nc1709/agent/tools/file_tools.py +454 -0
  10. nc1709/agent/tools/notebook_tools.py +516 -0
  11. nc1709/agent/tools/search_tools.py +322 -0
  12. nc1709/agent/tools/task_tool.py +284 -0
  13. nc1709/agent/tools/web_tools.py +555 -0
  14. nc1709/agents/__init__.py +17 -0
  15. nc1709/agents/auto_fix.py +506 -0
  16. nc1709/agents/test_generator.py +507 -0
  17. nc1709/checkpoints.py +372 -0
  18. nc1709/cli.py +3380 -0
  19. nc1709/cli_ui.py +1080 -0
  20. nc1709/cognitive/__init__.py +149 -0
  21. nc1709/cognitive/anticipation.py +594 -0
  22. nc1709/cognitive/context_engine.py +1046 -0
  23. nc1709/cognitive/council.py +824 -0
  24. nc1709/cognitive/learning.py +761 -0
  25. nc1709/cognitive/router.py +583 -0
  26. nc1709/cognitive/system.py +519 -0
  27. nc1709/config.py +155 -0
  28. nc1709/custom_commands.py +300 -0
  29. nc1709/executor.py +333 -0
  30. nc1709/file_controller.py +354 -0
  31. nc1709/git_integration.py +308 -0
  32. nc1709/github_integration.py +477 -0
  33. nc1709/image_input.py +446 -0
  34. nc1709/linting.py +519 -0
  35. nc1709/llm_adapter.py +667 -0
  36. nc1709/logger.py +192 -0
  37. nc1709/mcp/__init__.py +18 -0
  38. nc1709/mcp/client.py +370 -0
  39. nc1709/mcp/manager.py +407 -0
  40. nc1709/mcp/protocol.py +210 -0
  41. nc1709/mcp/server.py +473 -0
  42. nc1709/memory/__init__.py +20 -0
  43. nc1709/memory/embeddings.py +325 -0
  44. nc1709/memory/indexer.py +474 -0
  45. nc1709/memory/sessions.py +432 -0
  46. nc1709/memory/vector_store.py +451 -0
  47. nc1709/models/__init__.py +86 -0
  48. nc1709/models/detector.py +377 -0
  49. nc1709/models/formats.py +315 -0
  50. nc1709/models/manager.py +438 -0
  51. nc1709/models/registry.py +497 -0
  52. nc1709/performance/__init__.py +343 -0
  53. nc1709/performance/cache.py +705 -0
  54. nc1709/performance/pipeline.py +611 -0
  55. nc1709/performance/tiering.py +543 -0
  56. nc1709/plan_mode.py +362 -0
  57. nc1709/plugins/__init__.py +17 -0
  58. nc1709/plugins/agents/__init__.py +18 -0
  59. nc1709/plugins/agents/django_agent.py +912 -0
  60. nc1709/plugins/agents/docker_agent.py +623 -0
  61. nc1709/plugins/agents/fastapi_agent.py +887 -0
  62. nc1709/plugins/agents/git_agent.py +731 -0
  63. nc1709/plugins/agents/nextjs_agent.py +867 -0
  64. nc1709/plugins/base.py +359 -0
  65. nc1709/plugins/manager.py +411 -0
  66. nc1709/plugins/registry.py +337 -0
  67. nc1709/progress.py +443 -0
  68. nc1709/prompts/__init__.py +22 -0
  69. nc1709/prompts/agent_system.py +180 -0
  70. nc1709/prompts/task_prompts.py +340 -0
  71. nc1709/prompts/unified_prompt.py +133 -0
  72. nc1709/reasoning_engine.py +541 -0
  73. nc1709/remote_client.py +266 -0
  74. nc1709/shell_completions.py +349 -0
  75. nc1709/slash_commands.py +649 -0
  76. nc1709/task_classifier.py +408 -0
  77. nc1709/version_check.py +177 -0
  78. nc1709/web/__init__.py +8 -0
  79. nc1709/web/server.py +950 -0
  80. nc1709/web/templates/index.html +1127 -0
  81. nc1709-1.15.4.dist-info/METADATA +858 -0
  82. nc1709-1.15.4.dist-info/RECORD +86 -0
  83. nc1709-1.15.4.dist-info/WHEEL +5 -0
  84. nc1709-1.15.4.dist-info/entry_points.txt +2 -0
  85. nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
  86. nc1709-1.15.4.dist-info/top_level.txt +1 -0
nc1709/cognitive/learning.py
@@ -0,0 +1,761 @@
+"""
+Layer 4: Learning Core
+
+Learns from user patterns over time:
+- Tracks user preferences (models, styles, workflows)
+- Learns from successful and unsuccessful interactions
+- Adapts responses based on past behavior
+- Provides personalized suggestions
+- Maintains privacy while learning
+
+This layer answers: "What has NC1709 learned about this user?"
+"""
+
+import os
+import json
+import hashlib
+import logging
+from dataclasses import dataclass, field, asdict
+from typing import Dict, List, Optional, Any, Tuple
+from pathlib import Path
+from datetime import datetime, timedelta
+from enum import Enum
+from collections import Counter, defaultdict
+import threading
+
+logger = logging.getLogger(__name__)
+
+
+class FeedbackType(Enum):
+    """Types of user feedback"""
+    ACCEPTED = "accepted"  # User accepted the suggestion
+    REJECTED = "rejected"  # User rejected/undid the change
+    MODIFIED = "modified"  # User modified the output
+    IGNORED = "ignored"  # User didn't respond
+    EXPLICIT_POSITIVE = "explicit_positive"  # User explicitly liked it
+    EXPLICIT_NEGATIVE = "explicit_negative"  # User explicitly disliked it
+
+
+class InteractionType(Enum):
+    """Types of user interactions"""
+    COMPLETION = "completion"
+    AGENT_TASK = "agent_task"
+    FILE_EDIT = "file_edit"
+    CODE_GENERATION = "code_generation"
+    EXPLANATION = "explanation"
+    DEBUG_HELP = "debug_help"
+    COMMAND = "command"
+    SEARCH = "search"
+
+
+@dataclass
+class UserPreference:
+    """A learned user preference"""
+    key: str
+    value: Any
+    confidence: float  # 0.0 to 1.0
+    observation_count: int
+    last_updated: datetime = field(default_factory=datetime.now)
+    category: str = "general"
+
+
+@dataclass
+class InteractionRecord:
+    """Record of a user interaction"""
+    interaction_id: str
+    interaction_type: InteractionType
+    timestamp: datetime
+    task_category: str
+    model_used: Optional[str]
+    input_summary: str  # Hashed/summarized for privacy
+    output_summary: str
+    feedback: Optional[FeedbackType] = None
+    duration_ms: Optional[int] = None
+    tokens_used: Optional[int] = None
+    metadata: Dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class PatternInsight:
+    """An insight derived from usage patterns"""
+    pattern_type: str
+    description: str
+    confidence: float
+    evidence_count: int
+    actionable_suggestion: Optional[str] = None
+    discovered_at: datetime = field(default_factory=datetime.now)
+
+
+@dataclass
+class UserProfile:
+    """User profile with learned preferences and patterns"""
+    user_id: str
+    created_at: datetime = field(default_factory=datetime.now)
+    last_active: datetime = field(default_factory=datetime.now)
+    total_interactions: int = 0
+    preferences: Dict[str, UserPreference] = field(default_factory=dict)
+    patterns: List[PatternInsight] = field(default_factory=list)
+    favorite_models: Dict[str, int] = field(default_factory=dict)
+    task_distribution: Dict[str, int] = field(default_factory=dict)
+    working_hours: Dict[int, int] = field(default_factory=dict)  # Hour -> count
+    session_durations: List[int] = field(default_factory=list)  # In minutes
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary for serialization"""
+        return {
+            "user_id": self.user_id,
+            "created_at": self.created_at.isoformat(),
+            "last_active": self.last_active.isoformat(),
+            "total_interactions": self.total_interactions,
+            "preferences": {k: asdict(v) for k, v in self.preferences.items()},
+            "patterns": [asdict(p) for p in self.patterns],
+            "favorite_models": self.favorite_models,
+            "task_distribution": self.task_distribution,
+            "working_hours": self.working_hours,
+            "session_durations": self.session_durations,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "UserProfile":
+        """Create from dictionary"""
+        profile = cls(user_id=data["user_id"])
+        profile.created_at = datetime.fromisoformat(data.get("created_at", datetime.now().isoformat()))
+        profile.last_active = datetime.fromisoformat(data.get("last_active", datetime.now().isoformat()))
+        profile.total_interactions = data.get("total_interactions", 0)
+        profile.favorite_models = data.get("favorite_models", {})
+        profile.task_distribution = data.get("task_distribution", {})
+        profile.working_hours = {int(k): v for k, v in data.get("working_hours", {}).items()}
+        profile.session_durations = data.get("session_durations", [])
+
+        # Reconstruct preferences
+        for key, pref_data in data.get("preferences", {}).items():
+            pref_data["last_updated"] = datetime.fromisoformat(pref_data["last_updated"])
+            profile.preferences[key] = UserPreference(**pref_data)
+
+        # Reconstruct patterns
+        for pattern_data in data.get("patterns", []):
+            pattern_data["discovered_at"] = datetime.fromisoformat(pattern_data["discovered_at"])
+            profile.patterns.append(PatternInsight(**pattern_data))
+
+        return profile
+
+
+class PreferenceLearner:
+    """Learns user preferences from interactions"""
+
+    # Preference keys
+    PREF_CODE_STYLE = "code_style"
+    PREF_VERBOSITY = "verbosity"
+    PREF_EXPLANATION_DEPTH = "explanation_depth"
+    PREF_MODEL = "preferred_model"
+    PREF_LANGUAGE = "preferred_language"
+    PREF_FRAMEWORK = "preferred_framework"
+    PREF_AUTO_APPLY = "auto_apply_suggestions"
+    PREF_COMMENT_STYLE = "comment_style"
+
+    def __init__(self, min_observations: int = 3, confidence_threshold: float = 0.6):
+        self.min_observations = min_observations
+        self.confidence_threshold = confidence_threshold
+        self._observation_buffer: Dict[str, List[Any]] = defaultdict(list)
+
+    def observe(self, key: str, value: Any, weight: float = 1.0) -> None:
+        """Record an observation for a preference"""
+        self._observation_buffer[key].append((value, weight, datetime.now()))
+
+    def learn_preference(self, key: str) -> Optional[UserPreference]:
+        """Analyze observations and learn a preference"""
+        observations = self._observation_buffer.get(key, [])
+
+        if len(observations) < self.min_observations:
+            return None
+
+        # Count value occurrences with weights
+        value_weights: Dict[Any, float] = defaultdict(float)
+        total_weight = 0.0
+
+        for value, weight, _ in observations:
+            if isinstance(value, dict):
+                value = json.dumps(value, sort_keys=True)
+            value_weights[value] += weight
+            total_weight += weight
+
+        if total_weight == 0:
+            return None
+
+        # Find most common value
+        best_value, best_weight = max(value_weights.items(), key=lambda x: x[1])
+        confidence = best_weight / total_weight
+
+        if confidence >= self.confidence_threshold:
+            # Try to parse back if it was a dict
+            try:
+                if isinstance(best_value, str) and best_value.startswith('{'):
+                    best_value = json.loads(best_value)
+            except Exception:
+                pass
+
+            return UserPreference(
+                key=key,
+                value=best_value,
+                confidence=confidence,
+                observation_count=len(observations),
+                category=self._categorize_preference(key),
+            )
+
+        return None
+
+    def _categorize_preference(self, key: str) -> str:
+        """Categorize a preference key"""
+        if "model" in key:
+            return "model"
+        elif "style" in key or "format" in key:
+            return "style"
+        elif "language" in key or "framework" in key:
+            return "tech"
+        else:
+            return "general"
+
+
+class PatternAnalyzer:
+    """Analyzes user behavior patterns"""
+
+    def __init__(self):
+        self._interaction_history: List[InteractionRecord] = []
+
+    def add_interaction(self, record: InteractionRecord) -> None:
+        """Add an interaction record"""
+        self._interaction_history.append(record)
+        # Keep last 1000 interactions
+        if len(self._interaction_history) > 1000:
+            self._interaction_history = self._interaction_history[-1000:]
+
+    def analyze_patterns(self) -> List[PatternInsight]:
+        """Analyze patterns from interaction history"""
+        patterns = []
+
+        if len(self._interaction_history) < 10:
+            return patterns
+
+        # Analyze time patterns
+        time_pattern = self._analyze_time_patterns()
+        if time_pattern:
+            patterns.append(time_pattern)
+
+        # Analyze task patterns
+        task_pattern = self._analyze_task_patterns()
+        if task_pattern:
+            patterns.append(task_pattern)
+
+        # Analyze model preferences
+        model_pattern = self._analyze_model_patterns()
+        if model_pattern:
+            patterns.append(model_pattern)
+
+        # Analyze feedback patterns
+        feedback_pattern = self._analyze_feedback_patterns()
+        if feedback_pattern:
+            patterns.append(feedback_pattern)
+
+        return patterns
+
+    def _analyze_time_patterns(self) -> Optional[PatternInsight]:
+        """Analyze when user is most active"""
+        if not self._interaction_history:
+            return None
+
+        hour_counts = Counter(r.timestamp.hour for r in self._interaction_history)
+        if not hour_counts:
+            return None
+
+        peak_hour, count = hour_counts.most_common(1)[0]
+        total = sum(hour_counts.values())
+        confidence = count / total if total > 0 else 0
+
+        # Check if there's a clear peak
+        if confidence > 0.15:  # At least 15% of activity in one hour
+            # Determine time of day
+            if 5 <= peak_hour < 12:
+                time_period = "morning"
+            elif 12 <= peak_hour < 17:
+                time_period = "afternoon"
+            elif 17 <= peak_hour < 21:
+                time_period = "evening"
+            else:
+                time_period = "night"
+
+            return PatternInsight(
+                pattern_type="time_preference",
+                description=f"User is most active in the {time_period} (peak at {peak_hour}:00)",
+                confidence=confidence,
+                evidence_count=count,
+                actionable_suggestion=f"Schedule complex tasks during {time_period} for best results",
+            )
+
+        return None
+
+    def _analyze_task_patterns(self) -> Optional[PatternInsight]:
+        """Analyze what types of tasks user does most"""
+        if not self._interaction_history:
+            return None
+
+        task_counts = Counter(r.task_category for r in self._interaction_history)
+        if not task_counts:
+            return None
+
+        top_task, count = task_counts.most_common(1)[0]
+        total = sum(task_counts.values())
+        confidence = count / total if total > 0 else 0
+
+        if confidence > 0.2:
+            return PatternInsight(
+                pattern_type="task_preference",
+                description=f"User frequently works on {top_task} tasks ({count} of {total})",
+                confidence=confidence,
+                evidence_count=count,
+                actionable_suggestion=f"Optimize for {top_task} workflows",
+            )
+
+        return None
+
+    def _analyze_model_patterns(self) -> Optional[PatternInsight]:
+        """Analyze model usage patterns"""
+        model_counts = Counter(
+            r.model_used for r in self._interaction_history
+            if r.model_used
+        )
+        if not model_counts:
+            return None
+
+        top_model, count = model_counts.most_common(1)[0]
+        total = sum(model_counts.values())
+        confidence = count / total if total > 0 else 0
+
+        if confidence > 0.3:
+            return PatternInsight(
+                pattern_type="model_preference",
+                description=f"User prefers {top_model} ({int(confidence * 100)}% of usage)",
+                confidence=confidence,
+                evidence_count=count,
+                actionable_suggestion=f"Default to {top_model} for similar tasks",
+            )
+
+        return None
+
+    def _analyze_feedback_patterns(self) -> Optional[PatternInsight]:
+        """Analyze feedback patterns"""
+        feedback_counts = Counter(
+            r.feedback for r in self._interaction_history
+            if r.feedback
+        )
+        if not feedback_counts:
+            return None
+
+        total = sum(feedback_counts.values())
+        accepted = feedback_counts.get(FeedbackType.ACCEPTED, 0)
+        rejected = feedback_counts.get(FeedbackType.REJECTED, 0)
+        modified = feedback_counts.get(FeedbackType.MODIFIED, 0)
+
+        acceptance_rate = accepted / total if total > 0 else 0
+        modification_rate = modified / total if total > 0 else 0
+
+        if acceptance_rate > 0.7:
+            return PatternInsight(
+                pattern_type="satisfaction",
+                description=f"High acceptance rate ({int(acceptance_rate * 100)}%)",
+                confidence=acceptance_rate,
+                evidence_count=accepted,
+            )
+        elif modification_rate > 0.4:
+            return PatternInsight(
+                pattern_type="customization_needed",
+                description=f"User often modifies outputs ({int(modification_rate * 100)}%)",
+                confidence=modification_rate,
+                evidence_count=modified,
+                actionable_suggestion="Consider asking for more preferences upfront",
+            )
+
+        return None
+
+
+class LearningCore:
+    """
+    Layer 4: Learning Core
+
+    Learns from user patterns over time to provide
+    personalized and adaptive assistance.
+    """
+
+    def __init__(
+        self,
+        data_dir: Optional[Path] = None,
+        user_id: Optional[str] = None,
+        anonymize: bool = True
+    ):
+        """
+        Initialize learning core
+
+        Args:
+            data_dir: Directory to store learning data
+            user_id: User identifier (generated if not provided)
+            anonymize: Whether to anonymize stored data
+        """
+        self.data_dir = data_dir or Path.home() / ".nc1709" / "learning"
+        self.anonymize = anonymize
+        self._lock = threading.Lock()
+
+        # Initialize or load user profile
+        self.user_id = user_id or self._get_or_create_user_id()
+        self.profile = self._load_profile()
+
+        # Initialize components
+        self.preference_learner = PreferenceLearner()
+        self.pattern_analyzer = PatternAnalyzer()
+
+        # Session tracking
+        self._session_start = datetime.now()
+        self._session_interactions = 0
+
+    def _get_or_create_user_id(self) -> str:
+        """Get or create a stable user ID"""
+        id_file = self.data_dir / "user_id"
+
+        if id_file.exists():
+            return id_file.read_text().strip()
+
+        # Generate new ID
+        import uuid
+        user_id = f"user_{uuid.uuid4().hex[:12]}"
+
+        # Save it
+        self.data_dir.mkdir(parents=True, exist_ok=True)
+        id_file.write_text(user_id)
+
+        return user_id
+
+    def _load_profile(self) -> UserProfile:
+        """Load user profile from disk"""
+        profile_file = self.data_dir / f"{self.user_id}_profile.json"
+
+        if profile_file.exists():
+            try:
+                with open(profile_file) as f:
+                    data = json.load(f)
+                return UserProfile.from_dict(data)
+            except Exception as e:
+                logger.warning(f"Error loading profile: {e}")
+
+        return UserProfile(user_id=self.user_id)
+
+    def _save_profile(self) -> None:
+        """Save user profile to disk"""
+        try:
+            self.data_dir.mkdir(parents=True, exist_ok=True)
+            profile_file = self.data_dir / f"{self.user_id}_profile.json"
+
+            with open(profile_file, "w") as f:
+                json.dump(self.profile.to_dict(), f, indent=2, default=str)
+
+        except Exception as e:
+            logger.warning(f"Error saving profile: {e}")
+
+    def _hash_content(self, content: str) -> str:
+        """Hash content for privacy"""
+        if not self.anonymize:
+            return content[:200]  # Truncate instead
+        return hashlib.sha256(content.encode()).hexdigest()[:16]
+
+    def record_interaction(
+        self,
+        interaction_type: InteractionType,
+        task_category: str,
+        input_text: str,
+        output_text: str,
+        model_used: Optional[str] = None,
+        duration_ms: Optional[int] = None,
+        tokens_used: Optional[int] = None,
+        metadata: Optional[Dict[str, Any]] = None
+    ) -> str:
+        """
+        Record a user interaction
+
+        Args:
+            interaction_type: Type of interaction
+            task_category: Category of the task
+            input_text: User input (will be hashed if anonymize=True)
+            output_text: AI output (will be hashed if anonymize=True)
+            model_used: Model that was used
+            duration_ms: Duration in milliseconds
+            tokens_used: Tokens consumed
+            metadata: Additional metadata
+
+        Returns:
+            Interaction ID
+        """
+        import uuid
+
+        interaction_id = f"int_{datetime.now().strftime('%Y%m%d%H%M%S')}_{uuid.uuid4().hex[:6]}"
+
+        record = InteractionRecord(
+            interaction_id=interaction_id,
+            interaction_type=interaction_type,
+            timestamp=datetime.now(),
+            task_category=task_category,
+            model_used=model_used,
+            input_summary=self._hash_content(input_text),
+            output_summary=self._hash_content(output_text),
+            duration_ms=duration_ms,
+            tokens_used=tokens_used,
+            metadata=metadata or {},
+        )
+
+        with self._lock:
+            # Update pattern analyzer
+            self.pattern_analyzer.add_interaction(record)
+
+            # Update profile stats
+            self.profile.total_interactions += 1
+            self.profile.last_active = datetime.now()
+
+            # Track model usage
+            if model_used:
+                self.profile.favorite_models[model_used] = \
+                    self.profile.favorite_models.get(model_used, 0) + 1
+
+            # Track task distribution
+            self.profile.task_distribution[task_category] = \
+                self.profile.task_distribution.get(task_category, 0) + 1
+
+            # Track working hours
+            hour = datetime.now().hour
+            self.profile.working_hours[hour] = \
+                self.profile.working_hours.get(hour, 0) + 1
+
+            # Learn preferences from model choice
+            if model_used:
+                self.preference_learner.observe(
+                    PreferenceLearner.PREF_MODEL,
+                    model_used,
+                    weight=1.0
+                )
+
+            self._session_interactions += 1
+
+        return interaction_id
+
+    def record_feedback(
+        self,
+        interaction_id: str,
+        feedback: FeedbackType,
+        details: Optional[str] = None
+    ) -> None:
+        """
+        Record feedback for an interaction
+
+        Args:
+            interaction_id: ID of the interaction
+            feedback: Type of feedback
+            details: Optional details
+        """
+        with self._lock:
+            # Find and update the interaction in pattern analyzer
+            for record in self.pattern_analyzer._interaction_history:
+                if record.interaction_id == interaction_id:
+                    record.feedback = feedback
+                    if details:
+                        record.metadata["feedback_details"] = self._hash_content(details)
+                    break
+
+            # Learn from feedback
+            if feedback == FeedbackType.ACCEPTED:
+                self.preference_learner.observe("satisfaction", 1.0, weight=1.0)
+            elif feedback == FeedbackType.REJECTED:
+                self.preference_learner.observe("satisfaction", 0.0, weight=1.0)
+            elif feedback == FeedbackType.MODIFIED:
+                self.preference_learner.observe("needs_customization", 1.0, weight=0.5)
+
+    def observe_preference(
+        self,
+        key: str,
+        value: Any,
+        weight: float = 1.0
+    ) -> None:
+        """
+        Observe a user preference
+
+        Args:
+            key: Preference key
+            value: Observed value
+            weight: Weight of this observation
+        """
+        with self._lock:
+            self.preference_learner.observe(key, value, weight)
+
+    def get_preference(self, key: str, default: Any = None) -> Any:
+        """
+        Get a learned preference
+
+        Args:
+            key: Preference key
+            default: Default value if not learned
+
+        Returns:
+            Learned preference value or default
+        """
+        with self._lock:
+            if key in self.profile.preferences:
+                pref = self.profile.preferences[key]
+                if pref.confidence >= 0.6:
+                    return pref.value
+
+            # Try to learn it now
+            learned = self.preference_learner.learn_preference(key)
+            if learned:
+                self.profile.preferences[key] = learned
+                return learned.value
+
+            return default
+
+    def get_all_preferences(self) -> Dict[str, UserPreference]:
+        """Get all learned preferences"""
+        with self._lock:
+            # Update with any newly learned preferences
+            for key in list(self.preference_learner._observation_buffer.keys()):
+                if key not in self.profile.preferences:
+                    learned = self.preference_learner.learn_preference(key)
+                    if learned:
+                        self.profile.preferences[key] = learned
+
+            return self.profile.preferences.copy()
+
+    def analyze_patterns(self) -> List[PatternInsight]:
+        """Analyze and return usage patterns"""
+        with self._lock:
+            patterns = self.pattern_analyzer.analyze_patterns()
+            self.profile.patterns = patterns
+            return patterns
+
+    def get_recommended_model(self, task_category: str) -> Optional[str]:
+        """
+        Get recommended model for a task based on learning
+
+        Args:
+            task_category: Category of the task
+
+        Returns:
+            Recommended model or None
+        """
+        # Check explicit preference
+        preferred = self.get_preference(PreferenceLearner.PREF_MODEL)
+        if preferred:
+            return preferred
+
+        # Check most successful model for this task type
+        with self._lock:
+            # Get successful interactions for this task type
+            successful = [
+                r for r in self.pattern_analyzer._interaction_history
+                if r.task_category == task_category
+                and r.feedback == FeedbackType.ACCEPTED
+                and r.model_used
+            ]
+
+            if successful:
+                model_counts = Counter(r.model_used for r in successful)
+                return model_counts.most_common(1)[0][0]
+
+            # Fall back to overall favorite
+            if self.profile.favorite_models:
+                return max(self.profile.favorite_models, key=self.profile.favorite_models.get)
+
+        return None
+
+    def get_user_summary(self) -> Dict[str, Any]:
+        """Get a summary of learned user behavior"""
+        with self._lock:
+            patterns = self.analyze_patterns()
+
+            # Calculate session stats
+            session_duration = (datetime.now() - self._session_start).total_seconds() / 60
+
+            summary = {
+                "user_id": self.user_id,
+                "total_interactions": self.profile.total_interactions,
+                "session_interactions": self._session_interactions,
+                "session_duration_minutes": round(session_duration, 1),
+                "favorite_model": max(self.profile.favorite_models, key=self.profile.favorite_models.get)
+                if self.profile.favorite_models else None,
+                "top_task_type": max(self.profile.task_distribution, key=self.profile.task_distribution.get)
+                if self.profile.task_distribution else None,
+                "peak_hour": max(self.profile.working_hours, key=self.profile.working_hours.get)
+                if self.profile.working_hours else None,
+                "preferences": {
+                    k: {"value": p.value, "confidence": p.confidence}
+                    for k, p in self.profile.preferences.items()
+                },
+                "insights": [
+                    {"type": p.pattern_type, "description": p.description, "suggestion": p.actionable_suggestion}
+                    for p in patterns
+                ],
+            }
+
+            return summary
+
+    def end_session(self) -> None:
+        """End the current session and save data"""
+        with self._lock:
+            # Record session duration
+            duration_minutes = int((datetime.now() - self._session_start).total_seconds() / 60)
+            self.profile.session_durations.append(duration_minutes)
+
+            # Keep last 100 session durations
+            if len(self.profile.session_durations) > 100:
+                self.profile.session_durations = self.profile.session_durations[-100:]
+
+            # Update patterns
+            self.profile.patterns = self.pattern_analyzer.analyze_patterns()
+
+            # Save profile
+            self._save_profile()
+
+        logger.info(f"Session ended: {self._session_interactions} interactions, {duration_minutes} minutes")
+
+    def reset(self) -> None:
+        """Reset all learned data (use with caution)"""
+        with self._lock:
+            self.profile = UserProfile(user_id=self.user_id)
+            self.preference_learner = PreferenceLearner()
+            self.pattern_analyzer = PatternAnalyzer()
+            self._save_profile()
+
+        logger.info("Learning data reset")
+
+
+# Convenience functions
+_learning_core: Optional[LearningCore] = None
+
+
+def get_learning_core(data_dir: Optional[Path] = None) -> LearningCore:
+    """Get or create the learning core instance"""
+    global _learning_core
+    if _learning_core is None:
+        _learning_core = LearningCore(data_dir=data_dir)
+    return _learning_core
+
+
+def record_interaction(
+    interaction_type: str,
+    task_category: str,
+    input_text: str,
+    output_text: str,
+    **kwargs
+) -> str:
+    """Quick helper to record an interaction"""
+    core = get_learning_core()
+    int_type = InteractionType(interaction_type) if isinstance(interaction_type, str) else interaction_type
+    return core.record_interaction(
+        interaction_type=int_type,
+        task_category=task_category,
+        input_text=input_text,
+        output_text=output_text,
+        **kwargs
+    )
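
For orientation, the sketch below shows how the public surface of this module could be exercised once the wheel is installed. It is a minimal, hypothetical example and is not part of the package: it assumes the hunk above is nc1709/cognitive/learning.py (the only +761-line file in the list above) and is therefore importable as nc1709.cognitive.learning, and the model name and input/output texts are placeholders.

    from pathlib import Path

    from nc1709.cognitive.learning import FeedbackType, InteractionType, LearningCore

    # Use a throwaway data directory so the demo does not touch ~/.nc1709/learning.
    core = LearningCore(data_dir=Path("/tmp/nc1709-learning-demo"), anonymize=True)

    # Record one interaction; the returned ID links later feedback to it.
    interaction_id = core.record_interaction(
        interaction_type=InteractionType.CODE_GENERATION,
        task_category="code_generation",
        input_text="write a fizzbuzz function",  # hashed when anonymize=True
        output_text="def fizzbuzz(n): ...",
        model_used="example-model",  # placeholder model name
        duration_ms=1200,
        tokens_used=350,
    )
    core.record_feedback(interaction_id, FeedbackType.ACCEPTED)

    # Recommendations draw on accepted interactions and learned preferences
    # (PreferenceLearner defaults: min_observations=3, confidence_threshold=0.6).
    print(core.get_recommended_model("code_generation"))

    # Persist the profile to <data_dir>/<user_id>_profile.json.
    core.end_session()

For callers that do not want to manage a LearningCore instance themselves, the module also exposes a module-level record_interaction() helper backed by the get_learning_core() singleton.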