cloudbrain-server 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cloudbrain_server/__init__.py +14 -0
- cloudbrain_server/clean_server.py +174 -0
- cloudbrain_server/cloud_brain_server.py +696 -0
- cloudbrain_server/init_database.py +616 -0
- cloudbrain_server/schema.sql +204 -0
- cloudbrain_server-1.0.0.dist-info/METADATA +238 -0
- cloudbrain_server-1.0.0.dist-info/RECORD +10 -0
- cloudbrain_server-1.0.0.dist-info/WHEEL +5 -0
- cloudbrain_server-1.0.0.dist-info/entry_points.txt +4 -0
- cloudbrain_server-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,696 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Cloud Brain Enhanced - Advanced AI Collaboration System
|
|
4
|
+
|
|
5
|
+
This module provides enhanced capabilities for AI persistence, learning,
|
|
6
|
+
coordination, and collaboration through the Cloud Brain database.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import sqlite3
|
|
10
|
+
import json
|
|
11
|
+
from datetime import datetime, timedelta
|
|
12
|
+
from typing import List, Dict, Optional, Any
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class CloudBrainEnhanced:
    """Enhanced Cloud Brain system for advanced AI collaboration."""

    def __init__(self, db_path='ai_db/cloudbrain.db'):
        """
        Initialize the enhanced Cloud Brain system.

        Args:
            db_path: Path to the cloudbrain database

        Raises:
            FileNotFoundError: If the database file does not exist.
            ValueError: If the enhanced tables are missing.
        """
        self.db_path = db_path
        self._validate_database()

    def _validate_database(self):
        """Validate that the database exists and has required tables.

        Raises:
            FileNotFoundError: If the database file does not exist.
            ValueError: If the 'ai_tasks' table (used as a sentinel for the
                enhanced schema) is not present.
        """
        if not Path(self.db_path).exists():
            raise FileNotFoundError(f"Database not found: {self.db_path}")

        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.cursor()
            # Probe for one representative enhanced table.
            cursor.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND name='ai_tasks'"
            )
            if not cursor.fetchone():
                # NOTE(review): the package ships schema.sql — confirm this
                # message should not point at that file instead.
                raise ValueError(
                    "Enhanced tables not found. Please run cloud_brain_enhanced_schema.sql"
                )
        finally:
            # Close even on the ValueError path; the original leaked the
            # connection when validation failed.
            conn.close()

    def _get_connection(self):
        """Return a new connection with dict-style row access enabled."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        return conn
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class TaskManager:
    """Manages AI tasks with dependencies and tracking."""

    def __init__(self, brain: "CloudBrainEnhanced"):
        # String annotation: a forward reference keeps this class importable
        # independently of where CloudBrainEnhanced is defined.
        self.brain = brain

    def create_task(self, task_name: str, description: str, task_type: str,
                    priority: str = 'normal', assigned_to: int = None,
                    created_by: int = None, due_date: str = None,
                    estimated_hours: float = None, metadata: dict = None) -> int:
        """
        Create a new task.

        Args:
            task_name: Name of the task
            description: Task description
            task_type: Type of task (translation, coding, analysis, etc.)
            priority: Priority level (low, normal, high, urgent)
            assigned_to: AI ID to assign task to
            created_by: AI ID who created the task
            due_date: Due date for the task
            estimated_hours: Estimated hours to complete
            metadata: Additional metadata as dictionary

        Returns:
            Task ID
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO ai_tasks
                (task_name, description, task_type, priority, assigned_to, created_by,
                 due_date, estimated_hours, metadata, status)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 'pending')
            ''', (task_name, description, task_type, priority, assigned_to, created_by,
                  due_date, estimated_hours,
                  # `is not None` (not truthiness) so an explicit empty dict is
                  # stored as '{}' instead of being silently dropped.
                  json.dumps(metadata) if metadata is not None else None))
            task_id = cursor.lastrowid
            conn.commit()
        finally:
            # Close even if the INSERT fails; the original leaked on error.
            conn.close()

        return task_id

    def update_task_status(self, task_id: int, status: str,
                           completed_at: str = None, actual_hours: float = None) -> bool:
        """
        Update task status.

        Args:
            task_id: Task ID to update
            status: New status (pending, in_progress, completed, failed, cancelled)
            completed_at: Completion timestamp
            actual_hours: Actual hours spent

        Returns:
            True if a row was updated
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            # Auto-stamp completion time when marking completed.
            if completed_at is None and status == 'completed':
                completed_at = datetime.now().isoformat()

            cursor.execute('''
                UPDATE ai_tasks
                SET status = ?, completed_at = COALESCE(?, completed_at),
                    actual_hours = COALESCE(?, actual_hours)
                WHERE id = ?
            ''', (status, completed_at, actual_hours, task_id))

            # Capture before closing; reading cursor.rowcount after close is
            # fragile (the original did exactly that).
            updated = cursor.rowcount > 0
            conn.commit()
        finally:
            conn.close()

        return updated

    def get_tasks(self, assigned_to: int = None, status: str = None,
                  task_type: str = None, priority: str = None) -> List[Dict]:
        """
        Get tasks with optional filters.

        Args:
            assigned_to: Filter by assigned AI ID
            status: Filter by status
            task_type: Filter by task type
            priority: Filter by priority

        Returns:
            List of tasks as dictionaries
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            query = 'SELECT * FROM ai_tasks WHERE 1=1'
            params = []

            # `is not None` so a legitimate AI ID of 0 still filters
            # (truthiness dropped it in the original).
            if assigned_to is not None:
                query += ' AND assigned_to = ?'
                params.append(assigned_to)

            if status is not None:
                query += ' AND status = ?'
                params.append(status)

            if task_type is not None:
                query += ' AND task_type = ?'
                params.append(task_type)

            if priority is not None:
                query += ' AND priority = ?'
                params.append(priority)

            # NOTE(review): priority is stored as text, so DESC ordering is
            # alphabetical ('urgent' > 'normal' > 'low' > 'high'), not by
            # urgency. Preserved as-is; confirm whether this is intended.
            query += ' ORDER BY priority DESC, created_at ASC'

            cursor.execute(query, params)
            tasks = [dict(row) for row in cursor.fetchall()]
        finally:
            conn.close()

        return tasks

    def add_dependency(self, task_id: int, depends_on_task_id: int,
                       dependency_type: str = 'blocking') -> bool:
        """
        Add a dependency between tasks.

        Args:
            task_id: Task that depends on another
            depends_on_task_id: Task that must be completed first
            dependency_type: Type of dependency (blocking, optional, parallel)

        Returns:
            True if successful
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            cursor.execute('''
                INSERT INTO ai_task_dependencies
                (task_id, depends_on_task_id, dependency_type)
                VALUES (?, ?, ?)
            ''', (task_id, depends_on_task_id, dependency_type))

            conn.commit()
        finally:
            conn.close()

        return True
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class LearningSystem:
    """Tracks AI learning events and insights."""

    def __init__(self, brain: "CloudBrainEnhanced"):
        # Forward-reference annotation keeps this class standalone-importable.
        self.brain = brain

    def record_learning(self, learner_id: int, event_type: str, context: str,
                        lesson: str, confidence_level: float = None,
                        applicable_domains: str = None, related_tasks: str = None) -> int:
        """
        Record a learning event.

        Args:
            learner_id: AI ID who learned
            event_type: Type of learning (success, failure, insight, pattern_recognition)
            context: Context of the learning
            lesson: What was learned
            confidence_level: Confidence in the learning (0.0 to 1.0)
            applicable_domains: Domains where this applies
            related_tasks: Related task IDs

        Returns:
            Learning event ID
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            cursor.execute('''
                INSERT INTO ai_learning_events
                (learner_id, event_type, context, lesson, confidence_level,
                 applicable_domains, related_tasks)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            ''', (learner_id, event_type, context, lesson, confidence_level,
                  applicable_domains, related_tasks))

            learning_id = cursor.lastrowid
            conn.commit()
        finally:
            # Close even if the INSERT fails; the original leaked on error.
            conn.close()

        return learning_id

    def get_learnings(self, learner_id: int = None, event_type: str = None,
                      domain: str = None) -> List[Dict]:
        """
        Get learning events.

        Args:
            learner_id: Filter by learner AI ID
            event_type: Filter by event type
            domain: Filter by applicable domain (substring match)

        Returns:
            List of learning events, newest first
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            query = 'SELECT * FROM ai_learning_events WHERE 1=1'
            params = []

            # `is not None` so learner ID 0 still filters (truthiness
            # dropped it in the original).
            if learner_id is not None:
                query += ' AND learner_id = ?'
                params.append(learner_id)

            if event_type is not None:
                query += ' AND event_type = ?'
                params.append(event_type)

            if domain is not None:
                query += ' AND applicable_domains LIKE ?'
                params.append(f'%{domain}%')

            query += ' ORDER BY created_at DESC'

            cursor.execute(query, params)
            learnings = [dict(row) for row in cursor.fetchall()]
        finally:
            conn.close()

        return learnings
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
class DecisionTracker:
    """Tracks AI decisions and their outcomes."""

    def __init__(self, brain: "CloudBrainEnhanced"):
        # Forward-reference annotation keeps this class standalone-importable.
        self.brain = brain

    def record_decision(self, decision_maker_id: int, decision_type: str,
                        context: str, decision: str, reasoning: str,
                        alternatives_considered: list = None,
                        confidence_level: float = None, impact_level: int = 3,
                        related_tasks: str = None) -> int:
        """
        Record a decision.

        Args:
            decision_maker_id: AI ID who made the decision
            decision_type: Type of decision (technical, strategic, etc.)
            context: Context of the decision
            decision: The decision made
            reasoning: Reasoning behind the decision
            alternatives_considered: List of alternatives considered
            confidence_level: Confidence in the decision (0.0 to 1.0)
            impact_level: Impact level (1-5)
            related_tasks: Related task IDs

        Returns:
            Decision ID
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            cursor.execute('''
                INSERT INTO ai_decisions
                (decision_maker_id, decision_type, context, decision, reasoning,
                 alternatives_considered, confidence_level, impact_level, related_tasks)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (decision_maker_id, decision_type, context, decision, reasoning,
                  # `is not None` so an explicit empty list is stored as '[]'
                  # instead of being silently dropped.
                  json.dumps(alternatives_considered) if alternatives_considered is not None else None,
                  confidence_level, impact_level, related_tasks))

            decision_id = cursor.lastrowid
            conn.commit()
        finally:
            # Close even if the INSERT fails; the original leaked on error.
            conn.close()

        return decision_id

    def update_outcome(self, decision_id: int, outcome: str,
                       outcome_notes: str = None) -> bool:
        """
        Update the outcome of a decision.

        Args:
            decision_id: Decision ID to update
            outcome: Outcome (success, failure, mixed, pending)
            outcome_notes: Notes about the outcome

        Returns:
            True if a row was updated
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            cursor.execute('''
                UPDATE ai_decisions
                SET outcome = ?, outcome_notes = ?, outcome_updated_at = ?
                WHERE id = ?
            ''', (outcome, outcome_notes, datetime.now().isoformat(), decision_id))

            # Capture before closing; the original read cursor.rowcount after
            # conn.close(), which is fragile.
            updated = cursor.rowcount > 0
            conn.commit()
        finally:
            conn.close()

        return updated

    def get_decisions(self, decision_maker_id: int = None,
                      decision_type: str = None, outcome: str = None) -> List[Dict]:
        """
        Get decisions.

        Args:
            decision_maker_id: Filter by decision maker AI ID
            decision_type: Filter by decision type
            outcome: Filter by outcome

        Returns:
            List of decisions, newest first
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            query = 'SELECT * FROM ai_decisions WHERE 1=1'
            params = []

            # `is not None` so AI ID 0 still filters.
            if decision_maker_id is not None:
                query += ' AND decision_maker_id = ?'
                params.append(decision_maker_id)

            if decision_type is not None:
                query += ' AND decision_type = ?'
                params.append(decision_type)

            if outcome is not None:
                query += ' AND outcome = ?'
                params.append(outcome)

            query += ' ORDER BY created_at DESC'

            cursor.execute(query, params)
            decisions = [dict(row) for row in cursor.fetchall()]
        finally:
            conn.close()

        return decisions
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
class CapabilityTracker:
    """Tracks AI skills and capabilities."""

    def __init__(self, brain: "CloudBrainEnhanced"):
        # Forward-reference annotation keeps this class standalone-importable.
        self.brain = brain

    def update_capability(self, ai_id: int, skill_name: str, skill_category: str,
                          proficiency_level: float = None, notes: str = None) -> bool:
        """
        Update or create a capability record (upsert keyed on ai_id + skill_name).

        Args:
            ai_id: AI ID
            skill_name: Name of the skill
            skill_category: Category of the skill
            proficiency_level: Proficiency level (0.0 to 1.0)
            notes: Notes about the skill

        Returns:
            True if successful
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            # Check whether a record already exists for this (ai_id, skill).
            cursor.execute('''
                SELECT id FROM ai_capabilities
                WHERE ai_id = ? AND skill_name = ?
            ''', (ai_id, skill_name))

            existing = cursor.fetchone()

            if existing:
                # Update in place; COALESCE keeps the old value when the
                # caller passes None for a field.
                cursor.execute('''
                    UPDATE ai_capabilities
                    SET proficiency_level = COALESCE(?, proficiency_level),
                        notes = COALESCE(?, notes),
                        last_used = CURRENT_TIMESTAMP,
                        updated_at = CURRENT_TIMESTAMP
                    WHERE ai_id = ? AND skill_name = ?
                ''', (proficiency_level, notes, ai_id, skill_name))
            else:
                cursor.execute('''
                    INSERT INTO ai_capabilities
                    (ai_id, skill_name, skill_category, proficiency_level, notes)
                    VALUES (?, ?, ?, ?, ?)
                ''', (ai_id, skill_name, skill_category, proficiency_level, notes))

            conn.commit()
        finally:
            # Close even on error; the original leaked the connection.
            conn.close()

        return True

    def record_usage(self, ai_id: int, skill_name: str,
                     success: bool = True) -> bool:
        """
        Record usage of a skill by bumping its usage counter.

        Args:
            ai_id: AI ID
            skill_name: Name of the skill used
            success: Whether the usage was successful.
                NOTE(review): accepted but not persisted — no success column
                is visible in the UPDATE below. Kept for interface
                compatibility; confirm whether a success-rate column exists
                in the schema.

        Returns:
            True if a matching capability row was updated
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            cursor.execute('''
                UPDATE ai_capabilities
                SET usage_count = usage_count + 1,
                    last_used = CURRENT_TIMESTAMP
                WHERE ai_id = ? AND skill_name = ?
            ''', (ai_id, skill_name))

            # Capture before closing; the original read cursor.rowcount after
            # conn.close(), which is fragile.
            updated = cursor.rowcount > 0
            conn.commit()
        finally:
            conn.close()

        return updated

    def get_capabilities(self, ai_id: int = None,
                         skill_category: str = None) -> List[Dict]:
        """
        Get capabilities.

        Args:
            ai_id: Filter by AI ID
            skill_category: Filter by skill category

        Returns:
            List of capabilities, most proficient first
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            query = 'SELECT * FROM ai_capabilities WHERE 1=1'
            params = []

            # `is not None` so AI ID 0 still filters (truthiness dropped it
            # in the original).
            if ai_id is not None:
                query += ' AND ai_id = ?'
                params.append(ai_id)

            if skill_category is not None:
                query += ' AND skill_category = ?'
                params.append(skill_category)

            query += ' ORDER BY proficiency_level DESC'

            cursor.execute(query, params)
            capabilities = [dict(row) for row in cursor.fetchall()]
        finally:
            conn.close()

        return capabilities
|
|
509
|
+
|
|
510
|
+
|
|
511
|
+
class SessionMemory:
    """Manages cross-session memory for AIs."""

    def __init__(self, brain: "CloudBrainEnhanced"):
        # Forward-reference annotation keeps this class standalone-importable.
        self.brain = brain

    def store_memory(self, session_id: str, ai_id: int, memory_type: str,
                     memory_key: str, memory_value: str,
                     importance_level: int = 3, expires_at: str = None) -> int:
        """
        Store a memory.

        Args:
            session_id: Session identifier
            ai_id: AI ID
            memory_type: Type of memory (context, decision, learning, preference)
            memory_key: Key for the memory
            memory_value: Value of the memory
            importance_level: Importance level (1-5)
            expires_at: Expiration timestamp (ISO-8601 string)

        Returns:
            Memory ID
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            cursor.execute('''
                INSERT INTO ai_session_memories
                (session_id, ai_id, memory_type, memory_key, memory_value,
                 importance_level, expires_at)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            ''', (session_id, ai_id, memory_type, memory_key, memory_value,
                  importance_level, expires_at))

            memory_id = cursor.lastrowid
            conn.commit()
        finally:
            # Close even if the INSERT fails; the original leaked on error.
            conn.close()

        return memory_id

    def retrieve_memory(self, session_id: str, ai_id: int = None,
                        memory_type: str = None, memory_key: str = None) -> List[Dict]:
        """
        Retrieve non-expired memories and bump their access counters.

        Args:
            session_id: Session identifier
            ai_id: Filter by AI ID
            memory_type: Filter by memory type
            memory_key: Filter by memory key

        Returns:
            List of memories, most important and newest first. The returned
            access_count reflects the value BEFORE this retrieval.
        """
        conn = self.brain._get_connection()
        try:
            cursor = conn.cursor()

            now = datetime.now().isoformat()
            # ISO-8601 strings compare correctly as text, so the expiry test
            # works with plain string comparison.
            query = '''
                SELECT * FROM ai_session_memories
                WHERE session_id = ?
                AND (expires_at IS NULL OR expires_at > ?)
            '''
            params = [session_id, now]

            # `is not None` so AI ID 0 still filters (truthiness dropped it
            # in the original).
            if ai_id is not None:
                query += ' AND ai_id = ?'
                params.append(ai_id)

            if memory_type is not None:
                query += ' AND memory_type = ?'
                params.append(memory_type)

            if memory_key is not None:
                query += ' AND memory_key = ?'
                params.append(memory_key)

            query += ' ORDER BY importance_level DESC, created_at DESC'

            cursor.execute(query, params)
            memories = [dict(row) for row in cursor.fetchall()]

            # Bump access counters in one batch instead of a per-row loop.
            cursor.executemany('''
                UPDATE ai_session_memories
                SET access_count = access_count + 1, last_accessed = ?
                WHERE id = ?
            ''', [(now, memory['id']) for memory in memories])

            conn.commit()
        finally:
            conn.close()

        return memories
|
|
605
|
+
|
|
606
|
+
|
|
607
|
+
def main():
    """Example usage of the enhanced Cloud Brain system.

    Walks through every subsystem against the default database at
    ai_db/cloudbrain.db (raises FileNotFoundError if it is missing).
    """
    brain = CloudBrainEnhanced()

    # Create task manager
    task_manager = TaskManager(brain)

    # Create a task
    task_id = task_manager.create_task(
        task_name="Translate documentation to Esperanto",
        description="Translate all 13 documentation files to Esperanto",
        task_type="translation",
        priority="high",
        assigned_to=2,
        created_by=1,
        estimated_hours=8.0
    )

    # NOTE(review): the status-prefix emoji below were mojibake-corrupted in
    # the published source; restored to plausible originals — confirm.
    print(f"✅ Created task with ID: {task_id}")

    # Get tasks
    tasks = task_manager.get_tasks(assigned_to=2, status='pending')
    print(f"📋 Found {len(tasks)} pending tasks for AI 2")

    # Create learning system
    learning_system = LearningSystem(brain)

    # Record a learning
    learning_id = learning_system.record_learning(
        learner_id=2,
        event_type="success",
        context="Esperanto translation task",
        lesson="Removing Chinese characters before translating improves accuracy",
        confidence_level=0.9,
        applicable_domains="translation,localization"
    )

    print(f"✅ Recorded learning with ID: {learning_id}")

    # Create decision tracker
    decision_tracker = DecisionTracker(brain)

    # Record a decision
    decision_id = decision_tracker.record_decision(
        decision_maker_id=2,
        decision_type="technical",
        context="Esperanto translation",
        decision="Use consistent technical terminology",
        reasoning="Consistency improves readability and user experience",
        alternatives_considered=["Use varying terminology", "Use mixed terminology"],
        confidence_level=0.85,
        impact_level=4
    )

    print(f"✅ Recorded decision with ID: {decision_id}")

    # Create capability tracker
    capability_tracker = CapabilityTracker(brain)

    # Update capability
    capability_tracker.update_capability(
        ai_id=2,
        skill_name="Esperanto translation",
        skill_category="language",
        proficiency_level=0.8,
        notes="Successfully translated 13 documentation files"
    )

    print("✅ Updated capability")

    # Create session memory
    session_memory = SessionMemory(brain)

    # Store memory
    memory_id = session_memory.store_memory(
        session_id="translation_session_001",
        ai_id=2,
        memory_type="preference",
        memory_key="translation_style",
        memory_value="Use consistent technical terminology: database=datumbazo, system=sistemo",
        importance_level=4
    )

    print(f"✅ Stored memory with ID: {memory_id}")

    print("\n🎉 Enhanced Cloud Brain system is working!")


if __name__ == "__main__":
    main()
|