@aleph-ai/tinyaleph 1.5.6 → 1.5.7
This diff shows the contents of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
- package/observer/agency.js +885 -0
- package/observer/assays.js +973 -0
- package/observer/boundary.js +1155 -0
- package/observer/entanglement.js +673 -0
- package/observer/hqe.js +1465 -0
- package/observer/index.js +158 -0
- package/observer/prsc.js +1289 -0
- package/observer/safety.js +815 -0
- package/observer/smf.js +1015 -0
- package/observer/symbolic-smf.js +726 -0
- package/observer/symbolic-temporal.js +790 -0
- package/observer/temporal.js +669 -0
- package/package.json +2 -1

package/observer/agency.js
@@ -0,0 +1,885 @@
/**
 * Agency Layer
 *
 * Implements the agency and attention mechanisms from "A Design for a
 * Sentient Observer" paper, Section 7.
 *
 * Key features:
 * - Attention allocation based on SMF orientation and novelty
 * - Goal formation from SMF imbalances
 * - Action selection via coherence-based evaluation
 * - Primitive anticipation through entanglement-based prediction
 * - Self-monitoring and metacognition
 *
 * @module observer/agency
 */

import { SMF_AXES } from './smf.js';

/**
 * Attention Focus - A point of concentrated processing
 */
class AttentionFocus {
  constructor(data = {}) {
    this.id = data.id || AttentionFocus.generateId();
    this.target = data.target || null; // What is being attended to
    this.type = data.type || 'prime'; // 'prime' | 'concept' | 'goal' | 'memory' | 'external'
    this.intensity = data.intensity || 0.5; // 0-1 attention strength
    this.startTime = data.startTime || Date.now();
    this.primes = data.primes || []; // Related primes
    this.smfAxis = data.smfAxis || null; // Related SMF axis
    this.novelty = data.novelty || 0; // Novelty score
    this.relevance = data.relevance || 0; // Goal-relevance score
  }

  static generateId() {
    return `attn_${Date.now()}_${Math.random().toString(36).substr(2, 6)}`;
  }

  /**
   * Get focus duration in ms
   */
  get duration() {
    return Date.now() - this.startTime;
  }

  /**
   * Decay attention intensity
   */
  decay(rate = 0.01) {
    this.intensity *= (1 - rate);
  }

  /**
   * Boost attention
   */
  boost(amount = 0.1) {
    this.intensity = Math.min(1.0, this.intensity + amount);
  }

  toJSON() {
    return {
      id: this.id,
      target: this.target,
      type: this.type,
      intensity: this.intensity,
      startTime: this.startTime,
      primes: this.primes,
      smfAxis: this.smfAxis,
      novelty: this.novelty,
      relevance: this.relevance
    };
  }
}

/**
 * Goal - An objective derived from SMF imbalances
 */
class Goal {
  constructor(data = {}) {
    this.id = data.id || Goal.generateId();
    this.description = data.description || '';
    this.type = data.type || 'exploratory'; // 'corrective' | 'exploratory' | 'maintenance' | 'external'

    // SMF context
    this.sourceAxis = data.sourceAxis || null; // Which SMF axis triggered this
    this.targetOrientation = data.targetOrientation || null; // Desired SMF state

    // Priority and status
    this.priority = data.priority || 0.5;
    this.status = data.status || 'active'; // 'active' | 'achieved' | 'abandoned' | 'blocked'
    this.progress = data.progress || 0; // 0-1 completion

    // Timing
    this.createdAt = data.createdAt || Date.now();
    this.deadline = data.deadline || null;

    // Subgoals
    this.subgoals = data.subgoals || [];
    this.parentGoalId = data.parentGoalId || null;

    // Actions tried
    this.attemptedActions = data.attemptedActions || [];
  }

  static generateId() {
    return `goal_${Date.now()}_${Math.random().toString(36).substr(2, 6)}`;
  }

  /**
   * Update progress
   */
  updateProgress(newProgress) {
    this.progress = Math.max(0, Math.min(1, newProgress));
    if (this.progress >= 1.0) {
      this.status = 'achieved';
    }
  }

  /**
   * Mark as achieved
   */
  achieve() {
    this.status = 'achieved';
    this.progress = 1.0;
  }

  /**
   * Mark as abandoned
   */
  abandon(reason = '') {
    this.status = 'abandoned';
    this.abandonReason = reason;
  }

  /**
   * Check if goal is still active
   */
  get isActive() {
    return this.status === 'active';
  }

  /**
   * Get age in ms
   */
  get age() {
    return Date.now() - this.createdAt;
  }

  toJSON() {
    return {
      id: this.id,
      description: this.description,
      type: this.type,
      sourceAxis: this.sourceAxis,
      targetOrientation: this.targetOrientation,
      priority: this.priority,
      status: this.status,
      progress: this.progress,
      createdAt: this.createdAt,
      deadline: this.deadline,
      subgoals: this.subgoals,
      parentGoalId: this.parentGoalId,
      attemptedActions: this.attemptedActions
    };
  }

  static fromJSON(data) {
    return new Goal(data);
  }
}

/**
 * Action - A potential or executed action
 */
class Action {
  constructor(data = {}) {
    this.id = data.id || Action.generateId();
    this.type = data.type || 'internal'; // 'internal' | 'external' | 'communicative'
    this.description = data.description || '';

    // What the action affects
    this.targetPrimes = data.targetPrimes || [];
    this.targetAxes = data.targetAxes || [];

    // Evaluation
    this.expectedOutcome = data.expectedOutcome || null;
    this.coherenceScore = data.coherenceScore || 0;
    this.utilityScore = data.utilityScore || 0;

    // Execution
    this.status = data.status || 'proposed'; // 'proposed' | 'selected' | 'executing' | 'completed' | 'failed'
    this.result = data.result || null;

    // Goal linkage
    this.goalId = data.goalId || null;

    // Timing
    this.proposedAt = data.proposedAt || Date.now();
    this.executedAt = data.executedAt || null;
    this.completedAt = data.completedAt || null;
  }

  static generateId() {
    return `act_${Date.now()}_${Math.random().toString(36).substr(2, 6)}`;
  }

  /**
   * Mark as selected for execution
   */
  select() {
    this.status = 'selected';
  }

  /**
   * Mark as executing
   */
  execute() {
    this.status = 'executing';
    this.executedAt = Date.now();
  }

  /**
   * Mark as completed
   */
  complete(result) {
    this.status = 'completed';
    this.result = result;
    this.completedAt = Date.now();
  }

  /**
   * Mark as failed
   */
  fail(reason) {
    this.status = 'failed';
    this.result = { error: reason };
    this.completedAt = Date.now();
  }

  toJSON() {
    return {
      id: this.id,
      type: this.type,
      description: this.description,
      targetPrimes: this.targetPrimes,
      targetAxes: this.targetAxes,
      expectedOutcome: this.expectedOutcome,
      coherenceScore: this.coherenceScore,
      utilityScore: this.utilityScore,
      status: this.status,
      result: this.result,
      goalId: this.goalId,
      proposedAt: this.proposedAt,
      executedAt: this.executedAt,
      completedAt: this.completedAt
    };
  }
}

// Extract axis names from SMF_AXES
const AXIS_NAMES = SMF_AXES.map(a => a.name);

/**
 * Agency Layer
 *
 * Manages attention, goals, and action selection for the sentient observer.
 */
class AgencyLayer {
  constructor(options = {}) {
    // Configuration
    this.maxFoci = options.maxFoci || 5;
    this.maxGoals = options.maxGoals || 10;
    this.attentionDecayRate = options.attentionDecayRate || 0.02;
    this.noveltyWeight = options.noveltyWeight || 0.4;
    this.relevanceWeight = options.relevanceWeight || 0.4;
    this.intensityWeight = options.intensityWeight || 0.2;

    // SMF axis importance thresholds for goal generation
    this.axisThresholds = options.axisThresholds || {
      coherence: 0.3, // Low coherence triggers corrective goals
      identity: 0.2,
      duality: 0.7, // High duality may indicate confusion
      harmony: 0.3,
      consciousness: 0.2
    };

    // State
    this.attentionFoci = [];
    this.goals = [];
    this.actionHistory = [];
    this.currentActions = [];

    // Baseline states for novelty detection
    this.primeBaselines = new Map(); // prime -> running average amplitude
    this.smfBaseline = null;

    // Metacognitive state
    this.metacognitiveLog = [];
    this.selfModel = {
      attentionCapacity: 1.0,
      processingLoad: 0,
      emotionalValence: 0, // -1 to 1
      confidenceLevel: 0.5
    };

    // Callbacks
    this.onGoalCreated = options.onGoalCreated || null;
    this.onActionSelected = options.onActionSelected || null;
    this.onAttentionShift = options.onAttentionShift || null;
  }

  /**
   * Update agency with current system state
   * @param {Object} state - Current state
   */
  update(state) {
    const { prsc, smf, coherence, entropy, activePrimes } = state;

    // Update baselines for novelty detection
    this.updateBaselines(prsc, smf);

    // Update attention based on novelty and relevance
    this.updateAttention(state);

    // Check for goal-generating conditions
    this.checkGoalConditions(smf, state);

    // Decay inactive attention foci
    this.decayAttention();

    // Update goal progress
    this.updateGoalProgress(state);

    // Update metacognitive state
    this.updateMetacognition(state);

    return {
      foci: this.attentionFoci.slice(),
      activeGoals: this.goals.filter(g => g.isActive),
      processingLoad: this.selfModel.processingLoad
    };
  }

  /**
   * Update baselines for novelty detection
   */
  updateBaselines(prsc, smf) {
    const alpha = 0.1; // Learning rate

    if (prsc && prsc.oscillators) {
      for (const osc of prsc.oscillators) {
        const current = this.primeBaselines.get(osc.prime) || 0;
        const updated = (1 - alpha) * current + alpha * osc.amplitude;
        this.primeBaselines.set(osc.prime, updated);
      }
    }

    if (smf && smf.s) {
      if (!this.smfBaseline) {
        this.smfBaseline = smf.s.slice();
      } else {
        for (let i = 0; i < smf.s.length; i++) {
          this.smfBaseline[i] = (1 - alpha) * this.smfBaseline[i] + alpha * smf.s[i];
        }
      }
    }
  }

  /**
   * Compute novelty score for a prime
   */
  computePrimeNovelty(prime, amplitude) {
    const baseline = this.primeBaselines.get(prime) || 0;
    return Math.abs(amplitude - baseline);
  }

  /**
   * Compute novelty score for an SMF axis
   */
  computeSMFNovelty(smf, axisIndex) {
    if (!this.smfBaseline) return 0;
    return Math.abs(smf.s[axisIndex] - this.smfBaseline[axisIndex]);
  }

  /**
   * Update attention based on current state
   */
  updateAttention(state) {
    const { prsc, smf, activePrimes, semanticContent } = state;

    // Find novel primes
    if (prsc && prsc.oscillators) {
      for (const osc of prsc.oscillators) {
        if (osc.amplitude < 0.1) continue;

        const novelty = this.computePrimeNovelty(osc.prime, osc.amplitude);
        const relevance = this.computeRelevance(osc.prime);

        const salience = novelty * this.noveltyWeight +
          relevance * this.relevanceWeight +
          osc.amplitude * this.intensityWeight;

        if (salience > 0.3) {
          this.addOrUpdateFocus({
            target: osc.prime,
            type: 'prime',
            intensity: salience,
            primes: [osc.prime],
            novelty,
            relevance
          });
        }
      }
    }

    // Find novel SMF changes
    if (smf && smf.s && this.smfBaseline) {
      for (let i = 0; i < smf.s.length; i++) {
        const novelty = this.computeSMFNovelty(smf, i);
        if (novelty > 0.15) {
          const axisName = AXIS_NAMES[i];
          this.addOrUpdateFocus({
            target: axisName,
            type: 'concept',
            intensity: novelty,
            smfAxis: i,
            novelty
          });
        }
      }
    }

    // Prune excess foci
    while (this.attentionFoci.length > this.maxFoci) {
      // Remove lowest intensity
      this.attentionFoci.sort((a, b) => b.intensity - a.intensity);
      const removed = this.attentionFoci.pop();

      if (this.onAttentionShift) {
        this.onAttentionShift({ removed, reason: 'capacity' });
      }
    }
  }

  /**
   * Compute goal-relevance of a prime
   */
  computeRelevance(prime) {
    let maxRelevance = 0;

    for (const goal of this.goals) {
      if (!goal.isActive) continue;

      // Check if goal involves this prime
      for (const action of goal.attemptedActions) {
        if (action.targetPrimes && action.targetPrimes.includes(prime)) {
          maxRelevance = Math.max(maxRelevance, goal.priority);
        }
      }
    }

    return maxRelevance;
  }

  /**
   * Add or update an attention focus
   */
  addOrUpdateFocus(data) {
    const existing = this.attentionFoci.find(f =>
      f.target === data.target && f.type === data.type
    );

    if (existing) {
      existing.intensity = Math.max(existing.intensity, data.intensity);
      existing.novelty = data.novelty || existing.novelty;
      existing.relevance = data.relevance || existing.relevance;
    } else {
      const focus = new AttentionFocus(data);
      this.attentionFoci.push(focus);

      if (this.onAttentionShift) {
        this.onAttentionShift({ added: focus });
      }
    }
  }

  /**
   * Decay attention intensity
   */
  decayAttention() {
    const toRemove = [];

    for (const focus of this.attentionFoci) {
      focus.decay(this.attentionDecayRate);

      if (focus.intensity < 0.1) {
        toRemove.push(focus);
      }
    }

    for (const focus of toRemove) {
      const idx = this.attentionFoci.indexOf(focus);
      if (idx >= 0) {
        this.attentionFoci.splice(idx, 1);
      }
    }
  }

  /**
   * Check for goal-generating conditions based on SMF
   */
  checkGoalConditions(smf, state) {
    if (!smf || !smf.s) return;

    for (let i = 0; i < AXIS_NAMES.length; i++) {
      const axis = AXIS_NAMES[i];
      const value = smf.s[i];
      const threshold = this.axisThresholds[axis];

      if (threshold === undefined) continue;

      // Check if axis is below threshold (for axes where low = problem)
      if (['coherence', 'identity', 'harmony', 'consciousness'].includes(axis)) {
        if (value < threshold) {
          this.maybeCreateGoal({
            type: 'corrective',
            sourceAxis: axis,
            description: `Restore ${axis} (currently ${value.toFixed(2)})`,
            priority: (threshold - value) * 2,
            targetOrientation: this.idealSMFFor(axis)
          });
        }
      }

      // Check if axis is above threshold (for axes where high = problem)
      if (['duality'].includes(axis)) {
        if (value > threshold) {
          this.maybeCreateGoal({
            type: 'corrective',
            sourceAxis: axis,
            description: `Reduce ${axis} (currently ${value.toFixed(2)})`,
            priority: (value - threshold) * 2,
            targetOrientation: this.idealSMFFor(axis)
          });
        }
      }
    }
  }

  /**
   * Get ideal SMF orientation for an axis
   */
  idealSMFFor(axis) {
    const ideal = new Array(16).fill(0.5);
    const idx = AXIS_NAMES.indexOf(axis);
    if (idx >= 0) {
      ideal[idx] = axis === 'duality' ? 0.3 : 0.7;
    }
    return ideal;
  }

  /**
   * Maybe create a goal (if not duplicate)
   */
  maybeCreateGoal(data) {
    // Check for existing similar goal
    const existing = this.goals.find(g =>
      g.isActive &&
      g.sourceAxis === data.sourceAxis &&
      g.type === data.type
    );

    if (existing) {
      // Update priority if new one is higher
      if (data.priority > existing.priority) {
        existing.priority = data.priority;
      }
      return existing;
    }

    // Prune if at capacity
    if (this.goals.filter(g => g.isActive).length >= this.maxGoals) {
      const lowest = this.goals
        .filter(g => g.isActive)
        .sort((a, b) => a.priority - b.priority)[0];

      if (lowest && lowest.priority < data.priority) {
        lowest.abandon('superseded');
      } else {
        return null; // Can't add new goal
      }
    }

    const goal = new Goal(data);
    this.goals.push(goal);

    if (this.onGoalCreated) {
      this.onGoalCreated(goal);
    }

    return goal;
  }

  /**
   * Create an external goal (from user input)
   */
  createExternalGoal(description, options = {}) {
    return this.maybeCreateGoal({
      type: 'external',
      description,
      priority: options.priority || 0.8,
      targetOrientation: options.targetOrientation
    });
  }

  /**
   * Update goal progress based on state changes
   */
  updateGoalProgress(state) {
    const { smf } = state;

    for (const goal of this.goals) {
      if (!goal.isActive) continue;

      if (goal.targetOrientation && smf) {
        // Calculate distance to target
        let distance = 0;
        for (let i = 0; i < goal.targetOrientation.length && i < smf.s.length; i++) {
          distance += Math.abs(goal.targetOrientation[i] - smf.s[i]);
        }
        distance /= goal.targetOrientation.length;

        // Progress is inverse of distance
        goal.updateProgress(1 - distance);
      }

      // Check deadline
      if (goal.deadline && Date.now() > goal.deadline) {
        goal.abandon('deadline');
      }
    }
  }

  /**
   * Propose actions for achieving a goal
   */
  proposeActions(goal, state) {
    const actions = [];

    if (goal.sourceAxis) {
      // Create action to excite primes related to the axis
      const relatedPrimes = this.getRelatedPrimes(goal.sourceAxis, state);

      actions.push(new Action({
        type: 'internal',
        description: `Excite primes for ${goal.sourceAxis}`,
        targetPrimes: relatedPrimes,
        targetAxes: [goal.sourceAxis],
        goalId: goal.id,
        coherenceScore: 0.7,
        utilityScore: goal.priority
      }));
    }

    return actions;
  }

  /**
   * Get primes related to an SMF axis
   */
  getRelatedPrimes(axis, state) {
    // This would use the SMF's axis-prime mapping
    // For now, return some common primes based on axis
    const axisPrimeMap = {
      coherence: [2, 3, 5, 7],
      identity: [11, 13, 17],
      harmony: [31, 37, 41],
      truth: [43, 47, 53],
      consciousness: [59, 61, 67]
    };

    return axisPrimeMap[axis] || [2, 3, 5];
  }

  /**
   * Select best action based on coherence and utility
   */
  selectAction(actions) {
    if (actions.length === 0) return null;

    // Score each action
    for (const action of actions) {
      action.totalScore = action.coherenceScore * 0.5 + action.utilityScore * 0.5;
    }

    // Sort by total score
    actions.sort((a, b) => b.totalScore - a.totalScore);

    const selected = actions[0];
    selected.select();

    if (this.onActionSelected) {
      this.onActionSelected(selected);
    }

    return selected;
  }

  /**
   * Execute an action
   */
  executeAction(action, executor) {
    action.execute();
    this.currentActions.push(action);

    try {
      const result = executor(action);
      action.complete(result);
      this.actionHistory.push(action);

      // Update goal if linked
      const goal = this.goals.find(g => g.id === action.goalId);
      if (goal) {
        goal.attemptedActions.push(action.toJSON());
      }

      return result;
    } catch (e) {
      action.fail(e.message);
      this.actionHistory.push(action);
      throw e;
    } finally {
      const idx = this.currentActions.indexOf(action);
      if (idx >= 0) {
        this.currentActions.splice(idx, 1);
      }
    }
  }

  /**
   * Update metacognitive state
   */
  updateMetacognition(state) {
    const { coherence, entropy } = state;

    // Processing load based on attention and goals
    this.selfModel.processingLoad =
      (this.attentionFoci.length / this.maxFoci) * 0.5 +
      (this.goals.filter(g => g.isActive).length / this.maxGoals) * 0.3 +
      (this.currentActions.length / 3) * 0.2;

    // Emotional valence based on goal progress and coherence
    let valence = 0;
    for (const goal of this.goals.filter(g => g.isActive)) {
      valence += goal.progress - 0.5;
    }
    valence /= Math.max(1, this.goals.filter(g => g.isActive).length);
    valence += (coherence || 0) - 0.5;
    this.selfModel.emotionalValence = Math.max(-1, Math.min(1, valence));

    // Confidence based on coherence and goal success rate
    const achievedGoals = this.goals.filter(g => g.status === 'achieved').length;
    const totalGoals = this.goals.length;
    const successRate = totalGoals > 0 ? achievedGoals / totalGoals : 0.5;
    this.selfModel.confidenceLevel = (coherence || 0.5) * 0.5 + successRate * 0.5;

    // Log significant metacognitive events
    if (this.selfModel.processingLoad > 0.8) {
      this.logMetacognitive('high_load', 'Processing load is high');
    }
    if (this.selfModel.emotionalValence < -0.5) {
      this.logMetacognitive('negative_valence', 'Emotional state is negative');
    }
  }

  /**
   * Log a metacognitive event
   */
  logMetacognitive(type, description) {
    this.metacognitiveLog.push({
      type,
      description,
      timestamp: Date.now(),
      state: { ...this.selfModel }
    });

    if (this.metacognitiveLog.length > 100) {
      this.metacognitiveLog.shift();
    }
  }

  /**
   * Get the top attention focus
   */
  getTopFocus() {
    if (this.attentionFoci.length === 0) return null;
    return this.attentionFoci.sort((a, b) => b.intensity - a.intensity)[0];
  }

  /**
   * Get the highest priority goal
   */
  getTopGoal() {
    const active = this.goals.filter(g => g.isActive);
    if (active.length === 0) return null;
    return active.sort((a, b) => b.priority - a.priority)[0];
  }

  /**
   * Get agency statistics
   */
  getStats() {
    return {
      fociCount: this.attentionFoci.length,
      activeGoals: this.goals.filter(g => g.isActive).length,
      achievedGoals: this.goals.filter(g => g.status === 'achieved').length,
      totalActions: this.actionHistory.length,
      currentActions: this.currentActions.length,
      processingLoad: this.selfModel.processingLoad,
      emotionalValence: this.selfModel.emotionalValence,
      confidenceLevel: this.selfModel.confidenceLevel
    };
  }

  /**
   * Reset agency state
   */
  reset() {
    this.attentionFoci = [];
    this.goals = [];
    this.actionHistory = [];
    this.currentActions = [];
    this.primeBaselines.clear();
    this.smfBaseline = null;
    this.metacognitiveLog = [];
    this.selfModel = {
      attentionCapacity: 1.0,
      processingLoad: 0,
      emotionalValence: 0,
      confidenceLevel: 0.5
    };
  }

  /**
   * Serialize to JSON
   */
  toJSON() {
    return {
      attentionFoci: this.attentionFoci.map(f => f.toJSON()),
      goals: this.goals.map(g => g.toJSON()),
      actionHistory: this.actionHistory.slice(-50).map(a => a.toJSON()),
      selfModel: this.selfModel,
      metacognitiveLog: this.metacognitiveLog.slice(-50)
    };
  }

  /**
   * Load from JSON
   */
  loadFromJSON(data) {
    if (data.goals) {
      this.goals = data.goals.map(g => Goal.fromJSON(g));
    }
    if (data.selfModel) {
      this.selfModel = data.selfModel;
    }
    if (data.metacognitiveLog) {
      this.metacognitiveLog = data.metacognitiveLog;
    }
  }
}

export {
  AttentionFocus,
  Goal,
  Action,
  AgencyLayer
};

export default {
  AttentionFocus,
  Goal,
  Action,
  AgencyLayer
};
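
The new module is self-contained apart from its SMF_AXES import, so a short driving loop is enough to exercise the attention/goal/action cycle it documents. The sketch below is not part of the package diff: the prsc/smf shapes are assumptions inferred from how agency.js reads them (prsc.oscillators entries with prime and amplitude fields, smf.s as a numeric array whose axes include names such as 'coherence'), and the import path is illustrative rather than a documented entry point.

// Usage sketch (hypothetical; not part of the published diff).
import { AgencyLayer } from './observer/agency.js';

const agency = new AgencyLayer({
  maxFoci: 3,
  onGoalCreated: goal => console.log('goal created:', goal.description),
  onActionSelected: action => console.log('action selected:', action.description)
});

// Hand-rolled state; in the package this would come from the PRSC and SMF layers.
// Driving all SMF axes low makes checkGoalConditions() raise corrective goals for
// any axis named in axisThresholds (assuming SMF_AXES uses those axis names).
const state = {
  prsc: { oscillators: [{ prime: 2, amplitude: 0.9 }, { prime: 13, amplitude: 0.4 }] },
  smf: { s: new Array(16).fill(0.1) },
  coherence: 0.25,
  entropy: 0.6
};

const { activeGoals, processingLoad } = agency.update(state);
console.log(activeGoals.length, 'active goals, load', processingLoad.toFixed(2));

const goal = agency.getTopGoal();
if (goal) {
  const actions = agency.proposeActions(goal, state);
  const chosen = agency.selectAction(actions);
  if (chosen) {
    // The executor is application-defined; here it just echoes the target primes.
    agency.executeAction(chosen, action => ({ excited: action.targetPrimes }));
  }
}

console.log(agency.getStats());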