@gotza02/seq-thinking 1.1.2 → 1.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +42 -4
- package/data/agents/1770106504306-dljh9ef.json +68 -0
- package/data/agents/1770106504310-4oarrst.json +58 -0
- package/data/agents/1770106540588-pvitt55.json +68 -0
- package/data/agents/1770106540595-z2ya871.json +58 -0
- package/data/agents/1770106710890-0e2naq1.json +68 -0
- package/data/agents/1770106710893-r076yxx.json +58 -0
- package/data/agents/1770109212161-4ybd0i7.json +68 -0
- package/data/agents/1770109212166-gkhya8h.json +58 -0
- package/data/sessions/1770100622009-5afiuyv.json +499 -0
- package/data/sessions/1770106504312-75zk750.json +107 -0
- package/data/sessions/1770106540597-z8e8soo.json +150 -0
- package/data/sessions/1770106710894-0kxgy5x.json +150 -0
- package/data/sessions/1770109212169-zpddeb9.json +150 -0
- package/dist/__tests__/sequential-thinking.test.js +21 -21
- package/dist/__tests__/sequential-thinking.test.js.map +1 -1
- package/dist/agents/base-agent.d.ts +1 -0
- package/dist/agents/base-agent.d.ts.map +1 -1
- package/dist/agents/base-agent.js +5 -3
- package/dist/agents/base-agent.js.map +1 -1
- package/dist/agents/meta-reasoning-agent.d.ts +3 -54
- package/dist/agents/meta-reasoning-agent.d.ts.map +1 -1
- package/dist/agents/meta-reasoning-agent.js +35 -328
- package/dist/agents/meta-reasoning-agent.js.map +1 -1
- package/dist/agents/synthesizer-agent.d.ts +3 -17
- package/dist/agents/synthesizer-agent.d.ts.map +1 -1
- package/dist/agents/synthesizer-agent.js +41 -139
- package/dist/agents/synthesizer-agent.js.map +1 -1
- package/dist/mcp-server.d.ts.map +1 -1
- package/dist/mcp-server.js +9 -6
- package/dist/mcp-server.js.map +1 -1
- package/dist/real_world_test.d.ts +2 -0
- package/dist/real_world_test.d.ts.map +1 -0
- package/dist/real_world_test.js +78 -0
- package/dist/real_world_test.js.map +1 -0
- package/dist/sequential-thinking.d.ts +5 -5
- package/dist/sequential-thinking.d.ts.map +1 -1
- package/dist/sequential-thinking.js +68 -32
- package/dist/sequential-thinking.js.map +1 -1
- package/dist/swarm-coordinator.d.ts +1 -1
- package/dist/swarm-coordinator.d.ts.map +1 -1
- package/dist/swarm-coordinator.js +39 -13
- package/dist/swarm-coordinator.js.map +1 -1
- package/dist/utils/llm-adapter.d.ts +2 -2
- package/dist/utils/llm-adapter.d.ts.map +1 -1
- package/dist/utils/llm-adapter.js +57 -33
- package/dist/utils/llm-adapter.js.map +1 -1
- package/dist/utils/logger.d.ts +20 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +49 -0
- package/dist/utils/logger.js.map +1 -0
- package/dist/utils/persistence.d.ts.map +1 -1
- package/dist/utils/persistence.js +4 -3
- package/dist/utils/persistence.js.map +1 -1
- package/package.json +1 -1
- package/real_world_test.log +200 -0
- package/real_world_test_dynamic.log +184 -0
- package/real_world_test_real.log +184 -0
- package/src/__tests__/sequential-thinking.test.ts +21 -21
- package/src/agents/base-agent.ts +6 -3
- package/src/agents/meta-reasoning-agent.ts +38 -397
- package/src/agents/synthesizer-agent.ts +48 -165
- package/src/mcp-server.ts +9 -6
- package/src/real_world_test.ts +89 -0
- package/src/sequential-thinking.ts +87 -33
- package/src/swarm-coordinator.ts +41 -13
- package/src/utils/llm-adapter.ts +66 -32
- package/src/utils/logger.ts +56 -0
- package/src/utils/persistence.ts +4 -3
package/src/__tests__/sequential-thinking.test.ts
CHANGED
@@ -292,9 +292,9 @@ describe('MetaReasoningEngine', () => {
     graph = new ThoughtGraph();
   });

-  test('should reflect on session', () => {
+  test('should reflect on session', async () => {
     const session = createMockSession();
-    const reflection = engine.reflect(session);
+    const reflection = await engine.reflect(session);

     assert.ok(reflection.reflectionType);
     assert.ok(reflection.content);
@@ -318,18 +318,18 @@ describe('MetaReasoningEngine', () => {
     }
   });

-  test('should handle empty session reflection', () => {
+  test('should handle empty session reflection', async () => {
     const emptySession = createMockSession({ totalThoughts: 0, totalBranches: 0 });
-    const reflection = engine.reflect(emptySession);
+    const reflection = await engine.reflect(emptySession);

     assert.ok(reflection);
     assert.ok(Array.isArray(reflection.insights));
     assert.ok(Array.isArray(reflection.recommendations));
   });

-  test('should handle session with multiple branches', () => {
+  test('should handle session with multiple branches', async () => {
     const session = createMockSession({ totalThoughts: 10, totalBranches: 3 });
-    const reflection = engine.reflect(session);
+    const reflection = await engine.reflect(session);

     assert.ok(reflection.insights.length > 0 || reflection.recommendations.length > 0);
   });
@@ -344,11 +344,11 @@ describe('SelfCorrectionEngine', () => {
     graph = new ThoughtGraph();
   });

-  test('should detect issues in thought', () => {
+  test('should detect issues in thought', async () => {
     const sessionId = 'test-session';
     const thought = graph.createThought(sessionId, 'Test thought with potential issues');

-    const issues = engine.detectIssues(thought, graph);
+    const issues = await engine.detectIssues(thought, graph);

     assert.ok(Array.isArray(issues));
     issues.forEach(issue => {
@@ -359,14 +359,14 @@ describe('SelfCorrectionEngine', () => {
     });
   });

-  test('should detect no issues in well-formed thought', () => {
+  test('should detect no issues in well-formed thought', async () => {
     const sessionId = 'test-session';
     const thought = graph.createThought(sessionId, 'Well-formed thought with clear reasoning', {
       confidence: 0.95,
       assumptions: ['valid-assumption'],
     });

-    const issues = engine.detectIssues(thought, graph);
+    const issues = await engine.detectIssues(thought, graph);

     // Should either return empty array or only low severity issues
     issues.forEach(issue => {
@@ -418,30 +418,30 @@ describe('AdaptiveGranularityEngine', () => {
     engine = new AdaptiveGranularityEngine();
   });

-  test('should calculate granularity for simple topic', () => {
+  test('should calculate granularity for simple topic', async () => {
     const context = createMockContext();
-    const result = engine.calculateGranularity('Simple topic', context, 1);
+    const result = await engine.calculateGranularity('Simple topic', context, 1);

     assert.ok(typeof result.detail === 'number');
     assert.ok(result.detail > 0 && result.detail <= 1);
     assert.ok(result.reasoning);
   });

-  test('should calculate granularity for complex topic', () => {
+  test('should calculate granularity for complex topic', async () => {
     const context = createMockContext();
     const complexTopic = 'Analyze the implications of quantum computing on cryptographic systems';
-    const result = engine.calculateGranularity(complexTopic, context, 1);
+    const result = await engine.calculateGranularity(complexTopic, context, 1);

     assert.ok(result.detail > 0);
     assert.ok(result.reasoning.length > 0);
   });

-  test('should adjust granularity based on depth', () => {
+  test('should adjust granularity based on depth', async () => {
     const context = createMockContext();
     const topic = 'Test topic';

-    const shallow = engine.calculateGranularity(topic, context, 1);
-    const deep = engine.calculateGranularity(topic, context, 10);
+    const shallow = await engine.calculateGranularity(topic, context, 1);
+    const deep = await engine.calculateGranularity(topic, context, 10);

     // Deeper depth might suggest different granularity
     assert.ok(typeof shallow.detail === 'number');
@@ -458,7 +458,7 @@ describe('AdaptiveGranularityEngine', () => {
     assert.ok(recommendation.reasoning);
   });

-  test('should handle empty context', () => {
+  test('should handle empty context', async () => {
     const emptyContext: SessionContext = {
       originalProblem: '',
       constraints: [],
@@ -467,13 +467,13 @@ describe('AdaptiveGranularityEngine', () => {
       longTermReferences: [],
     };

-    const result = engine.calculateGranularity('Topic', emptyContext, 1);
+    const result = await engine.calculateGranularity('Topic', emptyContext, 1);

     assert.ok(typeof result.detail === 'number');
     assert.ok(result.reasoning);
   });

-  test('should handle very complex context', () => {
+  test('should handle very complex context', async () => {
     const complexContext: SessionContext = {
       originalProblem: 'Complex multi-faceted problem',
       constraints: Array(20).fill('constraint'),
@@ -482,7 +482,7 @@ describe('AdaptiveGranularityEngine', () => {
       longTermReferences: Array(10).fill('ref'),
     };

-    const result = engine.calculateGranularity('Complex topic', complexContext, 5);
+    const result = await engine.calculateGranularity('Complex topic', complexContext, 5);

     assert.ok(typeof result.detail === 'number');
     assert.ok(result.detail > 0 && result.detail <= 1);
package/src/agents/base-agent.ts
CHANGED
@@ -13,6 +13,7 @@ import {
   type TaskResult,
   type SwarmMessage
 } from '../types/index.js';
+import { Logger } from '../utils/logger.js';

 /**
  * Abstract base class for all agents
@@ -142,11 +143,11 @@ export abstract class BaseAgent {
       const result = handler(message);
       if (result instanceof Promise) {
         result.catch(error => {
-
+          Logger.error(`Error in message handler for agent ${this.id}`, { error });
         });
       }
     } catch (error) {
-
+      Logger.error(`Error in message handler for agent ${this.id}`, { error });
     }
   }
 }
@@ -254,6 +255,7 @@ export abstract class BaseAgent {
       tokensUsed?: number;
       reasoningSteps?: number;
       intermediateResults?: unknown[];
+      error?: string;
     } = {}
   ): TaskResult {
     return {
@@ -266,7 +268,8 @@ export abstract class BaseAgent {
       metadata: {
         tokensUsed: metadata.tokensUsed || 0,
         reasoningSteps: metadata.reasoningSteps || 1,
-        intermediateResults: metadata.intermediateResults || []
+        intermediateResults: metadata.intermediateResults || [],
+        error: metadata.error
       }
     };
   }
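Two things change in `base-agent.ts`: message-handler errors that were previously swallowed by empty catch blocks are now reported through the new `Logger` utility, and the `createTaskResult` metadata gains an optional `error` field so failures can travel inside task results. The diff only shows the call sites (`Logger.error(message, { error })`); the sketch below is an assumed minimal shape for the new `src/utils/logger.ts`, not its actual contents.

```typescript
// Assumed minimal Logger consistent with the call sites above:
// Logger.error(message, { error }). The real src/utils/logger.ts may differ.
export class Logger {
  private static format(level: string, message: string): string {
    return `[${level}] ${new Date().toISOString()} ${message}`;
  }

  static error(message: string, meta?: Record<string, unknown>): void {
    // console.error writes to stderr, keeping stdout free of log noise.
    console.error(Logger.format('ERROR', message), meta ?? '');
  }

  static info(message: string, meta?: Record<string, unknown>): void {
    console.error(Logger.format('INFO', message), meta ?? '');
  }
}
```

Keeping log output off stdout matters for an MCP server that communicates over stdio, which is likely why a dedicated logger was introduced instead of leaving the catch blocks empty.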
package/src/agents/meta-reasoning-agent.ts
CHANGED
@@ -14,6 +14,7 @@ import {
   type Thought,
   type ThinkingSession
 } from '../types/index.js';
+import { LLMAdapter } from '../utils/llm-adapter.js';

 /**
  * Meta-observation
@@ -27,17 +28,7 @@ interface MetaObservation {
 }

 /**
- *
- */
-interface ThinkingPattern {
-  pattern: string;
-  frequency: number;
-  impact: 'positive' | 'negative' | 'neutral';
-  suggestion?: string;
-}
-
-/**
- * Meta-reasoning agent that reflects on thinking processes
+ * Meta-reasoning agent that reflects on thinking processes using LLM
  */
 export class MetaReasoningAgent extends BaseAgent {
   /** Observation history */
@@ -54,26 +45,26 @@ export class MetaReasoningAgent extends BaseAgent {
     const defaultCapabilities: AgentCapability[] = [
       {
         name: 'process_reflection',
-        description: 'Reflect on thinking processes',
-        confidence: 0.
+        description: 'Reflect on thinking processes using LLM',
+        confidence: 0.95,
         performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
       },
       {
         name: 'strategy_assessment',
-        description: 'Assess reasoning strategies',
-        confidence: 0.
+        description: 'Assess reasoning strategies using LLM',
+        confidence: 0.9,
         performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
       },
       {
         name: 'bias_detection',
-        description: 'Detect cognitive biases',
-        confidence: 0.
+        description: 'Detect cognitive biases using LLM',
+        confidence: 0.85,
        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
       },
       {
         name: 'pattern_analysis',
-        description: 'Analyze thinking patterns',
-        confidence: 0.
+        description: 'Analyze thinking patterns using LLM',
+        confidence: 0.9,
         performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
       }
     ];
@@ -104,7 +95,7 @@ export class MetaReasoningAgent extends BaseAgent {
   }

   /**
-   * Process a task - perform meta-reasoning
+   * Process a task - perform meta-reasoning using LLM
    * @param task - Task to process
    * @returns Task result
    */
@@ -116,30 +107,48 @@ export class MetaReasoningAgent extends BaseAgent {
       operation: 'reflect' | 'analyze_patterns' | 'assess_strategy' | 'detect_bias';
     };

-
-
+    const provider = (task.context as any)?.modelProvider;
+
+    let prompt = '';
+    let systemPrompt = 'You are a meta-reasoning agent. Your role is to analyze and reflect on the thinking processes of other agents or sessions, identifying patterns, biases, and areas for improvement.';
+
+    const sessionData = input.session ? `Session Topic: ${input.session.topic}\nSession Status: ${input.session.status}\n` : '';
+    const thoughtsData = (input.thoughts || (input.session ? Array.from(input.session.thoughts.values()) : []))
+      .map(t => `Step ${t.stepNumber} (${t.thoughtType}, Confidence: ${t.confidence.overall}):\n${t.content}`).join('\n\n');

     switch (input.operation) {
       case 'reflect':
-
+        prompt = `Perform a comprehensive reflection on this thinking session. What are the key insights? What has been achieved? What is still missing?\n\n${sessionData}${thoughtsData}`;
         break;
       case 'analyze_patterns':
-
+        prompt = `Analyze the thinking patterns in these thoughts. Are there repetitive cycles? Is there a logical progression? Are there any gaps in the reasoning chain?\n\n${thoughtsData}`;
         break;
       case 'assess_strategy':
-
+        prompt = `Assess the reasoning strategy used in this session. Is it effective for the topic? Should the strategy be adjusted (e.g., move from analysis to synthesis)?\n\n${sessionData}${thoughtsData}`;
         break;
       case 'detect_bias':
-
+        prompt = `Identify any cognitive biases present in these thoughts (e.g., confirmation bias, anchoring, overconfidence). Provide evidence from the text for each detected bias.\n\n${thoughtsData}`;
+        systemPrompt = 'You are an expert in cognitive psychology and bias detection. Your role is to identify subtle biases in reasoning processes.';
         break;
       default:
-
+        prompt = `Reflect on the following thoughts and provide meta-observations.\n\n${thoughtsData}`;
+    }
+
+    const response = await LLMAdapter.call(prompt, systemPrompt, provider);
+
+    if (response.error) {
+      // Fallback to simple reflection if LLM fails
+      return this.createTaskResult(task.id, { error: response.error, fallback: 'LLM failed' }, 0, Date.now() - startTime);
     }

     return this.createTaskResult(
       task.id,
-
-
+      {
+        analysis: response.content,
+        operation: input.operation,
+        timestamp: new Date()
+      },
+      0.9,
       Date.now() - startTime,
       {
         reasoningSteps: 1,
@@ -148,374 +157,6 @@
     );
   }

-  /**
-   * Reflect on a thinking session
-   * @param session - Session to reflect on
-   * @returns Reflection result
-   */
-  reflect(session?: ThinkingSession): {
-    reflection: string;
-    insights: string[];
-    recommendations: string[];
-    observations: MetaObservation[];
-  } {
-    if (!session) {
-      return {
-        reflection: 'No session provided for reflection',
-        insights: [],
-        recommendations: [],
-        observations: []
-      };
-    }
-
-    const thoughts = Array.from(session.thoughts.values());
-    const insights: string[] = [];
-    const recommendations: string[] = [];
-    const observations: MetaObservation[] = [];
-
-    // Analyze thinking patterns
-    const patterns = this.analyzeThinkingPatterns(thoughts);
-
-    for (const pattern of patterns) {
-      if (pattern.impact === 'negative') {
-        observations.push({
-          type: 'concern',
-          description: pattern.pattern,
-          severity: pattern.frequency > 3 ? 'high' : 'medium',
-          recommendation: pattern.suggestion
-        });
-        recommendations.push(pattern.suggestion || 'Review and adjust thinking pattern');
-      } else if (pattern.impact === 'positive') {
-        observations.push({
-          type: 'insight',
-          description: pattern.pattern
-        });
-        insights.push(pattern.pattern);
-      }
-    }
-
-    // Check confidence trends
-    const confidenceTrend = this.analyzeConfidenceTrend(thoughts);
-    if (confidenceTrend.trend === 'declining') {
-      observations.push({
-        type: 'concern',
-        description: 'Confidence is declining over time',
-        severity: 'medium',
-        recommendation: 'Review assumptions and gather more evidence'
-      });
-      recommendations.push('Strengthen reasoning with additional evidence');
-    } else if (confidenceTrend.trend === 'improving') {
-      insights.push('Confidence is improving, indicating solid reasoning progress');
-    }
-
-    // Check thought type distribution
-    const typeDistribution = this.analyzeThoughtTypeDistribution(thoughts);
-    const analysisCount = typeDistribution.get(ThoughtType.ANALYSIS) || 0;
-    const hypothesisCount = typeDistribution.get(ThoughtType.HYPOTHESIS) || 0;
-
-    if (analysisCount > hypothesisCount * 3) {
-      observations.push({
-        type: 'suggestion',
-        description: 'Heavy focus on analysis with limited hypothesis generation',
-        recommendation: 'Consider exploring more alternative hypotheses'
-      });
-      recommendations.push('Generate more diverse hypotheses');
-    }
-
-    // Check for meta-reasoning gaps
-    const metaThoughtCount = thoughts.filter(t => t.thoughtType === ThoughtType.META_REASONING).length;
-    if (metaThoughtCount < thoughts.length * 0.1) {
-      observations.push({
-        type: 'suggestion',
-        description: 'Limited meta-reasoning detected',
-        recommendation: 'Increase reflection on the thinking process itself'
-      });
-      recommendations.push('Add more meta-reasoning steps');
-    }
-
-    const reflection = `Analyzed ${thoughts.length} thoughts across ${session.branches.size} branches. ` +
-      `Found ${insights.length} positive insights and ${recommendations.length} areas for improvement.`;
-
-    return {
-      reflection,
-      insights,
-      recommendations,
-      observations
-    };
-  }
-
-  /**
-   * Analyze thinking patterns
-   * @param thoughts - Thoughts to analyze
-   * @returns Array of patterns
-   */
-  analyzeThinkingPatterns(thoughts: Thought[]): ThinkingPattern[] {
-    const patterns: ThinkingPattern[] = [];
-
-    if (thoughts.length === 0) return patterns;
-
-    // Pattern: Repetitive thinking
-    const contentSimilarities = this.findSimilarThoughts(thoughts);
-    if (contentSimilarities.length > thoughts.length * 0.3) {
-      patterns.push({
-        pattern: 'Repetitive thinking detected - similar thoughts being generated',
-        frequency: contentSimilarities.length,
-        impact: 'negative',
-        suggestion: 'Focus on generating novel insights rather than restating'
-      });
-    }
-
-    // Pattern: Confidence progression
-    const confidenceValues = thoughts.map(t => t.confidence.overall);
-    const avgConfidence = confidenceValues.reduce((a, b) => a + b, 0) / confidenceValues.length;
-
-    if (avgConfidence < 0.5) {
-      patterns.push({
-        pattern: 'Low average confidence across thoughts',
-        frequency: thoughts.length,
-        impact: 'negative',
-        suggestion: 'Strengthen evidence and reasoning for key claims'
-      });
-    } else if (avgConfidence > 0.8) {
-      patterns.push({
-        pattern: 'High confidence maintained throughout',
-        frequency: thoughts.length,
-        impact: 'positive'
-      });
-    }
-
-    // Pattern: Branch exploration
-    const branchIds = new Set(thoughts.map(t => t.branchId));
-    if (branchIds.size === 1 && thoughts.length > 5) {
-      patterns.push({
-        pattern: 'Single branch exploration - no parallel hypotheses',
-        frequency: 1,
-        impact: 'negative',
-        suggestion: 'Consider branching to explore alternative approaches'
-      });
-    } else if (branchIds.size > 2) {
-      patterns.push({
-        pattern: 'Active branch exploration with parallel hypotheses',
-        frequency: branchIds.size,
-        impact: 'positive'
-      });
-    }
-
-    // Pattern: Assumption documentation
-    const thoughtsWithAssumptions = thoughts.filter(t => t.assumptions.length > 0).length;
-    if (thoughtsWithAssumptions < thoughts.length * 0.3) {
-      patterns.push({
-        pattern: 'Limited explicit assumption documentation',
-        frequency: thoughts.length - thoughtsWithAssumptions,
-        impact: 'negative',
-        suggestion: 'Document assumptions explicitly for each major claim'
-      });
-    }
-
-    // Pattern: Revision activity
-    const revisedThoughts = thoughts.filter(t => t.revisionHistory.length > 0).length;
-    if (revisedThoughts > thoughts.length * 0.5) {
-      patterns.push({
-        pattern: 'High revision rate - thoughts being frequently corrected',
-        frequency: revisedThoughts,
-        impact: 'neutral',
-        suggestion: 'Consider more careful initial analysis to reduce revisions'
-      });
-    }
-
-    return patterns;
-  }
-
-  /**
-   * Assess reasoning strategy
-   * @param session - Session to assess
-   * @returns Strategy assessment
-   */
-  assessStrategy(session?: ThinkingSession): {
-    assessment: string;
-    strengths: string[];
-    weaknesses: string[];
-    suggestions: string[];
-  } {
-    if (!session) {
-      return {
-        assessment: 'No session provided',
-        strengths: [],
-        weaknesses: [],
-        suggestions: []
-      };
-    }
-
-    const thoughts = Array.from(session.thoughts.values());
-    const strengths: string[] = [];
-    const weaknesses: string[] = [];
-    const suggestions: string[] = [];
-
-    // Assess hypothesis generation
-    const hypothesisCount = thoughts.filter(t => t.thoughtType === ThoughtType.HYPOTHESIS).length;
-    if (hypothesisCount >= 3) {
-      strengths.push('Good hypothesis generation with multiple alternatives');
-    } else if (hypothesisCount < 2 && thoughts.length > 5) {
-      weaknesses.push('Limited hypothesis generation');
-      suggestions.push('Generate at least 3 alternative hypotheses early in the process');
-    }
-
-    // Assess evidence gathering
-    const evidenceCount = thoughts.filter(t => t.thoughtType === ThoughtType.EVIDENCE).length;
-    if (evidenceCount >= thoughts.length * 0.2) {
-      strengths.push('Strong evidence-based reasoning');
-    } else {
-      weaknesses.push('Insufficient evidence gathering');
-      suggestions.push('Support claims with more evidence and citations');
-    }
-
-    // Assess counterargument consideration
-    const counterCount = thoughts.filter(t => t.thoughtType === ThoughtType.COUNTERARGUMENT).length;
-    if (counterCount >= 2) {
-      strengths.push('Good consideration of counterarguments');
-    } else if (thoughts.length > 8) {
-      weaknesses.push('Limited counterargument analysis');
-      suggestions.push('Actively seek and address counterarguments');
-    }
-
-    // Assess synthesis
-    const synthesisCount = thoughts.filter(t => t.thoughtType === ThoughtType.SYNTHESIS).length;
-    if (synthesisCount >= 2) {
-      strengths.push('Regular synthesis of findings');
-    } else if (thoughts.length > 10) {
-      suggestions.push('Periodically synthesize findings to maintain coherence');
-    }
-
-    // Assess self-correction
-    const correctionCount = thoughts.filter(t => t.thoughtType === ThoughtType.SELF_CORRECTION).length;
-    if (correctionCount >= 1) {
-      strengths.push('Active self-correction detected');
-    }
-
-    const assessment = strengths.length > weaknesses.length
-      ? 'Overall effective reasoning strategy'
-      : weaknesses.length > strengths.length
-        ? 'Strategy needs improvement'
-        : 'Balanced strategy with room for improvement';
-
-    return {
-      assessment,
-      strengths,
-      weaknesses,
-      suggestions
-    };
-  }
-
-  /**
-   * Detect cognitive biases
-   * @param thoughts - Thoughts to analyze
-   * @returns Bias detection results
-   */
-  detectBias(thoughts: Thought[]): {
-    detectedBiases: Array<{
-      bias: string;
-      evidence: string;
-      severity: 'low' | 'medium' | 'high';
-      mitigation: string;
-    }>;
-    overallRisk: 'low' | 'medium' | 'high';
-  } {
-    const detectedBiases: Array<{
-      bias: string;
-      evidence: string;
-      severity: 'low' | 'medium' | 'high';
-      mitigation: string;
-    }> = [];
-
-    if (thoughts.length === 0) {
-      return { detectedBiases, overallRisk: 'low' };
-    }
-
-    // Check for confirmation bias
-    const confirmingThoughts = thoughts.filter(t =>
-      t.content.toLowerCase().includes('confirms') ||
-      t.content.toLowerCase().includes('proves') ||
-      t.content.toLowerCase().includes('supports')
-    ).length;
-
-    const disconfirmingThoughts = thoughts.filter(t =>
-      t.content.toLowerCase().includes('contradicts') ||
-      t.content.toLowerCase().includes('challenges') ||
-      t.content.toLowerCase().includes('refutes')
-    ).length;
-
-    if (confirmingThoughts > disconfirmingThoughts * 3 && confirmingThoughts > 2) {
-      detectedBiases.push({
-        bias: 'Confirmation Bias',
-        evidence: `${confirmingThoughts} confirming vs ${disconfirmingThoughts} disconfirming thoughts`,
-        severity: 'medium',
-        mitigation: 'Actively seek disconfirming evidence for each claim'
-      });
-    }
-
-    // Check for anchoring bias
-    const firstThoughtConfidence = thoughts[0]?.confidence.overall || 0.5;
-    const confidenceVariance = this.calculateConfidenceVariance(thoughts);
-
-    if (confidenceVariance < 0.1 && thoughts.length > 5) {
-      detectedBiases.push({
-        bias: 'Anchoring Bias',
-        evidence: 'Low confidence variance suggests anchoring to initial estimates',
-        severity: 'low',
-        mitigation: 'Re-evaluate confidence independently for each thought'
-      });
-    }
-
-    // Check for availability bias
-    const recentThoughts = thoughts.slice(-3);
-    const recentReferences = recentThoughts.filter(t =>
-      t.dependencies.some(d => thoughts.slice(0, -3).some(pt => pt.id === d))
-    ).length;
-
-    if (recentReferences === 0 && thoughts.length > 5) {
-      detectedBiases.push({
-        bias: 'Availability Bias',
-        evidence: 'Recent thoughts do not reference earlier work',
-        severity: 'low',
-        mitigation: 'Actively reference and build upon earlier insights'
-      });
-    }
-
-    // Check for overconfidence
-    const highConfidenceCount = thoughts.filter(t => t.confidence.overall > 0.9).length;
-    if (highConfidenceCount > thoughts.length * 0.5) {
-      detectedBiases.push({
-        bias: 'Overconfidence Bias',
-        evidence: `${highConfidenceCount}/${thoughts.length} thoughts have >90% confidence`,
-        severity: 'medium',
-        mitigation: 'Calibrate confidence more conservatively'
-      });
-    }
-
-    // Check for sunk cost fallacy
-    const revisionCount = thoughts.filter(t => t.revisionHistory.length > 0).length;
-    if (revisionCount > thoughts.length * 0.7 && thoughts.length > 5) {
-      detectedBiases.push({
-        bias: 'Sunk Cost Fallacy',
-        evidence: 'High revision rate may indicate reluctance to abandon initial approach',
-        severity: 'low',
-        mitigation: 'Be willing to start fresh if current approach is not working'
-      });
-    }
-
-    // Determine overall risk
-    const highSeverityCount = detectedBiases.filter(b => b.severity === 'high').length;
-    const mediumSeverityCount = detectedBiases.filter(b => b.severity === 'medium').length;
-
-    let overallRisk: 'low' | 'medium' | 'high' = 'low';
-    if (highSeverityCount > 0 || mediumSeverityCount > 2) {
-      overallRisk = 'high';
-    } else if (mediumSeverityCount > 0) {
-      overallRisk = 'medium';
-    }
-
-    return { detectedBiases, overallRisk };
-  }

   // ============================================================================
   // Helper Methods