@gotza02/seq-thinking 1.1.5 → 1.1.7
- package/README.md +4 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/mcp-server.js +1 -1
- package/package.json +9 -3
- package/agents_test.log +0 -15
- package/data/agents/1770106504306-dljh9ef.json +0 -68
- package/data/agents/1770106504310-4oarrst.json +0 -58
- package/data/agents/1770106540588-pvitt55.json +0 -68
- package/data/agents/1770106540595-z2ya871.json +0 -58
- package/data/agents/1770106710890-0e2naq1.json +0 -68
- package/data/agents/1770106710893-r076yxx.json +0 -58
- package/data/agents/1770109212161-4ybd0i7.json +0 -68
- package/data/agents/1770109212166-gkhya8h.json +0 -58
- package/data/agents/1770117726716-lrnm415.json +0 -68
- package/data/agents/1770117726719-w6hsf3v.json +0 -58
- package/data/sessions/1770100622009-5afiuyv.json +0 -499
- package/data/sessions/1770106504312-75zk750.json +0 -107
- package/data/sessions/1770106540597-z8e8soo.json +0 -150
- package/data/sessions/1770106710894-0kxgy5x.json +0 -150
- package/data/sessions/1770109212169-zpddeb9.json +0 -150
- package/data/sessions/1770117726720-frcwj99.json +0 -150
- package/real_world_test.log +0 -200
- package/real_world_test_dynamic.log +0 -184
- package/real_world_test_real.log +0 -184
- package/src/__tests__/agents.test.ts +0 -858
- package/src/__tests__/mcp-server.test.ts +0 -380
- package/src/__tests__/sequential-thinking.test.ts +0 -687
- package/src/__tests__/swarm-coordinator.test.ts +0 -903
- package/src/__tests__/types.test.ts +0 -839
- package/src/__tests__/utils.test.ts +0 -322
- package/src/agents/base-agent.ts +0 -288
- package/src/agents/critic-agent.ts +0 -582
- package/src/agents/index.ts +0 -11
- package/src/agents/meta-reasoning-agent.ts +0 -314
- package/src/agents/reasoner-agent.ts +0 -312
- package/src/agents/synthesizer-agent.ts +0 -641
- package/src/index.ts +0 -118
- package/src/mcp-server.ts +0 -391
- package/src/real_world_test.ts +0 -89
- package/src/sequential-thinking.ts +0 -614
- package/src/swarm-coordinator.ts +0 -772
- package/src/types/index.ts +0 -915
- package/src/utils/index.ts +0 -1004
- package/src/utils/llm-adapter.ts +0 -110
- package/src/utils/logger.ts +0 -56
- package/src/utils/persistence.ts +0 -109
- package/test_output.log +0 -0
- package/tsconfig.json +0 -21
package/src/agents/meta-reasoning-agent.ts:

```diff
@@ -1,314 +0,0 @@
-/**
- * Meta-Reasoning Agent Implementation
- * @module agents/meta-reasoning-agent
- * @version 1.0.0
- */
-
-import { BaseAgent } from './base-agent.js';
-import {
-  AgentType,
-  ThoughtType,
-  type Task,
-  type TaskResult,
-  type AgentCapability,
-  type Thought,
-  type ThinkingSession
-} from '../types/index.js';
-import { LLMAdapter } from '../utils/llm-adapter.js';
-
-/**
- * Meta-observation
- */
-interface MetaObservation {
-  type: 'pattern' | 'concern' | 'insight' | 'suggestion';
-  description: string;
-  severity?: 'low' | 'medium' | 'high';
-  recommendation?: string;
-  triggeredActions?: string[];
-}
-
-/**
- * Meta-reasoning agent that reflects on thinking processes using LLM
- */
-export class MetaReasoningAgent extends BaseAgent {
-  /** Observation history */
-  private observationHistory: MetaObservation[] = [];
-
-  /**
-   * Create a new meta-reasoning agent
-   * @param config - Agent configuration
-   */
-  constructor(config: {
-    name: string;
-    capabilities?: AgentCapability[];
-  }) {
-    const defaultCapabilities: AgentCapability[] = [
-      {
-        name: 'process_reflection',
-        description: 'Reflect on thinking processes using LLM',
-        confidence: 0.95,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      },
-      {
-        name: 'strategy_assessment',
-        description: 'Assess reasoning strategies using LLM',
-        confidence: 0.9,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      },
-      {
-        name: 'bias_detection',
-        description: 'Detect cognitive biases using LLM',
-        confidence: 0.85,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      },
-      {
-        name: 'pattern_analysis',
-        description: 'Analyze thinking patterns using LLM',
-        confidence: 0.9,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      }
-    ];
-
-    super({
-      name: config.name,
-      type: AgentType.META_REASONING,
-      capabilities: config.capabilities || defaultCapabilities,
-      maxConcurrentTasks: 5,
-      confidenceThreshold: 0.7
-    });
-  }
-
-  /**
-   * Get agent type
-   * @returns Agent type
-   */
-  getType(): string {
-    return AgentType.META_REASONING;
-  }
-
-  /**
-   * Get agent capabilities
-   * @returns Array of capabilities
-   */
-  getCapabilities(): AgentCapability[] {
-    return this.config.capabilities;
-  }
-
-  /**
-   * Process a task - perform meta-reasoning using LLM
-   * @param task - Task to process
-   * @returns Task result
-   */
-  async process(task: Task): Promise<TaskResult> {
-    const startTime = Date.now();
-    const input = task.input as {
-      session?: ThinkingSession;
-      thoughts?: Thought[];
-      operation: 'reflect' | 'analyze_patterns' | 'assess_strategy' | 'detect_bias';
-    };
-
-    const provider = (task.context as any)?.modelProvider;
-
-    let prompt = '';
-    let systemPrompt = 'You are a meta-reasoning agent. Your role is to analyze and reflect on the thinking processes of other agents or sessions, identifying patterns, biases, and areas for improvement.';
-
-    const sessionData = input.session ? `Session Topic: ${input.session.topic}\nSession Status: ${input.session.status}\n` : '';
-    const thoughtsData = (input.thoughts || (input.session ? Array.from(input.session.thoughts.values()) : []))
-      .map(t => `Step ${t.stepNumber} (${t.thoughtType}, Confidence: ${t.confidence?.overall ?? 'N/A'}):\n${t.content}`).join('\n\n');
-
-    switch (input.operation) {
-      case 'reflect':
-        prompt = `Perform a comprehensive reflection on this thinking session. What are the key insights? What has been achieved? What is still missing?\n\n${sessionData}${thoughtsData}`;
-        break;
-      case 'analyze_patterns':
-        prompt = `Analyze the thinking patterns in these thoughts. Are there repetitive cycles? Is there a logical progression? Are there any gaps in the reasoning chain?\n\n${thoughtsData}`;
-        break;
-      case 'assess_strategy':
-        prompt = `Assess the reasoning strategy used in this session. Is it effective for the topic? Should the strategy be adjusted (e.g., move from analysis to synthesis)?\n\n${sessionData}${thoughtsData}`;
-        break;
-      case 'detect_bias':
-        prompt = `Identify any cognitive biases present in these thoughts (e.g., confirmation bias, anchoring, overconfidence). Provide evidence from the text for each detected bias.\n\n${thoughtsData}`;
-        systemPrompt = 'You are an expert in cognitive psychology and bias detection. Your role is to identify subtle biases in reasoning processes.';
-        break;
-      default:
-        prompt = `Reflect on the following thoughts and provide meta-observations.\n\n${thoughtsData}`;
-    }
-
-    const response = await LLMAdapter.call(prompt, systemPrompt, provider);
-
-    if (response.error) {
-      // Fallback to simple reflection if LLM fails
-      return this.createTaskResult(task.id, { error: response.error, fallback: 'LLM failed' }, 0, Date.now() - startTime);
-    }
-
-    return this.createTaskResult(
-      task.id,
-      {
-        analysis: response.content,
-        operation: input.operation,
-        timestamp: new Date()
-      },
-      0.9,
-      Date.now() - startTime,
-      {
-        reasoningSteps: 1,
-        intermediateResults: []
-      }
-    );
-  }
-
-
-  // ============================================================================
-  // Helper Methods
-  // ============================================================================
-
-  /**
-   * Find similar thoughts
-   * @param thoughts - Thoughts to compare
-   * @returns Array of similar thought pairs
-   */
-  private findSimilarThoughts(thoughts: Thought[]): Array<[Thought, Thought]> {
-    const similar: Array<[Thought, Thought]> = [];
-
-    for (let i = 0; i < thoughts.length; i++) {
-      for (let j = i + 1; j < thoughts.length; j++) {
-        if (this.thoughtsAreSimilar(thoughts[i], thoughts[j])) {
-          similar.push([thoughts[i], thoughts[j]]);
-        }
-      }
-    }
-
-    return similar;
-  }
-
-  /**
-   * Check if two thoughts are similar
-   * @param a - First thought
-   * @param b - Second thought
-   * @returns True if similar
-   */
-  private thoughtsAreSimilar(a: Thought, b: Thought): boolean {
-    const wordsA = new Set(a.content.toLowerCase().split(/\s+/));
-    const wordsB = new Set(b.content.toLowerCase().split(/\s+/));
-    const intersection = [...wordsA].filter(w => wordsB.has(w));
-    const union = new Set([...wordsA, ...wordsB]);
-
-    return union.size > 0 && intersection.length / union.size > 0.7;
-  }
-
-  /**
-   * Analyze confidence trend
-   * @param thoughts - Thoughts to analyze
-   * @returns Trend analysis
-   */
-  private analyzeConfidenceTrend(thoughts: Thought[]): {
-    trend: 'improving' | 'declining' | 'stable';
-    initial: number;
-    final: number;
-  } {
-    if (thoughts.length < 3) {
-      return { trend: 'stable', initial: 0, final: 0 };
-    }
-
-    const sorted = [...thoughts].sort((a, b) =>
-      a.metadata.createdAt.getTime() - b.metadata.createdAt.getTime()
-    );
-
-    const firstThird = sorted.slice(0, Math.ceil(sorted.length / 3));
-    const lastThird = sorted.slice(-Math.ceil(sorted.length / 3));
-
-    const initial = firstThird.reduce((sum, t) => sum + (t.confidence?.overall ?? 0.5), 0) / firstThird.length;
-    const final = lastThird.reduce((sum, t) => sum + (t.confidence?.overall ?? 0.5), 0) / lastThird.length;
-
-    const diff = final - initial;
-    const threshold = 0.1;
-
-    let trend: 'improving' | 'declining' | 'stable';
-    if (diff > threshold) {
-      trend = 'improving';
-    } else if (diff < -threshold) {
-      trend = 'declining';
-    } else {
-      trend = 'stable';
-    }
-
-    return { trend, initial, final };
-  }
-
-  /**
-   * Analyze thought type distribution
-   * @param thoughts - Thoughts to analyze
-   * @returns Type distribution map
-   */
-  private analyzeThoughtTypeDistribution(thoughts: Thought[]): Map<ThoughtType, number> {
-    const distribution = new Map<ThoughtType, number>();
-
-    for (const thought of thoughts) {
-      const count = distribution.get(thought.thoughtType) || 0;
-      distribution.set(thought.thoughtType, count + 1);
-    }
-
-    return distribution;
-  }
-
-  /**
-   * Calculate confidence variance
-   * @param thoughts - Thoughts to analyze
-   * @returns Variance
-   */
-  calculateConfidenceVariance(thoughts: Thought[]): number {
-    if (thoughts.length < 2) return 0;
-
-    const values = thoughts.map(t => t.confidence?.overall ?? 0.5);
-    const mean = values.reduce((a, b) => a + b, 0) / values.length;
-    const squaredDiffs = values.map(v => Math.pow(v - mean, 2));
-
-    return squaredDiffs.reduce((a, b) => a + b, 0) / values.length;
-  }
-
-  /**
-   * Generate meta-observation for a thought
-   * @param thought - Thought to analyze
-   * @returns Meta-observation or null
-   */
-  generateMetaObservation(thought: Thought): MetaObservation | null {
-    const observations: string[] = [];
-    const triggeredActions: string[] = [];
-
-    // Check for high uncertainty
-    const confidence = thought.confidence || { overall: 0.5, uncertaintyBounds: [0.3, 0.7] };
-    const uncertaintyRange = (confidence.uncertaintyBounds?.[1] ?? 0.7) - (confidence.uncertaintyBounds?.[0] ?? 0.3);
-    if (uncertaintyRange > 0.4) {
-      observations.push('High uncertainty detected');
-      triggeredActions.push('gather_more_evidence');
-    }
-
-    // Check for low confidence
-    if (confidence.overall < 0.4) {
-      observations.push('Low confidence in this thought');
-      triggeredActions.push('review_reasoning');
-    }
-
-    // Check for missing assumptions
-    if (thought.assumptions.length === 0) {
-      observations.push('No explicit assumptions documented');
-      triggeredActions.push('document_assumptions');
-    }
-
-    if (observations.length === 0) return null;
-
-    return {
-      type: 'pattern',
-      description: observations.join('; '),
-      triggeredActions
-    };
-  }
-
-  /**
-   * Get observation history
-   * @returns Observation history
-   */
-  getObservationHistory(): MetaObservation[] {
-    return [...this.observationHistory];
-  }
-}
```
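Of the helper methods in the removed `MetaReasoningAgent`, the least obvious is `thoughtsAreSimilar`: it flags a pair of thoughts as similar when the Jaccard index of their word sets exceeds 0.7. A minimal standalone sketch of that check, assuming only a reduced `ThoughtLike` shape (the real `Thought` type lived in the removed `src/types/index.ts` and carried many more fields):

```ts
// Jaccard-overlap check, extracted from the removed thoughtsAreSimilar
// helper. ThoughtLike is a hypothetical stand-in for the full Thought type.
interface ThoughtLike {
  content: string;
}

function thoughtsAreSimilar(a: ThoughtLike, b: ThoughtLike): boolean {
  const wordsA = new Set(a.content.toLowerCase().split(/\s+/));
  const wordsB = new Set(b.content.toLowerCase().split(/\s+/));
  const intersection = [...wordsA].filter(w => wordsB.has(w));
  const union = new Set([...wordsA, ...wordsB]);
  // Jaccard index |A ∩ B| / |A ∪ B|; pairs above 0.7 count as similar.
  return union.size > 0 && intersection.length / union.size > 0.7;
}

// Near-duplicate phrasing trips the threshold: 5 of 7 unique words are
// shared, so the score is ~0.71 and the pair is reported as similar.
console.log(thoughtsAreSimilar(
  { content: 'the cache invalidation is the root cause' },
  { content: 'the cache invalidation is the likely cause' }
)); // true
```

Because both sides are deduplicated into sets before comparison, repeated words do not inflate the score; word order and negation, however, are ignored entirely.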
package/src/agents/reasoner-agent.ts:

```diff
@@ -1,312 +0,0 @@
-/**
- * Reasoner Agent Implementation
- * @module agents/reasoner-agent
- * @version 1.0.0
- */
-
-import { BaseAgent } from './base-agent.js';
-import {
-  AgentType,
-  ReasoningStrategy,
-  type Task,
-  type TaskResult,
-  type AgentCapability
-} from '../types/index.js';
-import { LLMAdapter } from '../utils/llm-adapter.js';
-
-/**
- * Reasoning step
- */
-interface ReasoningStep {
-  step: number;
-  content: string;
-  confidence: number;
-  isFinal: boolean;
-}
-
-/**
- * Reasoner agent that performs various reasoning strategies
- */
-export class ReasonerAgent extends BaseAgent {
-  /** Reasoning strategy */
-  private strategy: ReasoningStrategy;
-
-  /** Maximum iterations */
-  private maxIterations: number;
-
-  /** Confidence threshold */
-  private confidenceThreshold: number;
-
-  /**
-   * Create a new reasoner agent
-   * @param config - Agent configuration
-   */
-  constructor(config: {
-    name: string;
-    strategy?: ReasoningStrategy;
-    maxIterations?: number;
-    confidenceThreshold?: number;
-    capabilities?: AgentCapability[];
-  }) {
-    const defaultCapabilities: AgentCapability[] = [
-      {
-        name: 'chain_of_thought',
-        description: 'Step-by-step reasoning',
-        confidence: 0.9,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      },
-      {
-        name: 'tree_of_thought',
-        description: 'Explore multiple reasoning paths',
-        confidence: 0.85,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      },
-      {
-        name: 'analogical_reasoning',
-        description: 'Reason by analogy',
-        confidence: 0.8,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      },
-      {
-        name: 'abductive_reasoning',
-        description: 'Inference to best explanation',
-        confidence: 0.75,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      },
-      {
-        name: 'problem_solving',
-        description: 'General problem-solving',
-        confidence: 0.8,
-        performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-      }
-    ];
-
-    super({
-      name: config.name,
-      type: AgentType.REASONER,
-      subtype: config.strategy || ReasoningStrategy.CHAIN_OF_THOUGHT,
-      capabilities: config.capabilities || defaultCapabilities,
-      maxConcurrentTasks: 3,
-      confidenceThreshold: config.confidenceThreshold || 0.7
-    });
-
-    this.strategy = config.strategy || ReasoningStrategy.CHAIN_OF_THOUGHT;
-    this.maxIterations = config.maxIterations || 10;
-    this.confidenceThreshold = config.confidenceThreshold || 0.7;
-  }
-
-  /**
-   * Get agent type
-   * @returns Agent type
-   */
-  getType(): string {
-    return AgentType.REASONER;
-  }
-
-  /**
-   * Get agent capabilities
-   * @returns Array of capabilities
-   */
-  getCapabilities(): AgentCapability[] {
-    return this.config.capabilities;
-  }
-
-  /**
-   * Process a task using the configured reasoning strategy
-   * @param task - Task to process
-   * @returns Task result
-   */
-  async process(task: Task): Promise<TaskResult> {
-    const startTime = Date.now();
-
-    switch (this.strategy) {
-      case ReasoningStrategy.CHAIN_OF_THOUGHT:
-        return this.chainOfThought(task, startTime);
-      case ReasoningStrategy.TREE_OF_THOUGHT:
-        return this.treeOfThought(task, startTime);
-      case ReasoningStrategy.ANALOGICAL:
-        return this.analogicalReasoning(task, startTime);
-      case ReasoningStrategy.ABDUCTIVE:
-        return this.abductiveReasoning(task, startTime);
-      default:
-        return this.chainOfThought(task, startTime);
-    }
-  }
-
-  /**
-   * Chain of thought reasoning
-   * @param task - Task to process
-   * @param startTime - Start time
-   * @returns Task result
-   */
-  private async chainOfThought(task: Task, startTime: number): Promise<TaskResult> {
-    const steps: ReasoningStep[] = [];
-    const input = task.input as { problem?: string; question?: string };
-    const problem = input.problem || input.question || String(task.input);
-
-    let iteration = 0;
-    let currentContext = problem;
-
-    while (iteration < this.maxIterations) {
-      iteration++;
-
-      const prompt = `Based on the current context, generate the next reasoning step.
-Current Context: ${currentContext}
-Iteration: ${iteration}
-Problem: ${problem}`;
-
-      const response = await LLMAdapter.call(prompt, "You are a logical reasoner. Provide one clear reasoning step.");
-      const stepContent = response.content || `Error: ${response.error}`;
-
-      const stepConfidence = this.calculateStepConfidence(stepContent, iteration);
-
-      const step: ReasoningStep = {
-        step: iteration,
-        content: stepContent,
-        confidence: stepConfidence,
-        isFinal: iteration >= this.maxIterations - 1 || stepConfidence >= this.confidenceThreshold || !!response.error
-      };
-
-      steps.push(step);
-
-      if (step.isFinal) {
-        break;
-      }
-
-      currentContext += `\n${stepContent}`;
-    }
-
-    const conclusionPrompt = `Based on these reasoning steps, provide a final conclusion for the problem: "${problem}"\n\nSteps:\n${steps.map(s => s.content).join('\n')}`;
-    const conclusionResponse = await LLMAdapter.call(conclusionPrompt);
-    const conclusion = conclusionResponse.content || "Failed to generate conclusion.";
-
-    const avgConfidence = steps.length > 0
-      ? steps.reduce((sum, s) => sum + s.confidence, 0) / steps.length
-      : 0;
-
-    return this.createTaskResult(
-      task.id,
-      {
-        conclusion,
-        reasoningChain: steps,
-        iterations: iteration,
-        strategy: 'chain_of_thought',
-        problem
-      },
-      avgConfidence,
-      Date.now() - startTime,
-      {
-        reasoningSteps: steps.length,
-        intermediateResults: steps.map(s => s.content)
-      }
-    );
-  }
-
-  /**
-   * Tree of thought reasoning
-   * @param task - Task to process
-   * @param startTime - Start time
-   * @returns Task result
-   */
-  private async treeOfThought(task: Task, startTime: number): Promise<TaskResult> {
-    const input = task.input as { problem?: string; question?: string };
-    const problem = input.problem || input.question || String(task.input);
-
-    const numPaths = 3;
-    const candidates: Array<{
-      path: ReasoningStep[];
-      confidence: number;
-    }> = [];
-
-    for (let i = 0; i < numPaths; i++) {
-      const prompt = `Solve this problem by exploring one specific reasoning path: "${problem}"\nPath #${i+1}:`;
-      const response = await LLMAdapter.call(prompt, "You are a creative reasoner. Explore an alternative path.");
-
-      const step: ReasoningStep = {
-        step: 1,
-        content: response.content || "Failed to generate path.",
-        confidence: 0.7 + Math.random() * 0.2,
-        isFinal: true
-      };
-
-      candidates.push({ path: [step], confidence: step.confidence });
-    }
-
-    const best = candidates.reduce((max, c) =>
-      c.confidence > max.confidence ? c : max,
-      candidates[0]
-    );
-
-    return this.createTaskResult(
-      task.id,
-      {
-        conclusion: best.path[0].content,
-        bestPath: best.path,
-        pathsExplored: candidates.length,
-        strategy: 'tree_of_thought',
-        problem
-      },
-      best.confidence,
-      Date.now() - startTime
-    );
-  }
-
-  /**
-   * Analogical reasoning
-   * @param task - Task to process
-   * @param startTime - Start time
-   * @returns Task result
-   */
-  private async analogicalReasoning(task: Task, startTime: number): Promise<TaskResult> {
-    const input = task.input as { problem?: string; question?: string; domain?: string };
-    const problem = input.problem || input.question || String(task.input);
-
-    const prompt = `Reason about this problem using analogies from other domains: "${problem}"`;
-    const response = await LLMAdapter.call(prompt, "You are an expert at analogical reasoning.");
-    const conclusion = response.content || "Failed to generate analogical reasoning.";
-
-    return this.createTaskResult(
-      task.id,
-      {
-        conclusion,
-        strategy: 'analogical',
-        problem
-      },
-      0.85,
-      Date.now() - startTime
-    );
-  }
-
-  /**
-   * Abductive reasoning
-   * @param task - Task to process
-   * @param startTime - Start time
-   * @returns Task result
-   */
-  private async abductiveReasoning(task: Task, startTime: number): Promise<TaskResult> {
-    const input = task.input as { observation?: string; problem?: string };
-    const observation = input.observation || input.problem || String(task.input);
-
-    const prompt = `Provide the best possible explanation (abductive reasoning) for this observation: "${observation}"`;
-    const response = await LLMAdapter.call(prompt, "You are an expert at abductive reasoning.");
-    const conclusion = response.content || "Failed to generate explanation.";
-
-    return this.createTaskResult(
-      task.id,
-      {
-        conclusion,
-        strategy: 'abductive',
-        observation
-      },
-      0.8,
-      Date.now() - startTime
-    );
-  }
-
-  private calculateStepConfidence(content: string, step: number): number {
-    const baseConfidence = 0.5;
-    const stepBonus = Math.min(0.3, step * 0.03);
-    const contentBonus = content.length > 50 ? 0.1 : 0;
-    return Math.min(0.95, baseConfidence + stepBonus + contentBonus);
-  }
-}
```