@gotza02/seq-thinking 1.1.5 → 1.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/README.md +4 -1
  2. package/dist/index.d.ts +1 -1
  3. package/dist/index.js +1 -1
  4. package/dist/mcp-server.js +1 -1
  5. package/package.json +9 -3
  6. package/agents_test.log +0 -15
  7. package/data/agents/1770106504306-dljh9ef.json +0 -68
  8. package/data/agents/1770106504310-4oarrst.json +0 -58
  9. package/data/agents/1770106540588-pvitt55.json +0 -68
  10. package/data/agents/1770106540595-z2ya871.json +0 -58
  11. package/data/agents/1770106710890-0e2naq1.json +0 -68
  12. package/data/agents/1770106710893-r076yxx.json +0 -58
  13. package/data/agents/1770109212161-4ybd0i7.json +0 -68
  14. package/data/agents/1770109212166-gkhya8h.json +0 -58
  15. package/data/agents/1770117726716-lrnm415.json +0 -68
  16. package/data/agents/1770117726719-w6hsf3v.json +0 -58
  17. package/data/sessions/1770100622009-5afiuyv.json +0 -499
  18. package/data/sessions/1770106504312-75zk750.json +0 -107
  19. package/data/sessions/1770106540597-z8e8soo.json +0 -150
  20. package/data/sessions/1770106710894-0kxgy5x.json +0 -150
  21. package/data/sessions/1770109212169-zpddeb9.json +0 -150
  22. package/data/sessions/1770117726720-frcwj99.json +0 -150
  23. package/real_world_test.log +0 -200
  24. package/real_world_test_dynamic.log +0 -184
  25. package/real_world_test_real.log +0 -184
  26. package/src/__tests__/agents.test.ts +0 -858
  27. package/src/__tests__/mcp-server.test.ts +0 -380
  28. package/src/__tests__/sequential-thinking.test.ts +0 -687
  29. package/src/__tests__/swarm-coordinator.test.ts +0 -903
  30. package/src/__tests__/types.test.ts +0 -839
  31. package/src/__tests__/utils.test.ts +0 -322
  32. package/src/agents/base-agent.ts +0 -288
  33. package/src/agents/critic-agent.ts +0 -582
  34. package/src/agents/index.ts +0 -11
  35. package/src/agents/meta-reasoning-agent.ts +0 -314
  36. package/src/agents/reasoner-agent.ts +0 -312
  37. package/src/agents/synthesizer-agent.ts +0 -641
  38. package/src/index.ts +0 -118
  39. package/src/mcp-server.ts +0 -391
  40. package/src/real_world_test.ts +0 -89
  41. package/src/sequential-thinking.ts +0 -614
  42. package/src/swarm-coordinator.ts +0 -772
  43. package/src/types/index.ts +0 -915
  44. package/src/utils/index.ts +0 -1004
  45. package/src/utils/llm-adapter.ts +0 -110
  46. package/src/utils/logger.ts +0 -56
  47. package/src/utils/persistence.ts +0 -109
  48. package/test_output.log +0 -0
  49. package/tsconfig.json +0 -21
package/src/agents/critic-agent.ts
@@ -1,582 +0,0 @@
- /**
-  * Critic Agent Implementation
-  * @module agents/critic-agent
-  * @version 1.0.0
-  */
-
- import { BaseAgent } from './base-agent.js';
- import {
-   AgentType,
-   CriticType,
-   type Task,
-   type TaskResult,
-   type AgentCapability
- } from '../types/index.js';
- import { LLMAdapter } from '../utils/llm-adapter.js';
-
- /**
-  * Evaluation issue
-  */
- interface EvaluationIssue {
-   severity: 'low' | 'medium' | 'high' | 'critical';
-   category: string;
-   description: string;
-   suggestion: string;
- }
-
- /**
-  * Evaluation result
-  */
- interface Evaluation {
-   passed: boolean;
-   score: number;
-   issues: EvaluationIssue[];
-   strengths: string[];
-   overallAssessment: string;
- }
-
- /**
-  * Critic agent that evaluates outputs from other agents
-  */
- export class CriticAgent extends BaseAgent {
-   /** Critic type */
-   private criticType: CriticType;
-
-   /** Severity threshold */
-   private severityThreshold: number;
-
-   /**
-    * Create a new critic agent
-    * @param config - Agent configuration
-    */
-   constructor(config: {
-     name: string;
-     criticType?: CriticType;
-     severityThreshold?: number;
-     capabilities?: AgentCapability[];
-   }) {
-     const defaultCapabilities: AgentCapability[] = [
-       {
-         name: 'evaluation',
-         description: 'Evaluate outputs from other agents',
-         confidence: 0.85,
-         performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-       },
-       {
-         name: 'validation',
-         description: 'Validate correctness and accuracy',
-         confidence: 0.8,
-         performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-       },
-       {
-         name: 'feedback',
-         description: 'Provide constructive feedback',
-         confidence: 0.9,
-         performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-       },
-       {
-         name: 'logical_analysis',
-         description: 'Analyze logical consistency',
-         confidence: 0.85,
-         performanceMetrics: { tasksCompleted: 0, averageQuality: 0, averageTimeMs: 0 }
-       }
-     ];
-
-     super({
-       name: config.name,
-       type: AgentType.CRITIC,
-       subtype: config.criticType || CriticType.LOGICAL,
-       capabilities: config.capabilities || defaultCapabilities,
-       maxConcurrentTasks: 5,
-       confidenceThreshold: 0.7
-     });
-
-     this.criticType = config.criticType || CriticType.LOGICAL;
-     this.severityThreshold = config.severityThreshold || 0.5;
-   }
-
-   /**
-    * Get agent type
-    * @returns Agent type
-    */
-   getType(): string {
-     return AgentType.CRITIC;
-   }
-
-   /**
-    * Get agent capabilities
-    * @returns Array of capabilities
-    */
-   getCapabilities(): AgentCapability[] {
-     return this.config.capabilities;
-   }
-
-   /**
-    * Process a task - evaluate the target
-    * @param task - Task to process
-    * @returns Task result
-    */
-   async process(task: Task): Promise<TaskResult> {
-     const startTime = Date.now();
-     const input = task.input as { target: unknown; context?: unknown };
-     const target = input.target;
-
-     let evaluation: Evaluation;
-
-     switch (this.criticType) {
-       case CriticType.LOGICAL:
-         evaluation = await this.checkLogicalConsistency(target);
-         break;
-       case CriticType.FACTUAL:
-         evaluation = await this.checkFactualAccuracy(target);
-         break;
-       case CriticType.BIAS:
-         evaluation = await this.checkForBias(target);
-         break;
-       case CriticType.SAFETY:
-         evaluation = await this.checkSafety(target);
-         break;
-       default:
-         evaluation = await this.generalEvaluation(target);
-     }
-
-     return this.createTaskResult(
-       task.id,
-       evaluation,
-       evaluation.score,
-       Date.now() - startTime,
-       {
-         reasoningSteps: evaluation.issues.length + evaluation.strengths.length,
-         intermediateResults: evaluation.issues.map(i => i.description)
-       }
-     );
-   }
-
-   /**
-    * Check logical consistency
-    * @param target - Target to evaluate
-    * @returns Evaluation result
-    */
-   private async checkLogicalConsistency(target: unknown): Promise<Evaluation> {
-     const content = this.extractContent(target);
-     const prompt = `Evaluate the logical consistency of the following content. Identify any fallacies, contradictions, or unsupported claims.
- Content: ${content}`;
-
-     const response = await LLMAdapter.call(prompt, "You are a critical thinker specializing in logical analysis.");
-     const assessment = response.content || "Failed to perform logical analysis.";
-
-     return {
-       passed: !response.error,
-       score: 0.8,
-       issues: response.error ? [{ severity: 'high', category: 'logic', description: response.error, suggestion: 'Retry' }] : [],
-       strengths: [],
-       overallAssessment: assessment
-     };
-   }
-
-   /**
-    * Check factual accuracy
-    * @param target - Target to evaluate
-    * @returns Evaluation result
-    */
-   private async checkFactualAccuracy(target: unknown): Promise<Evaluation> {
-     const content = this.extractContent(target);
-     const prompt = `Check the factual accuracy of the following content. Identify any potentially incorrect information.
- Content: ${content}`;
-
-     const response = await LLMAdapter.call(prompt, "You are a fact-checker with access to broad knowledge.");
-     const assessment = response.content || "Failed to perform fact-check.";
-
-     return {
-       passed: !response.error,
-       score: 0.8,
-       issues: [],
-       strengths: [],
-       overallAssessment: assessment
-     };
-   }
-
-   /**
-    * Check for bias
-    * @param target - Target to evaluate
-    * @returns Evaluation result
-    */
-   private async checkForBias(target: unknown): Promise<Evaluation> {
-     const content = this.extractContent(target);
-     const issues: EvaluationIssue[] = [];
-     const strengths: string[] = [];
-
-     // Check for absolute language
-     const absolutePatterns = [/\balways\b/gi, /\bnever\b/gi];
-     let absoluteCount = 0;
-     for (const pattern of absolutePatterns) {
-       const matches = content.match(pattern);
-       if (matches) absoluteCount += matches.length;
-     }
-
-     if (absoluteCount > 3) {
-       issues.push({
-         severity: 'low',
-         category: 'absolute_language',
-         description: `Found ${absoluteCount} instances of absolute language (always/never)`,
-         suggestion: 'Use more nuanced language (often/rarely/typically)'
-       });
-     }
-
-     // Check for subjective qualifiers
-     const subjectivePatterns = [/\bthink\b/gi, /\bclearly\b/gi, /\bdefinitely\b/gi];
-     let subjectiveCount = 0;
-     for (const pattern of subjectivePatterns) {
-       const matches = content.match(pattern);
-       if (matches) subjectiveCount += matches.length;
-     }
-
-     if (subjectiveCount > 3) {
-       issues.push({
-         severity: 'low',
-         category: 'subjective_language',
-         description: `Found ${subjectiveCount} instances of subjective qualifiers`,
-         suggestion: 'Support assertions with evidence rather than qualifiers'
-       });
-     }
-
-     // Check for perspective diversity
-     const perspectives = this.countPerspectives(content);
-     if (perspectives < 2) {
-       issues.push({
-         severity: 'medium',
-         category: 'perspective',
-         description: 'Limited perspective diversity detected',
-         suggestion: 'Consider alternative viewpoints and counterarguments'
-       });
-     } else {
-       strengths.push('Multiple perspectives considered');
-     }
-
-     // Check for confirmation bias indicators
-     const confirmationBias = this.detectConfirmationBias(content);
-     issues.push(...confirmationBias);
-
-     const score = Math.max(0, 1 - issues.length * 0.15);
-
-     return {
-       passed: score >= this.severityThreshold,
-       score,
-       issues,
-       strengths: strengths.length > 0 ? strengths : ['Language appears balanced'],
-       overallAssessment: score >= 0.8
-         ? 'Low bias detected'
-         : score >= 0.6
-           ? 'Some bias indicators present'
-           : 'Significant bias concerns'
-     };
-   }
-
-   /**
-    * Check safety
-    * @param target - Target to evaluate
-    * @returns Evaluation result
-    */
-   private async checkSafety(target: unknown): Promise<Evaluation> {
-     const content = this.extractContent(target);
-     const prompt = `Perform a safety and privacy audit on the following content. Identify any harmful patterns, hate speech, or PII.
- Content: ${content}`;
-
-     const response = await LLMAdapter.call(prompt, "You are a safety auditor. Be strict but fair.");
-     const assessment = response.content || "Failed to perform safety audit.";
-
-     const passed = !assessment.toLowerCase().includes('unsafe') && !response.error;
-
-     return {
-       passed,
-       score: passed ? 1.0 : 0.0,
-       issues: [],
-       strengths: [],
-       overallAssessment: assessment
-     };
-   }
-
-   /**
-    * General evaluation
-    * @param target - Target to evaluate
-    * @returns Evaluation result
-    */
-   private async generalEvaluation(target: unknown): Promise<Evaluation> {
-     const content = this.extractContent(target);
-
-     return {
-       passed: true,
-       score: 0.75,
-       issues: [],
-       strengths: ['General quality acceptable', 'Content is coherent'],
-       overallAssessment: 'Acceptable quality'
-     };
-   }
-
-   // ============================================================================
-   // Helper Methods
-   // ============================================================================
-
-   /**
-    * Extract content from target
-    * @param target - Target to extract content from
-    * @returns Extracted content string
-    */
-   private extractContent(target: unknown): string {
-     if (typeof target === 'string') return target;
-
-     if (target && typeof target === 'object') {
-       const obj = target as Record<string, unknown>;
-
-       if ('content' in obj && typeof obj.content === 'string') {
-         return obj.content;
-       }
-
-       if ('conclusion' in obj && typeof obj.conclusion === 'string') {
-         return obj.conclusion;
-       }
-
-       if ('output' in obj && typeof obj.output === 'string') {
-         return obj.output;
-       }
-
-       return JSON.stringify(target);
-     }
-
-     return String(target);
-   }
-
-   /**
-    * Detect logical fallacies
-    * @param content - Content to analyze
-    * @returns Array of issues
-    */
-   private detectLogicalFallacies(content: string): EvaluationIssue[] {
-     const issues: EvaluationIssue[] = [];
-     const lowerContent = content.toLowerCase();
-
-     const fallacyPatterns = [
-       {
-         pattern: /\beveryone\s+knows\s+that\b/gi,
-         name: 'Appeal to Common Belief',
-         suggestion: 'Provide evidence rather than relying on common belief'
-       },
-       {
-         pattern: /\b(expert|authority)\s+says\b/gi,
-         name: 'Appeal to Authority',
-         suggestion: 'Evaluate the argument on its merits, not just authority'
-       },
-       {
-         pattern: /\beither\s+.+\s+or\s+.+\b/gi,
-         name: 'False Dichotomy',
-         suggestion: 'Consider that there may be more than two options'
-       },
-       {
-         pattern: /\bafter\s+this,?\s+therefore\s+because\s+of\s+this\b/gi,
-         name: 'Post Hoc',
-         suggestion: 'Correlation does not imply causation'
-       }
-     ];
-
-     for (const { pattern, name, suggestion } of fallacyPatterns) {
-       if (pattern.test(lowerContent)) {
-         issues.push({
-           severity: 'medium',
-           category: 'logical_fallacy',
-           description: `Potential ${name} detected`,
-           suggestion
-         });
-       }
-     }
-
-     return issues;
-   }
-
-   /**
-    * Detect contradictions
-    * @param content - Content to analyze
-    * @returns Array of issues
-    */
-   private detectContradictions(content: string): EvaluationIssue[] {
-     const issues: EvaluationIssue[] = [];
-
-     // Simple contradiction detection
-     const contradictionPatterns = [
-       { a: /\bis\s+true\b/gi, b: /\bis\s+false\b/gi },
-       { a: /\ball\b/gi, b: /\bnone\b/gi },
-       { a: /\balways\b/gi, b: /\bnever\b/gi }
-     ];
-
-     for (const { a, b } of contradictionPatterns) {
-       const matchesA = content.match(a);
-       const matchesB = content.match(b);
-
-       if (matchesA && matchesB && matchesA.length > 0 && matchesB.length > 0) {
-         issues.push({
-           severity: 'high',
-           category: 'contradiction',
-           description: 'Potential contradiction detected in reasoning',
-           suggestion: 'Review for consistency and clarify any apparent contradictions'
-         });
-         break;
-       }
-     }
-
-     return issues;
-   }
-
-   /**
-    * Detect unsupported claims
-    * @param content - Content to analyze
-    * @returns Array of issues
-    */
-   private detectUnsupportedClaims(content: string): EvaluationIssue[] {
-     const issues: EvaluationIssue[] = [];
-
-     // Look for strong assertions without qualifiers
-     const assertionPattern = /\b(certainly|definitely|absolutely|undoubtedly)\s+(.+?)[.!?]/gi;
-     const assertions = content.match(assertionPattern) || [];
-
-     if (assertions.length > 3) {
-       issues.push({
-         severity: 'low',
-         category: 'unsupported_claims',
-         description: `${assertions.length} strong assertions detected`,
-         suggestion: 'Support strong claims with evidence or use more qualified language'
-       });
-     }
-
-     return issues;
-   }
-
-   /**
-    * Extract factual claims
-    * @param content - Content to analyze
-    * @returns Array of claims
-    */
-   private extractFactualClaims(content: string): string[] {
-     const claimPatterns = [
-       /\b(is|are|was|were)\s+(.+?)[.!?]/gi,
-       /\b(contains?|includes?|consists?)\s+of\s+(.+?)[.!?]/gi,
-       /\b(\d+%|\d+\s+(people|users|customers))\b/gi
-     ];
-
-     const claims: string[] = [];
-
-     for (const pattern of claimPatterns) {
-       const matches = content.match(pattern) || [];
-       claims.push(...matches);
-     }
-
-     return claims;
-   }
-
-   /**
-    * Assess claim accuracy
-    * @param claim - Claim to assess
-    * @returns Accuracy score (0-1)
-    */
-   private assessClaimAccuracy(claim: string): number {
-     // Simulate accuracy assessment
-     // In a real implementation, this would check against knowledge bases
-     const hasNumbers = /\d/.test(claim);
-     const hasQualifiers = /\b(approximately|about|around|roughly)\b/i.test(claim);
-     const length = claim.length;
-
-     let score = 0.7;
-     if (hasNumbers) score += 0.1;
-     if (hasQualifiers) score += 0.1;
-     if (length > 20 && length < 200) score += 0.1;
-
-     return Math.min(1, score);
-   }
-
-   /**
-    * Check for source citations
-    * @param content - Content to check
-    * @returns True if has citations
-    */
-   private hasSourceCitations(content: string): boolean {
-     const citationPatterns = [
-       /\[\d+\]/,
-       /\([^)]+\d{4}[^)]*\)/,
-       /according to\s+\w+/i,
-       /source:\s*\w+/i
-     ];
-
-     return citationPatterns.some(pattern => pattern.test(content));
-   }
-
-   /**
-    * Count perspectives
-    * @param content - Content to analyze
-    * @returns Number of perspectives
-    */
-   private countPerspectives(content: string): number {
-     const perspectiveIndicators = [
-       /\bhowever\b/gi,
-       /\bon the other hand\b/gi,
-       /\bconversely\b/gi,
-       /\balternatively\b/gi,
-       /\bfrom\s+another\s+perspective\b/gi
-     ];
-
-     let count = 1; // Base perspective
-
-     for (const pattern of perspectiveIndicators) {
-       const matches = content.match(pattern);
-       if (matches) count += matches.length;
-     }
-
-     return count;
-   }
-
-   /**
-    * Detect confirmation bias
-    * @param content - Content to analyze
-    * @returns Array of issues
-    */
-   private detectConfirmationBias(content: string): EvaluationIssue[] {
-     const issues: EvaluationIssue[] = [];
-
-     // Check for selective evidence
-     const evidencePatterns = [
-       /\b(proves?|confirms?)\s+that\b/gi,
-       /\b(supports?|validates?)\s+the\b/gi
-     ];
-
-     let confirmingCount = 0;
-     for (const pattern of evidencePatterns) {
-       const matches = content.match(pattern);
-       if (matches) confirmingCount += matches.length;
-     }
-
-     const counterPattern = /\b(however|but|although|though|counter|oppose)/gi;
-     const counterMatches = content.match(counterPattern) || [];
-
-     if (confirmingCount > 2 && counterMatches.length === 0) {
-       issues.push({
-         severity: 'medium',
-         category: 'confirmation_bias',
-         description: 'Evidence appears one-sided with no counterarguments',
-         suggestion: 'Actively seek and present disconfirming evidence'
-       });
-     }
-
-     return issues;
-   }
-
-   /**
-    * Generate logical assessment
-    * @param score - Score
-    * @param issueCount - Number of issues
-    * @returns Assessment string
-    */
-   private generateLogicalAssessment(score: number, issueCount: number): string {
-     if (score >= 0.9) return 'Excellent logical consistency';
-     if (score >= 0.8) return 'Good logical consistency';
-     if (score >= 0.6) return 'Acceptable logical consistency with minor issues';
-     if (score >= 0.4) return 'Logical consistency needs improvement';
-     return 'Significant logical issues detected';
-   }
- }
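
For context on what 1.1.7 stops shipping, here is a minimal usage sketch of the CriticAgent removed above, reconstructed only from the constructor and process() signatures visible in the hunk. The import paths, the exact Task shape, and the sample task object are assumptions for illustration, not part of the package's documented API.

// Hypothetical sketch: driving the removed CriticAgent (paths and Task shape assumed).
import { CriticAgent } from './agents/critic-agent.js';
import { CriticType, type Task } from './types/index.js';

async function critiqueDraft(draft: string): Promise<void> {
  // criticType selects the branch taken in process(); severityThreshold is the
  // pass/fail cutoff that checkForBias() compares its score against.
  const critic = new CriticAgent({
    name: 'bias-critic',
    criticType: CriticType.BIAS,
    severityThreshold: 0.6
  });

  // process() only reads task.id and task.input.target; any other fields the
  // real Task type requires are assumed away here.
  const task = { id: 'task-1', input: { target: draft } } as unknown as Task;

  const result = await critic.process(task);
  // The Evaluation ({ passed, score, issues, strengths, overallAssessment })
  // is wrapped by createTaskResult() together with timing metadata.
  console.log(result);
}

critiqueDraft('This approach is always better and never fails.').catch(console.error);
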
package/src/agents/index.ts
@@ -1,11 +0,0 @@
- /**
-  * Agent Exports
-  * @module agents
-  * @version 1.0.0
-  */
-
- export { BaseAgent } from './base-agent.js';
- export { ReasonerAgent } from './reasoner-agent.js';
- export { CriticAgent } from './critic-agent.js';
- export { SynthesizerAgent } from './synthesizer-agent.js';
- export { MetaReasoningAgent } from './meta-reasoning-agent.js';
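
Likewise, a one-line sketch of the import style this removed barrel file supported; the relative path assumes a consumer sitting alongside src/, which the diff itself does not show.

// Hypothetical consumer of the removed agents barrel; the path is an assumption.
import { BaseAgent, ReasonerAgent, CriticAgent, SynthesizerAgent, MetaReasoningAgent } from './agents/index.js';

With the src/ tree dropped from the published package in 1.1.7, presumably only the bundled dist/ entry points remain available to consumers.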