dialectic 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursor/commands/setup-test.mdc +175 -0
- package/.cursor/rules/basic-code-cleanup.mdc +1110 -0
- package/.cursor/rules/riper5.mdc +96 -0
- package/.env.example +6 -0
- package/AGENTS.md +1052 -0
- package/LICENSE +21 -0
- package/README.md +93 -0
- package/WARP.md +113 -0
- package/dialectic-1.0.0.tgz +0 -0
- package/dialectic.js +10 -0
- package/docs/commands.md +375 -0
- package/docs/configuration.md +882 -0
- package/docs/context_summarization.md +1023 -0
- package/docs/debate_flow.md +1127 -0
- package/docs/eval_flow.md +795 -0
- package/docs/evaluator.md +141 -0
- package/examples/debate-config-openrouter.json +48 -0
- package/examples/debate_config1.json +48 -0
- package/examples/eval/eval1/eval_config1.json +13 -0
- package/examples/eval/eval1/result1.json +62 -0
- package/examples/eval/eval1/result2.json +97 -0
- package/examples/eval_summary_format.md +11 -0
- package/examples/example3/debate-config.json +64 -0
- package/examples/example3/eval_config2.json +25 -0
- package/examples/example3/problem.md +17 -0
- package/examples/example3/rounds_test/eval_run.sh +16 -0
- package/examples/example3/rounds_test/run_test.sh +16 -0
- package/examples/kata1/architect-only-solution_2-rounds.json +121 -0
- package/examples/kata1/architect-perf-solution_2-rounds.json +234 -0
- package/examples/kata1/debate-config-kata1.json +54 -0
- package/examples/kata1/eval_architect-only_2-rounds.json +97 -0
- package/examples/kata1/eval_architect-perf_2-rounds.json +97 -0
- package/examples/kata1/kata1-report.md +12224 -0
- package/examples/kata1/kata1-report_temps-01_01_01_07.md +2451 -0
- package/examples/kata1/kata1.md +5 -0
- package/examples/kata1/meta.txt +1 -0
- package/examples/kata2/debate-config.json +54 -0
- package/examples/kata2/eval_config1.json +21 -0
- package/examples/kata2/eval_config2.json +25 -0
- package/examples/kata2/kata2.md +5 -0
- package/examples/kata2/only_architect/debate-config.json +45 -0
- package/examples/kata2/only_architect/eval_run.sh +11 -0
- package/examples/kata2/only_architect/run_test.sh +5 -0
- package/examples/kata2/rounds_test/eval_run.sh +11 -0
- package/examples/kata2/rounds_test/run_test.sh +5 -0
- package/examples/kata2/summary_length_test/eval_run.sh +11 -0
- package/examples/kata2/summary_length_test/eval_run_w_clarify.sh +7 -0
- package/examples/kata2/summary_length_test/run_test.sh +5 -0
- package/examples/task-queue/debate-config.json +76 -0
- package/examples/task-queue/debate_report.md +566 -0
- package/examples/task-queue/task-queue-system.md +25 -0
- package/jest.config.ts +13 -0
- package/multi_agent_debate_spec.md +2980 -0
- package/package.json +38 -0
- package/sanity-check-problem.txt +9 -0
- package/src/agents/prompts/architect-prompts.ts +203 -0
- package/src/agents/prompts/generalist-prompts.ts +157 -0
- package/src/agents/prompts/index.ts +41 -0
- package/src/agents/prompts/judge-prompts.ts +19 -0
- package/src/agents/prompts/kiss-prompts.ts +230 -0
- package/src/agents/prompts/performance-prompts.ts +142 -0
- package/src/agents/prompts/prompt-types.ts +68 -0
- package/src/agents/prompts/security-prompts.ts +149 -0
- package/src/agents/prompts/shared.ts +144 -0
- package/src/agents/prompts/testing-prompts.ts +149 -0
- package/src/agents/role-based-agent.ts +386 -0
- package/src/cli/commands/debate.ts +761 -0
- package/src/cli/commands/eval.ts +475 -0
- package/src/cli/commands/report.ts +265 -0
- package/src/cli/index.ts +79 -0
- package/src/core/agent.ts +198 -0
- package/src/core/clarifications.ts +34 -0
- package/src/core/judge.ts +257 -0
- package/src/core/orchestrator.ts +432 -0
- package/src/core/state-manager.ts +322 -0
- package/src/eval/evaluator-agent.ts +130 -0
- package/src/eval/prompts/system.md +41 -0
- package/src/eval/prompts/user.md +64 -0
- package/src/providers/llm-provider.ts +25 -0
- package/src/providers/openai-provider.ts +84 -0
- package/src/providers/openrouter-provider.ts +122 -0
- package/src/providers/provider-factory.ts +64 -0
- package/src/types/agent.types.ts +141 -0
- package/src/types/config.types.ts +47 -0
- package/src/types/debate.types.ts +237 -0
- package/src/types/eval.types.ts +85 -0
- package/src/utils/common.ts +104 -0
- package/src/utils/context-formatter.ts +102 -0
- package/src/utils/context-summarizer.ts +143 -0
- package/src/utils/env-loader.ts +46 -0
- package/src/utils/exit-codes.ts +5 -0
- package/src/utils/id.ts +11 -0
- package/src/utils/logger.ts +48 -0
- package/src/utils/paths.ts +10 -0
- package/src/utils/progress-ui.ts +313 -0
- package/src/utils/prompt-loader.ts +79 -0
- package/src/utils/report-generator.ts +301 -0
- package/tests/clarifications.spec.ts +128 -0
- package/tests/cli.debate.spec.ts +144 -0
- package/tests/config-loading.spec.ts +206 -0
- package/tests/context-summarizer.spec.ts +131 -0
- package/tests/debate-config-custom.json +38 -0
- package/tests/env-loader.spec.ts +149 -0
- package/tests/eval.command.spec.ts +1191 -0
- package/tests/logger.spec.ts +19 -0
- package/tests/openai-provider.spec.ts +26 -0
- package/tests/openrouter-provider.spec.ts +279 -0
- package/tests/orchestrator-summary.spec.ts +386 -0
- package/tests/orchestrator.spec.ts +207 -0
- package/tests/prompt-loader.spec.ts +52 -0
- package/tests/prompts/architect.md +16 -0
- package/tests/provider-factory.spec.ts +150 -0
- package/tests/report.command.spec.ts +546 -0
- package/tests/role-based-agent-summary.spec.ts +476 -0
- package/tests/security-agent.spec.ts +221 -0
- package/tests/shared-prompts.spec.ts +318 -0
- package/tests/state-manager.spec.ts +251 -0
- package/tests/summary-prompts.spec.ts +153 -0
- package/tsconfig.json +49 -0
package/src/types/eval.types.ts
ADDED
@@ -0,0 +1,85 @@
+import { LLM_PROVIDERS } from './agent.types';
+
+export interface EvaluatorConfig {
+  id: string;
+  name: string;
+  model: string;
+  provider: typeof LLM_PROVIDERS.OPENAI | typeof LLM_PROVIDERS.OPENROUTER;
+  systemPromptPath?: string;
+  userPromptPath?: string;
+  timeout?: number; // milliseconds (ignored for execution after refactor)
+  enabled?: boolean; // default true
+}
+
+export interface EvaluatorRunOptions {
+  verbose?: boolean;
+}
+
+export interface EvaluatorInputs {
+  problem: string;
+  clarificationsMarkdown: string; // fenced code blocks including NA entries
+  finalSolution: string;
+}
+
+export interface ParsedEvaluation {
+  evaluation?: {
+    functional_completeness?: {
+      score?: number;
+      reasoning?: string;
+    };
+    non_functional?: {
+      performance_scalability?: { score?: number; reasoning?: string };
+      security?: { score?: number; reasoning?: string };
+      maintainability_evolvability?: { score?: number; reasoning?: string };
+      regulatory_compliance?: { score?: number; reasoning?: string };
+      testability?: { score?: number; reasoning?: string };
+    };
+  };
+  overall_summary?: {
+    strengths?: string;
+    weaknesses?: string;
+    overall_score?: number;
+  };
+}
+
+export interface AggregatedAverages {
+  functional_completeness: number | null; // null => N/A
+  performance_scalability: number | null;
+  security: number | null;
+  maintainability_evolvability: number | null;
+  regulatory_compliance: number | null;
+  testability: number | null;
+  overall_score: number | null;
+}
+
+export interface AggregatedJsonOutput {
+  evaluation: {
+    functional_completeness: { average_score: number | null };
+    non_functional: {
+      performance_scalability: { average_score: number | null };
+      security: { average_score: number | null };
+      maintainability_evolvability: { average_score: number | null };
+      regulatory_compliance: { average_score: number | null };
+      testability: { average_score: number | null };
+    };
+  };
+  overall_score: number | null;
+  agents: Record<string, ParsedEvaluation>; // keyed by evaluator id
+}
+
+export function isEnabledEvaluator(cfg: EvaluatorConfig): boolean {
+  return cfg.enabled !== false;
+}
+
+export function clampScoreToRange(val: unknown): number | undefined {
+  if (typeof val !== 'number' || !Number.isFinite(val)) return undefined;
+  if (val < 1) return 1;
+  if (val > 10) return 10;
+  return val;
+}
+
+export function round2(val: number): number {
+  return Math.round((val + Number.EPSILON) * 100) / 100;
+}
+
+
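A minimal sketch of how these helpers might compose when aggregating evaluator scores. The config objects and raw score values below are illustrative only, and `'openai'` is a guess at one of the `LLM_PROVIDERS` values defined in `agent.types.ts` (hence the cast).

```ts
import { clampScoreToRange, round2, isEnabledEvaluator, EvaluatorConfig } from './eval.types';

// Illustrative configs; 'openai' is an assumed LLM_PROVIDERS value, so it is cast.
const configs: EvaluatorConfig[] = [
  { id: 'eval-1', name: 'Primary', model: 'gpt-4', provider: 'openai' as any },
  { id: 'eval-2', name: 'Disabled', model: 'gpt-4', provider: 'openai' as any, enabled: false },
];
const active = configs.filter(isEnabledEvaluator); // keeps eval-1 only; enabled defaults to true

// Raw scores from parsed evaluations; out-of-range and non-numeric values are normalized.
const raw: unknown[] = [7.456, 12, 0, 'N/A'];
const scores = raw
  .map(clampScoreToRange)                        // [7.456, 10, 1, undefined]
  .filter((s): s is number => s !== undefined);
const average = round2(scores.reduce((a, b) => a + b, 0) / scores.length); // 6.15

console.log(active.map(c => c.id), scores, average);
```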
package/src/utils/common.ts
ADDED
@@ -0,0 +1,104 @@
+import fs from 'fs';
+import path from 'path';
+import { round2 } from '../types/eval.types';
+import { EXIT_INVALID_ARGS } from './exit-codes';
+
+const FILE_ENCODING_UTF8 = 'utf-8';
+
+/**
+ * Validates that a value is a finite number and returns it, or undefined if invalid.
+ *
+ * @param x - The value to validate as a number.
+ * @returns The number if valid and finite, otherwise undefined.
+ */
+export function numOrUndefined(x: unknown): number | undefined {
+  return typeof x === 'number' && Number.isFinite(x) ? x : undefined;
+}
+
+/**
+ * Calculates the average of an array of numbers.
+ *
+ * @param {number[]} values - An array of numbers to average.
+ * @returns {number | null} The average rounded to 2 decimal places, or null if the array is empty.
+ */
+export function averageOrNull(values: number[]): number | null {
+  if (values.length === 0) return null;
+  const sum = values.reduce((a, b) => a + b, 0);
+  return round2(sum / values.length);
+}
+
+/**
+ * Creates a validation error with a custom error code.
+ *
+ * This function is used throughout the CLI to create errors with specific exit codes
+ * for validation failures and invalid arguments.
+ *
+ * @param message - The error message to associate with the error.
+ * @param code - The numeric error code indicating the exit or validation type.
+ * @returns An Error object with the specified message and an added 'code' property.
+ */
+export function createValidationError(message: string, code: number): Error {
+  const err: any = new Error(message);
+  err.code = code;
+  return err;
+}
+
+/**
+ * Reads a JSON file from the given path, validates its existence and file type, parses its contents,
+ * and returns the parsed object. Throws a validation error with an appropriate exit code if the file
+ * does not exist, is not a regular file, or does not contain valid JSON.
+ *
+ * @template T The expected return type for the parsed JSON object.
+ * @param filePath - The path to the JSON file, relative to the current working directory.
+ * @param errorContext - Optional context to include in error messages (e.g., "Debate file", "Config file").
+ *   Defaults to "File" if not provided.
+ * @returns The parsed JSON object of type T.
+ * @throws {Error} Throws a validation error with a specific exit code if:
+ *   - The file does not exist (EXIT_INVALID_ARGS).
+ *   - The path is not a file (EXIT_INVALID_ARGS).
+ *   - The file contains invalid JSON (EXIT_INVALID_ARGS).
+ */
+export function readJsonFile<T>(filePath: string, errorContext: string = 'File'): T {
+  const abs = path.resolve(process.cwd(), filePath);
+  if (!fs.existsSync(abs)) {
+    throw createValidationError(`${errorContext} not found: ${abs}`, EXIT_INVALID_ARGS);
+  }
+  const stat = fs.statSync(abs);
+  if (!stat.isFile()) {
+    throw createValidationError(`Path is not a file: ${abs}`, EXIT_INVALID_ARGS);
+  }
+  const raw = fs.readFileSync(abs, FILE_ENCODING_UTF8);
+  try {
+    return JSON.parse(raw) as T;
+  } catch (parseError: unknown) {
+    const message = parseError instanceof Error ? parseError.message : 'Unknown parsing error';
+    throw createValidationError(`Invalid JSON format in ${errorContext.toLowerCase()}: ${abs} (${message})`, EXIT_INVALID_ARGS);
+  }
+}
+
+/**
+ * Writes content to a file, creating parent directories if needed.
+ * Normalizes the path relative to the current working directory and ensures
+ * all parent directories exist before writing the file.
+ *
+ * @param relativePath - The file path relative to the current working directory.
+ * @param content - The content to write to the file.
+ * @returns Promise resolving to the absolute path of the file that was written.
+ * @throws {Error} Propagates any errors from file system operations (directory creation or file writing).
+ */
+export async function writeFileWithDirectories(relativePath: string, content: string): Promise<string> {
+  // Normalize path relative to current working directory
+  const absolutePath = path.resolve(process.cwd(), relativePath);
+
+  // Ensure parent directories exist
+  const parentDir = path.dirname(absolutePath);
+  if (!fs.existsSync(parentDir)) {
+    fs.mkdirSync(parentDir, { recursive: true });
+  }
+
+  // Write file with UTF-8 encoding
+  await fs.promises.writeFile(absolutePath, content, FILE_ENCODING_UTF8);
+
+  return absolutePath;
+}
+
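A sketch of how these utilities might be combined by the eval command. The file paths and the `ResultFile` shape are assumptions for illustration; the real result schema lives in `eval.types.ts`.

```ts
import { readJsonFile, averageOrNull, writeFileWithDirectories } from './common';

// Assumed minimal shape of an evaluation result file, for illustration only.
interface ResultFile { overall_score?: number }

async function summarizeResults(paths: string[]): Promise<string> {
  const scores = paths
    .map(p => readJsonFile<ResultFile>(p, 'Result file')) // throws a coded error (EXIT_INVALID_ARGS) on bad input
    .map(r => r.overall_score)
    .filter((s): s is number => typeof s === 'number');

  const average = averageOrNull(scores); // null when no numeric scores were found
  return writeFileWithDirectories('out/summary.json', JSON.stringify({ average }, null, 2));
}

// summarizeResults(['examples/eval/eval1/result1.json', 'examples/eval/eval1/result2.json'])
//   .then(p => console.log(`Summary written to ${p}`));
```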
package/src/utils/context-formatter.ts
ADDED
@@ -0,0 +1,102 @@
+import type { DebateContext, DebateRound, AgentClarifications } from '../types/debate.types';
+
+/**
+ * Formats debate history into a readable string for LLM prompts.
+ * Groups contributions by round and formats with clear labels.
+ *
+ * @param history - Array of debate rounds to format.
+ * @returns A formatted string representation of the debate history.
+ */
+export function formatHistory(history: DebateRound[]): string {
+  return history.map(round => {
+    const contributions = round.contributions.map(c => {
+      const firstLine = c.content.split('\n')[0] || '';
+      const preview = firstLine.length > 100 ? firstLine.substring(0, 100) + '...' : firstLine;
+      return `  [${c.agentRole}] ${c.type}: ${preview}`;
+    }).join('\n');
+    return `Round ${round.roundNumber}:\n${contributions}`;
+  }).join('\n\n');
+}
+
+/**
+ * Formats a context section for inclusion in prompts.
+ * Searches backwards through rounds to find this agent's most recent summary.
+ * Falls back to full history only if includeFullHistory is true and no summary found.
+ * Returns empty string if no context available or includeFullHistory is false.
+ *
+ * @param context - The debate context containing history.
+ * @param agentId - The agent ID to look up the summary for.
+ * @param includeFullHistory - Whether to fall back to full history when no summary is found.
+ * @returns A formatted context section, or empty string if no context.
+ */
+export function formatContextSection(context: DebateContext, agentId: string, includeFullHistory: boolean = true): string {
+  if (!context?.history || context.history.length === 0) {
+    return '';
+  }
+
+  // Search backwards through rounds to find this agent's most recent summary
+  for (let i = context.history.length - 1; i >= 0; i--) {
+    const round = context.history[i];
+    if (!round) continue;
+
+    const agentSummary = round.summaries?.[agentId];
+
+    if (agentSummary) {
+      return `=== Previous Debate Context ===\n\n` +
+        `[SUMMARY from Round ${round.roundNumber}]\n` +
+        `${agentSummary.summary}\n\n` +
+        `===================================\n\n`;
+    }
+  }
+
+  // No summary found for this agent
+  if (includeFullHistory) {
+    // Fall back to full history only if includeFullHistory is true
+    return `=== Previous Debate Rounds ===\n\n` +
+      `${formatHistory(context.history)}\n\n` +
+      `===================================\n\n`;
+  } else {
+    // Return empty string if includeFullHistory is false
+    return '';
+  }
+}
+
+/**
+ * Prepends a context section to a prompt if context is available.
+ * Adds proper formatting and separation.
+ *
+ * @param prompt - The base prompt to prepend context to.
+ * @param context - The debate context.
+ * @param agentId - The agent ID for finding their specific summary.
+ * @param includeFullHistory - Whether to fall back to full history when no summary is found.
+ * @returns The prompt with context prepended, or the original prompt if no context.
+ */
+export function prependContext(prompt: string, context?: DebateContext, agentId?: string, includeFullHistory: boolean = true): string {
+  if (!context || !agentId) {
+    return prompt;
+  }
+
+  // Render clarifications first (if any), then previous summaries/history
+  const clar = context.clarifications && context.clarifications.length > 0 ? formatClarifications(context.clarifications) : '';
+
+  const rest = formatContextSection(context, agentId, includeFullHistory);
+  const full = `${clar}${clar ? '\n' : ''}${rest}`.trim();
+  if (!full) return prompt;
+  return full + '\n' + prompt;
+}
+
+/**
+ * Formats Clarifications section for prompts, grouped by agent.
+ */
+export function formatClarifications(groups: AgentClarifications[]): string {
+  let text = '## Clarifications\n\n';
+  for (const group of groups) {
+    text += `### ${group.agentName} (${group.role})\n`;
+    for (const item of group.items) {
+      text += `Question (${item.id}):\n\n\`\`\`text\n${item.question}\n\`\`\`\n\n`;
+      text += `Answer:\n\n\`\`\`text\n${item.answer}\n\`\`\`\n\n`;
+    }
+  }
+  return text + '\n';
+}
+
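A sketch of calling `prependContext` with a hand-built context. The object below carries only the fields this formatter actually reads (history, roundNumber, contributions, summaries), so it is cast rather than claiming the full `DebateContext` shape from `debate.types.ts`.

```ts
import { prependContext } from './context-formatter';
import type { DebateContext } from '../types/debate.types';

// Only the fields read by the formatter are filled in; the cast papers over the rest.
const context = {
  history: [
    {
      roundNumber: 1,
      contributions: [
        { agentRole: 'architect', type: 'proposal', content: 'Use a layered design...\nMore detail below.' },
      ],
      summaries: {
        'agent-1': { summary: 'Round 1 converged on a layered design with a message queue between layers.' },
      },
    },
  ],
} as unknown as DebateContext;

const prompt = prependContext('Propose improvements for round 2.', context, 'agent-1');
// Because a summary exists for 'agent-1', the prompt is prefixed with
// "=== Previous Debate Context ===" and that summary; with no summary it falls
// back to formatHistory(), or stays unchanged when includeFullHistory is false.
console.log(prompt);
```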
package/src/utils/context-summarizer.ts
ADDED
@@ -0,0 +1,143 @@
+import type { AgentRole, LLM_PROVIDERS } from '../types/agent.types';
+import type { SummarizationConfig, SummarizationMetadata } from '../types/debate.types';
+import { LLMProvider } from '../providers/llm-provider';
+
+/**
+ * Default model to use for summarization.
+ * Using GPT-4 for its strong summarization capabilities.
+ */
+const DEFAULT_SUMMARY_MODEL = 'gpt-4';
+
+/**
+ * Default temperature for summarization LLM calls.
+ * Lower temperature (0.3) produces more consistent, factual summaries.
+ */
+const DEFAULT_SUMMARY_TEMPERATURE = 0.3;
+
+/**
+ * Result of a summarization operation.
+ */
+export interface SummarizationResult {
+  summary: string; /** The generated summary text. */
+  metadata: SummarizationMetadata; /** Metadata about the summarization operation. */
+}
+
+/**
+ * Interface for context summarization strategies.
+ * Allows pluggable summarization implementations (e.g., length-based, semantic, hierarchical).
+ */
+export interface ContextSummarizer {
+  /**
+   * Summarizes the given content from the perspective of a specific agent role.
+   *
+   * @param content - The full content to summarize.
+   * @param role - The agent role for perspective-based summarization.
+   * @param config - Summarization configuration (threshold, maxLength, etc.).
+   * @param systemPrompt - The system prompt to use for the LLM.
+   * @param summaryPrompt - The summarization-specific prompt template.
+   * @returns A promise resolving to the summary and metadata.
+   */
+  summarize(
+    content: string,
+    role: AgentRole,
+    config: SummarizationConfig,
+    systemPrompt: string,
+    summaryPrompt: string
+  ): Promise<SummarizationResult>;
+}
+
+/**
+ * Length-based summarization strategy using LLM.
+ * Summarizes content when it exceeds a character threshold.
+ */
+export class LengthBasedSummarizer implements ContextSummarizer {
+  private readonly model?: string;
+  private readonly temperature?: number;
+  private readonly providerName?: typeof LLM_PROVIDERS[keyof typeof LLM_PROVIDERS];
+
+  constructor(
+    private provider: LLMProvider,
+    options?: {
+      model?: string;
+      temperature?: number;
+      provider?: typeof LLM_PROVIDERS[keyof typeof LLM_PROVIDERS];
+    }
+  ) {
+    if (options && options.model !== undefined) {
+      this.model = options.model;
+    }
+    if (options && options.temperature !== undefined) {
+      this.temperature = options.temperature;
+    }
+    if (options && options.provider !== undefined) {
+      this.providerName = options.provider;
+    }
+  }
+
+  /**
+   * Summarizes content using an LLM call with role-specific prompts.
+   *
+   * @param content - The full content to summarize.
+   * @param role - The agent role for perspective-based summarization.
+   * @param config - Summarization configuration.
+   * @param systemPrompt - The system prompt for the LLM.
+   * @param summaryPrompt - The summarization prompt template.
+   * @returns The summary and metadata.
+   */
+  async summarize(
+    content: string,
+    _role: AgentRole,
+    config: SummarizationConfig,
+    systemPrompt: string,
+    summaryPrompt: string
+  ): Promise<SummarizationResult> {
+    const beforeChars = content.length;
+    const startTime = Date.now();
+
+    // Call LLM to generate summary using configured values with fallbacks
+    const selectedModel = this.model ?? DEFAULT_SUMMARY_MODEL;
+    const selectedTemperature = this.temperature ?? DEFAULT_SUMMARY_TEMPERATURE;
+
+    const response = await this.provider.complete({
+      model: selectedModel,
+      temperature: selectedTemperature,
+      systemPrompt,
+      userPrompt: summaryPrompt,
+    });
+
+    const latencyMs = Date.now() - startTime;
+
+    // Truncate summary to maxLength if needed
+    let summaryText = response.text.trim();
+    if (summaryText.length > config.maxLength) {
+      summaryText = summaryText.substring(0, config.maxLength);
+    }
+
+    const afterChars = summaryText.length;
+
+    const metadata: SummarizationMetadata = {
+      beforeChars,
+      afterChars,
+      method: config.method,
+      timestamp: new Date(),
+      latencyMs,
+    };
+
+    if (response.usage?.totalTokens != null) {
+      metadata.tokensUsed = response.usage.totalTokens;
+    }
+
+    // Record model, temperature, and provider used for summarization
+    metadata.model = selectedModel;
+    metadata.temperature = selectedTemperature;
+    if (this.providerName) {
+      metadata.provider = this.providerName;
+    }
+
+    return {
+      summary: summaryText,
+      metadata,
+    };
+  }
+}
+
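A sketch of wiring `LengthBasedSummarizer` to a stubbed provider. The `complete()` request/response shape and the `SummarizationConfig` fields used below (threshold, maxLength, method) are inferred from how `summarize()` consumes them rather than taken from the real provider and debate type definitions, so the casts are load-bearing.

```ts
import { LengthBasedSummarizer } from './context-summarizer';
import type { LLMProvider } from '../providers/llm-provider';

// Stub provider for illustration only; the real interface lives in providers/llm-provider.ts.
const stubProvider = {
  async complete(req: { model: string; temperature: number; systemPrompt: string; userPrompt: string }) {
    return { text: `stub summary from ${req.model}`, usage: { totalTokens: 42 } };
  },
} as unknown as LLMProvider;

async function demo(): Promise<void> {
  const summarizer = new LengthBasedSummarizer(stubProvider, { model: 'gpt-4o-mini', temperature: 0.2 });
  const { summary, metadata } = await summarizer.summarize(
    'very long debate transcript...',
    'architect' as any,                                            // assumed AgentRole value
    { threshold: 2000, maxLength: 500, method: 'length' } as any,  // assumed config fields
    'You are a summarization assistant.',
    'Summarize the debate so far for the architect.'
  );
  console.log(summary, metadata.latencyMs, metadata.model); // model falls back to 'gpt-4' only when none is passed
}

demo();
```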
package/src/utils/env-loader.ts
ADDED
@@ -0,0 +1,46 @@
+import fs from 'fs';
+import path from 'path';
+import { writeStderr } from '../cli/index';
+import dotenv from 'dotenv';
+
+// Constants for environment file loading
+const DEFAULT_ENV_FILENAME = '.env';
+const ERROR_ENV_FILE_NOT_FOUND = 'Environment file not found';
+const WARN_DEFAULT_ENV_MISSING = 'No .env file found at';
+const ERROR_ENV_FILE_LOAD_FAILED = 'Failed to load environment file';
+
+/**
+ * Loads environment variables from a .env file using the dotenv library.
+ *
+ * By default, attempts to load '.env' from the current working directory.
+ * If the default .env file doesn't exist, continues silently (non-breaking).
+ * If a custom env file path is specified and doesn't exist, throws an error.
+ * In verbose mode, warns about missing default .env files to stderr.
+ *
+ * @param envFilePath - Optional path to a custom .env file, relative to process.cwd()
+ * @param verbose - Whether to output verbose logging about .env file loading
+ * @throws {Error} If explicitly specified env file doesn't exist or dotenv parsing fails
+ */
+export function loadEnvironmentFile(envFilePath?: string, verbose?: boolean): void {
+  const fileName = envFilePath || DEFAULT_ENV_FILENAME;
+  const resolvedPath = path.resolve(process.cwd(), fileName);
+  const isDefaultFile = !envFilePath;
+
+  if (!fs.existsSync(resolvedPath)) {
+    if (isDefaultFile) { // Silent failure for default .env file, with optional verbose warning
+
+      if (verbose === true) {
+        writeStderr(`${WARN_DEFAULT_ENV_MISSING} ${resolvedPath}. Continuing without loading environment variables.\n`);
+      }
+      return;
+    } else { // Error for explicitly specified env file
+      throw new Error(`${ERROR_ENV_FILE_NOT_FOUND}: ${resolvedPath}`);
+    }
+  }
+
+  const result = dotenv.config({ path: resolvedPath });
+
+  if (result.error) {
+    throw new Error(`${ERROR_ENV_FILE_LOAD_FAILED}: ${result.error.message}`);
+  }
+}
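Typical call patterns, as a sketch; `.env.local` is just an example path, not a file the package ships.

```ts
import { loadEnvironmentFile } from './env-loader';

// Default lookup: loads ./.env when present and continues silently when it is missing
// (verbose = true additionally writes a warning to stderr in the missing case).
loadEnvironmentFile(undefined, true);

// Explicit path: a missing file or a dotenv parse failure throws.
try {
  loadEnvironmentFile('.env.local'); // example path
} catch (err) {
  console.error((err as Error).message); // e.g. "Environment file not found: /abs/path/.env.local"
}
```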
package/src/utils/id.ts
ADDED
@@ -0,0 +1,11 @@
+export function generateDebateId(now: Date = new Date()): string {
+  const pad = (n: number) => n.toString().padStart(2, '0');
+  const yyyy = now.getFullYear();
+  const MM = pad(now.getMonth() + 1);
+  const dd = pad(now.getDate());
+  const hh = pad(now.getHours());
+  const mm = pad(now.getMinutes());
+  const ss = pad(now.getSeconds());
+  const rand = Math.random().toString(36).slice(2, 6);
+  return `deb-${yyyy}${MM}${dd}-${hh}${mm}${ss}-${rand}`;
+}
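The ID format this produces, sketched with a fixed date (the random suffix varies per call):

```ts
import { generateDebateId } from './id';

const id = generateDebateId(new Date(2024, 0, 2, 3, 4, 5));
// -> "deb-20240102-030405-xxxx", where xxxx is a 4-character base-36 suffix
console.log(id);
```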
package/src/utils/logger.ts
ADDED
@@ -0,0 +1,48 @@
+import { WARNING_COLOR, INFO_COLOR } from '../cli/index';
+
+// Lazy optional chalk import to avoid ESM issues in test environment
+let chalk: any;
+try {
+  // eslint-disable-next-line @typescript-eslint/no-var-requires
+  chalk = require('chalk');
+} catch {
+  chalk = null;
+}
+
+function color(method: string, message: string): string {
+  return chalk && chalk[method] ? chalk[method](message) : message;
+}
+
+export class Logger {
+  constructor(private verbose: boolean = false) {}
+
+  info(message: string): void {
+    console.log(color('cyan', message));
+  }
+
+  success(message: string): void {
+    console.log(color('green', message));
+  }
+
+  warn(message: string): void {
+    console.warn(color(WARNING_COLOR, message));
+  }
+
+  error(message: string): void {
+    console.error(color('red', message));
+  }
+
+  debug(message: string): void {
+    if (this.verbose) {
+      console.log(color(INFO_COLOR, message));
+    }
+  }
+
+  agentAction(agentName: string, action: string): void {
+    console.log(`[${agentName}] ${action}`);
+  }
+
+  separator(): void {
+    console.log('━'.repeat(60));
+  }
+}
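A short usage sketch; `debug()` only prints when the logger was constructed with `verbose = true`, and colors degrade to plain text when chalk is unavailable.

```ts
import { Logger } from './logger';

const log = new Logger(true); // verbose: enables debug()

log.info('Starting debate run');
log.agentAction('Architect', 'drafting initial proposal');
log.debug('raw provider payload (verbose only)');
log.separator();
log.success('Debate complete');
```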