dialectic 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119) hide show
  1. package/.cursor/commands/setup-test.mdc +175 -0
  2. package/.cursor/rules/basic-code-cleanup.mdc +1110 -0
  3. package/.cursor/rules/riper5.mdc +96 -0
  4. package/.env.example +6 -0
  5. package/AGENTS.md +1052 -0
  6. package/LICENSE +21 -0
  7. package/README.md +93 -0
  8. package/WARP.md +113 -0
  9. package/dialectic-1.0.0.tgz +0 -0
  10. package/dialectic.js +10 -0
  11. package/docs/commands.md +375 -0
  12. package/docs/configuration.md +882 -0
  13. package/docs/context_summarization.md +1023 -0
  14. package/docs/debate_flow.md +1127 -0
  15. package/docs/eval_flow.md +795 -0
  16. package/docs/evaluator.md +141 -0
  17. package/examples/debate-config-openrouter.json +48 -0
  18. package/examples/debate_config1.json +48 -0
  19. package/examples/eval/eval1/eval_config1.json +13 -0
  20. package/examples/eval/eval1/result1.json +62 -0
  21. package/examples/eval/eval1/result2.json +97 -0
  22. package/examples/eval_summary_format.md +11 -0
  23. package/examples/example3/debate-config.json +64 -0
  24. package/examples/example3/eval_config2.json +25 -0
  25. package/examples/example3/problem.md +17 -0
  26. package/examples/example3/rounds_test/eval_run.sh +16 -0
  27. package/examples/example3/rounds_test/run_test.sh +16 -0
  28. package/examples/kata1/architect-only-solution_2-rounds.json +121 -0
  29. package/examples/kata1/architect-perf-solution_2-rounds.json +234 -0
  30. package/examples/kata1/debate-config-kata1.json +54 -0
  31. package/examples/kata1/eval_architect-only_2-rounds.json +97 -0
  32. package/examples/kata1/eval_architect-perf_2-rounds.json +97 -0
  33. package/examples/kata1/kata1-report.md +12224 -0
  34. package/examples/kata1/kata1-report_temps-01_01_01_07.md +2451 -0
  35. package/examples/kata1/kata1.md +5 -0
  36. package/examples/kata1/meta.txt +1 -0
  37. package/examples/kata2/debate-config.json +54 -0
  38. package/examples/kata2/eval_config1.json +21 -0
  39. package/examples/kata2/eval_config2.json +25 -0
  40. package/examples/kata2/kata2.md +5 -0
  41. package/examples/kata2/only_architect/debate-config.json +45 -0
  42. package/examples/kata2/only_architect/eval_run.sh +11 -0
  43. package/examples/kata2/only_architect/run_test.sh +5 -0
  44. package/examples/kata2/rounds_test/eval_run.sh +11 -0
  45. package/examples/kata2/rounds_test/run_test.sh +5 -0
  46. package/examples/kata2/summary_length_test/eval_run.sh +11 -0
  47. package/examples/kata2/summary_length_test/eval_run_w_clarify.sh +7 -0
  48. package/examples/kata2/summary_length_test/run_test.sh +5 -0
  49. package/examples/task-queue/debate-config.json +76 -0
  50. package/examples/task-queue/debate_report.md +566 -0
  51. package/examples/task-queue/task-queue-system.md +25 -0
  52. package/jest.config.ts +13 -0
  53. package/multi_agent_debate_spec.md +2980 -0
  54. package/package.json +38 -0
  55. package/sanity-check-problem.txt +9 -0
  56. package/src/agents/prompts/architect-prompts.ts +203 -0
  57. package/src/agents/prompts/generalist-prompts.ts +157 -0
  58. package/src/agents/prompts/index.ts +41 -0
  59. package/src/agents/prompts/judge-prompts.ts +19 -0
  60. package/src/agents/prompts/kiss-prompts.ts +230 -0
  61. package/src/agents/prompts/performance-prompts.ts +142 -0
  62. package/src/agents/prompts/prompt-types.ts +68 -0
  63. package/src/agents/prompts/security-prompts.ts +149 -0
  64. package/src/agents/prompts/shared.ts +144 -0
  65. package/src/agents/prompts/testing-prompts.ts +149 -0
  66. package/src/agents/role-based-agent.ts +386 -0
  67. package/src/cli/commands/debate.ts +761 -0
  68. package/src/cli/commands/eval.ts +475 -0
  69. package/src/cli/commands/report.ts +265 -0
  70. package/src/cli/index.ts +79 -0
  71. package/src/core/agent.ts +198 -0
  72. package/src/core/clarifications.ts +34 -0
  73. package/src/core/judge.ts +257 -0
  74. package/src/core/orchestrator.ts +432 -0
  75. package/src/core/state-manager.ts +322 -0
  76. package/src/eval/evaluator-agent.ts +130 -0
  77. package/src/eval/prompts/system.md +41 -0
  78. package/src/eval/prompts/user.md +64 -0
  79. package/src/providers/llm-provider.ts +25 -0
  80. package/src/providers/openai-provider.ts +84 -0
  81. package/src/providers/openrouter-provider.ts +122 -0
  82. package/src/providers/provider-factory.ts +64 -0
  83. package/src/types/agent.types.ts +141 -0
  84. package/src/types/config.types.ts +47 -0
  85. package/src/types/debate.types.ts +237 -0
  86. package/src/types/eval.types.ts +85 -0
  87. package/src/utils/common.ts +104 -0
  88. package/src/utils/context-formatter.ts +102 -0
  89. package/src/utils/context-summarizer.ts +143 -0
  90. package/src/utils/env-loader.ts +46 -0
  91. package/src/utils/exit-codes.ts +5 -0
  92. package/src/utils/id.ts +11 -0
  93. package/src/utils/logger.ts +48 -0
  94. package/src/utils/paths.ts +10 -0
  95. package/src/utils/progress-ui.ts +313 -0
  96. package/src/utils/prompt-loader.ts +79 -0
  97. package/src/utils/report-generator.ts +301 -0
  98. package/tests/clarifications.spec.ts +128 -0
  99. package/tests/cli.debate.spec.ts +144 -0
  100. package/tests/config-loading.spec.ts +206 -0
  101. package/tests/context-summarizer.spec.ts +131 -0
  102. package/tests/debate-config-custom.json +38 -0
  103. package/tests/env-loader.spec.ts +149 -0
  104. package/tests/eval.command.spec.ts +1191 -0
  105. package/tests/logger.spec.ts +19 -0
  106. package/tests/openai-provider.spec.ts +26 -0
  107. package/tests/openrouter-provider.spec.ts +279 -0
  108. package/tests/orchestrator-summary.spec.ts +386 -0
  109. package/tests/orchestrator.spec.ts +207 -0
  110. package/tests/prompt-loader.spec.ts +52 -0
  111. package/tests/prompts/architect.md +16 -0
  112. package/tests/provider-factory.spec.ts +150 -0
  113. package/tests/report.command.spec.ts +546 -0
  114. package/tests/role-based-agent-summary.spec.ts +476 -0
  115. package/tests/security-agent.spec.ts +221 -0
  116. package/tests/shared-prompts.spec.ts +318 -0
  117. package/tests/state-manager.spec.ts +251 -0
  118. package/tests/summary-prompts.spec.ts +153 -0
  119. package/tsconfig.json +49 -0
@@ -0,0 +1,122 @@
1
+ import OpenAI from 'openai';
2
+ import { CompletionRequest, CompletionResponse, LLMProvider } from './llm-provider';
3
+
4
+ /**
5
+ * OpenRouter API configuration constants
6
+ */
7
+ const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1';
8
+ const OPENROUTER_HTTP_REFERER = 'dialectic';
9
+ const OPENROUTER_X_TITLE = 'Dialectic - Multi-Agent Debate';
10
+
11
+ /**
12
+ * OpenRouter provider implementation using OpenAI SDK with OpenRouter-specific configuration.
13
+ *
14
+ * This provider leverages the OpenAI SDK for compatibility while using OpenRouter's API
15
+ * endpoint and authentication. It supports the same fallback strategy as the OpenAI provider
16
+ * (Responses API → Chat Completions API) and handles OpenRouter-specific response formats.
17
+ *
18
+ * OpenRouter models are specified using their full qualified names (e.g., "openai/gpt-4",
19
+ * "anthropic/claude-3-sonnet") as provided by the user in their configuration.
20
+ */
21
+ export class OpenRouterProvider implements LLMProvider {
22
+ private client: OpenAI;
23
+
24
+ /**
25
+ * Creates a new OpenRouter provider instance.
26
+ * @param apiKey - OpenRouter API key for authentication
27
+ */
28
+ constructor(apiKey: string) {
29
+ this.client = new OpenAI({
30
+ apiKey,
31
+ baseURL: OPENROUTER_BASE_URL,
32
+ defaultHeaders: {
33
+ 'HTTP-Referer': OPENROUTER_HTTP_REFERER,
34
+ 'X-Title': OPENROUTER_X_TITLE,
35
+ },
36
+ });
37
+ }
38
+
39
+ /**
40
+ * Makes a completion request to OpenRouter API.
41
+ *
42
+ * Uses the same fallback strategy as OpenAI provider:
43
+ * 1. Attempts to use Responses API (newer interface)
44
+ * 2. Falls back to Chat Completions API if Responses API fails
45
+ *
46
+ * @param request - The completion request containing model, prompts, and parameters
47
+ * @returns Promise resolving to completion response with text and usage metadata
48
+ */
49
+ async complete(request: CompletionRequest): Promise<CompletionResponse> {
50
+ // Try Responses API first
51
+ try {
52
+ // Build Responses API payload conditionally
53
+ const respPayload: any = {
54
+ model: request.model,
55
+ temperature: request.temperature,
56
+ input: [
57
+ { role: 'system', content: request.systemPrompt },
58
+ { role: 'user', content: request.userPrompt },
59
+ ],
60
+ };
61
+ if (request.maxTokens != null) respPayload.max_output_tokens = request.maxTokens;
62
+ if (request.stopSequences) respPayload.stop = request.stopSequences;
63
+
64
+ const resp = await (this.client as any).responses?.create?.(respPayload);
65
+
66
+ if (resp && resp.output_text) {
67
+ const usage = resp?.usage ?? resp?.output?.usage;
68
+ const out: CompletionResponse = { text: resp.output_text as string };
69
+ if (usage) {
70
+ out.usage = {
71
+ inputTokens: usage.input_tokens ?? usage.inputTokens,
72
+ outputTokens: usage.output_tokens ?? usage.outputTokens,
73
+ totalTokens: usage.total_tokens ?? usage.totalTokens,
74
+ };
75
+ }
76
+ return out;
77
+ }
78
+ // Some SDK shapes use output[0]?.content[0]?.text
79
+ const outText: string | undefined = resp?.output?.[0]?.content?.[0]?.text;
80
+ if (outText) {
81
+ const usage = resp?.usage ?? resp?.output?.usage;
82
+ const out: CompletionResponse = { text: outText };
83
+ if (usage) {
84
+ out.usage = {
85
+ inputTokens: usage.input_tokens ?? usage.inputTokens,
86
+ outputTokens: usage.output_tokens ?? usage.outputTokens,
87
+ totalTokens: usage.total_tokens ?? usage.totalTokens,
88
+ };
89
+ }
90
+ return out;
91
+ }
92
+
93
+ // Fallback if Responses API returned unexpected shape
94
+ throw new Error('Unexpected Responses API response shape');
95
+ } catch (_err) {
96
+ // Fallback to Chat Completions API
97
+ const chatPayload: any = {
98
+ model: request.model,
99
+ messages: [
100
+ { role: 'system', content: request.systemPrompt },
101
+ { role: 'user', content: request.userPrompt },
102
+ ],
103
+ temperature: request.temperature,
104
+ };
105
+ if (request.maxTokens != null) chatPayload.max_tokens = request.maxTokens;
106
+ if (request.stopSequences) chatPayload.stop = request.stopSequences;
107
+
108
+ const chat = await this.client.chat.completions.create(chatPayload);
109
+ const txt = chat.choices[0]?.message?.content ?? '';
110
+ const usage = (chat as any).usage;
111
+ const out: CompletionResponse = { text: txt };
112
+ if (usage) {
113
+ out.usage = {
114
+ inputTokens: usage.prompt_tokens ?? usage.input_tokens,
115
+ outputTokens: usage.completion_tokens ?? usage.output_tokens,
116
+ totalTokens: usage.total_tokens,
117
+ };
118
+ }
119
+ return out;
120
+ }
121
+ }
122
+ }
@@ -0,0 +1,64 @@
1
+ import { LLMProvider } from './llm-provider';
2
+ import { OpenAIProvider } from './openai-provider';
3
+ import { OpenRouterProvider } from './openrouter-provider';
4
+ import { EXIT_CONFIG_ERROR } from '../utils/exit-codes';
5
+ import { LLM_PROVIDERS } from '../types/agent.types';
6
+
7
+ /**
8
+ * Environment variable names for API keys
9
+ */
10
+ const OPENAI_API_KEY_ENV = 'OPENAI_API_KEY';
11
+ const OPENROUTER_API_KEY_ENV = 'OPENROUTER_API_KEY';
12
+
13
+
14
+ /**
15
+ * Helper function to create a provider instance with API key validation.
16
+ *
17
+ * @param envVarName - The name of the environment variable containing the API key
18
+ * @param ProviderClass - The provider class constructor
19
+ * @returns An LLM provider instance
20
+ * @throws {Error} If the API key is missing or empty
21
+ */
22
+ function createProviderWithApiKey<T extends LLMProvider>(
23
+ envVarName: string,
24
+ ProviderClass: new (apiKey: string) => T
25
+ ): T {
26
+ const apiKey = process.env[envVarName];
27
+ if (!apiKey || apiKey.trim() === '') {
28
+ const err: any = new Error(`${envVarName} is not set`);
29
+ err.code = EXIT_CONFIG_ERROR;
30
+ throw err;
31
+ }
32
+ return new ProviderClass(apiKey);
33
+ }
34
+
35
+ /**
36
+ * Creates an LLM provider instance based on the specified provider type.
37
+ *
38
+ * This factory function handles provider creation and API key retrieval from environment
39
+ * variables. It implements fail-fast error handling with clear error messages for
40
+ * configuration issues.
41
+ *
42
+ * @param providerType - The type of provider to create ("openai" or "openrouter")
43
+ * @returns An LLM provider instance
44
+ * @throws {Error} If the provider type is invalid or the required API key is missing
45
+ */
46
+ export function createProvider(providerType: string): LLMProvider {
47
+
48
+ switch (providerType) {
49
+ case LLM_PROVIDERS.OPENAI: {
50
+ return createProviderWithApiKey(OPENAI_API_KEY_ENV, OpenAIProvider);
51
+ }
52
+
53
+ case LLM_PROVIDERS.OPENROUTER: {
54
+ return createProviderWithApiKey(OPENROUTER_API_KEY_ENV, OpenRouterProvider);
55
+ }
56
+
57
+ default: {
58
+ const supportedTypes = Object.values(LLM_PROVIDERS).join(', ');
59
+ const err: any = new Error(`Unsupported provider type: ${providerType}. Supported types are: ${supportedTypes}`);
60
+ err.code = EXIT_CONFIG_ERROR;
61
+ throw err;
62
+ }
63
+ }
64
+ }
@@ -0,0 +1,141 @@
1
+ import type { SummarizationConfig } from './config.types';
2
+
3
+ /**
4
+ * The role of the agent.
5
+ */
6
+ export const AGENT_ROLES = {
7
+ ARCHITECT: "architect",
8
+ SECURITY: "security",
9
+ PERFORMANCE: "performance",
10
+ TESTING: "testing",
11
+ GENERALIST: "generalist",
12
+ KISS: "kiss",
13
+ } as const;
14
+
15
+ export type AgentRole = (typeof AGENT_ROLES)[keyof typeof AGENT_ROLES];
16
+
17
+ export const LLM_PROVIDERS = {
18
+ OPENAI: "openai",
19
+ OPENROUTER: "openrouter",
20
+ } as const;
21
+
22
+ export const PROMPT_SOURCES = {
23
+ BUILT_IN: "built-in",
24
+ FILE: "file",
25
+ } as const;
26
+
27
+ export type PromptSourceType = (typeof PROMPT_SOURCES)[keyof typeof PROMPT_SOURCES];
28
+
29
+ /**
30
+ * Configuration for an AI agent.
31
+ *
32
+ * @property id - Unique identifier for the agent.
33
+ * @property name - Human-readable name for the agent.
34
+ * @property role - The functional role of the agent (e.g., architect, security).
35
+ * @property model - The LLM model name to use (e.g., "gpt-4").
36
+ * @property provider - The LLM provider; currently only supports "openai".
37
+ * @property temperature - Sampling temperature for the LLM (range: 0.0 - 1.0).
38
+ * @property systemPromptPath - (Optional) Filesystem path to a markdown/text file containing the system prompt to prime the agent. Resolved relative to the configuration file directory.
39
+ * @property summaryPromptPath - (Optional) Filesystem path to a markdown/text file containing the summary prompt. Resolved relative to the configuration file directory.
40
+ * @property summarization - (Optional) Per-agent summarization configuration that overrides system-wide settings.
41
+ * @property enabled - (Optional) Whether the agent is enabled; defaults to true if omitted.
42
+ */
43
+ export interface AgentConfig {
44
+ /** Unique identifier for the agent. */
45
+ id: string;
46
+ /** Human-readable name for the agent. */
47
+ name: string;
48
+ /** The functional role of the agent. */
49
+ role: AgentRole;
50
+ /** The LLM model name to use (e.g., "gpt-4"). */
51
+ model: string;
52
+ /** The LLM provider; supports "openai" or "openrouter". */
53
+ provider: typeof LLM_PROVIDERS.OPENAI | typeof LLM_PROVIDERS.OPENROUTER;
54
+ /** Sampling temperature for the LLM (range: 0.0 - 1.0). */
55
+ temperature: number;
56
+ /** (Optional) Filesystem path to a markdown/text file containing the system prompt to prime the agent. Resolved relative to the configuration file directory. */
57
+ systemPromptPath?: string;
58
+ /** (Optional) Filesystem path to a markdown/text file containing the summary prompt. Resolved relative to the configuration file directory. */
59
+ summaryPromptPath?: string;
60
+ /** (Optional) Filesystem path to a markdown/text file containing the clarification questions prompt. Resolved relative to the configuration file directory. */
61
+ clarificationPromptPath?: string;
62
+ /** (Optional) Per-agent summarization configuration that overrides system-wide settings. */
63
+ summarization?: SummarizationConfig;
64
+ /** (Optional) Whether the agent is enabled; defaults to true if omitted. */
65
+ enabled?: boolean;
66
+ }
67
+
68
/**
 * Metadata for a contribution made by an agent.
 *
 * @property tokensUsed - (Optional) Number of tokens used in the contribution.
 * @property latencyMs - (Optional) Latency in milliseconds for the contribution.
 * @property model - (Optional) The LLM model used for the contribution.
 */
export interface ContributionMetadata {
  /** (Optional) Number of tokens used in the contribution. */
  tokensUsed?: number;
  /** (Optional) Latency in milliseconds for the contribution. */
  latencyMs?: number;
  /** (Optional) The LLM model used for the contribution. */
  model?: string;
}

/**
 * Represents a generic response from an agent, such as a proposal, critique, or refinement.
 *
 * @property content - The main textual content of the agent's response (e.g., solution, critique, or refinement).
 * @property metadata - Metadata about the response, including token usage, latency, and model information.
 */
export interface AgentResponse {
  /** The main textual content of the agent's response. */
  content: string;
  /** Metadata about the response, such as tokens used, latency, and model. */
  metadata: ContributionMetadata;
}

/**
 * Provenance information for a system prompt, indicating whether it was loaded from a file or using built-in defaults.
 *
 * @property source - The source of the system prompt ('built-in' for default, 'file' for loaded from filesystem).
 * @property absPath - (Optional) The absolute filesystem path to the prompt file, if source is 'file'.
 */
export interface PromptSource {
  /** The source of the system prompt ('built-in' or 'file'). */
  source: PromptSourceType;
  /** (Optional) Absolute filesystem path to the prompt file, when source is 'file'. */
  absPath?: string;
}

/**
 * Metadata about an agent's prompt source for logging and persistence.
 *
 * @property agentId - The unique identifier of the agent.
 * @property role - The role of the agent.
 * @property source - Whether the prompt came from a file or built-in default.
 * @property path - (Optional) The file path if loaded from a file.
 */
export interface AgentPromptMetadata {
  /** The unique identifier of the agent. */
  agentId: string;
  /** The role of the agent. */
  role: AgentRole;
  /** Whether the prompt came from a file or built-in default. */
  source: PromptSourceType;
  /** (Optional) The file path if loaded from a file. */
  path?: string;
}

/**
 * Metadata about a judge's prompt source for logging and persistence.
 *
 * @property id - The unique identifier of the judge.
 * @property source - Whether the prompt came from a file or built-in default.
 * @property path - (Optional) The file path if loaded from a file.
 * @property summarySource - Whether the summary prompt came from a file or built-in default.
 * @property summaryPath - (Optional) The file path for summary prompt if loaded from a file.
 */
export interface JudgePromptMetadata {
  /** The unique identifier of the judge. */
  id: string;
  /** Whether the prompt came from a file or built-in default. */
  source: PromptSourceType;
  /** (Optional) The file path if loaded from a file. */
  path?: string;
  /** (Optional) Whether the summary prompt came from a file or built-in default. */
  summarySource?: PromptSourceType;
  /** (Optional) The file path for the summary prompt if loaded from a file. */
  summaryPath?: string;
}

// The next two aliases exist purely for convenience (readability) and potential future
// use, should we ever need to distinguish between different kinds of agent responses.

export interface Proposal extends AgentResponse {}

export interface Critique extends AgentResponse {}
@@ -0,0 +1,47 @@
1
+ import { AgentConfig } from './agent.types';
2
+ import { DebateConfig, SummarizationMethod } from './debate.types';
3
+
4
/**
 * Controls when and how agents summarize debate history to manage context size.
 *
 * NOTE(review): an identically-shaped SummarizationConfig is also declared in
 * debate.types.ts — consider consolidating into a single definition to avoid drift.
 */
export interface SummarizationConfig {
  /** Whether summarization is enabled. */
  enabled: boolean;
  /** Character count threshold for triggering summarization. */
  threshold: number;
  /** Maximum length of generated summaries in characters. */
  maxLength: number;
  /** Summarization method to use. */
  method: SummarizationMethod;
  /** (Optional) Path to a custom summarization prompt file. */
  promptPath?: string;
}

/** Default value for summarization enabled flag */
export const DEFAULT_SUMMARIZATION_ENABLED = true;

/** Default character count threshold for triggering summarization */
export const DEFAULT_SUMMARIZATION_THRESHOLD = 5000;

/** Default maximum length for generated summaries */
export const DEFAULT_SUMMARIZATION_MAX_LENGTH = 2500;

/** Default summarization method */
export const DEFAULT_SUMMARIZATION_METHOD: SummarizationMethod = 'length-based';

/**
 * Represents the top-level system configuration for a debate session.
 *
 * This interface defines the structure of the configuration file used to initialize
 * agents, judge, and debate parameters. It is typically loaded from a JSON file.
 *
 * @property agents - An array of agent configurations. Each agent participates in the debate.
 * @property judge - (Optional) The configuration for the judge agent responsible for synthesizing the final solution.
 * @property debate - (Optional) Debate-level configuration, such as number of rounds and other settings.
 * @property configDir - (Optional, internal) The absolute directory path of the loaded configuration file.
 * This is set internally by the loader to resolve relative paths for prompts and other files.
 * It is not user-provided.
 */
export interface SystemConfig {

  agents: AgentConfig[]; // List of agent configurations participating in the debate.
  judge?: AgentConfig; // (Optional) Configuration for the judge agent.
  debate?: DebateConfig; // (Optional) Debate-level configuration options.
  /**
   * (Internal) Directory of the loaded configuration file, used for resolving relative paths.
   * Set by the loader, not by the user.
   */
  configDir?: string;
}
@@ -0,0 +1,237 @@
1
+ import { AgentRole, AgentPromptMetadata, JudgePromptMetadata, LLM_PROVIDERS } from './agent.types';
2
+
3
/** String literal constants for termination types */
export const TERMINATION_TYPES = {
  FIXED: 'fixed',
  CONVERGENCE: 'convergence',
  QUALITY: 'quality',
} as const;

/** Union type of all termination types */
export type TerminationType = (typeof TERMINATION_TYPES)[keyof typeof TERMINATION_TYPES];

/** String literal constants for synthesis methods */
export const SYNTHESIS_METHODS = {
  JUDGE: 'judge',
  VOTING: 'voting',
  MERGE: 'merge',
} as const;

/** Union type of all synthesis methods */
export type SynthesisMethod = (typeof SYNTHESIS_METHODS)[keyof typeof SYNTHESIS_METHODS];

/** String literal constants for debate statuses */
export const DEBATE_STATUS = {
  PENDING: 'pending',
  RUNNING: 'running',
  COMPLETED: 'completed',
  FAILED: 'failed',
} as const;

/** Union type of all debate statuses */
export type DebateStatus = (typeof DEBATE_STATUS)[keyof typeof DEBATE_STATUS];

/** String literal constants for contribution types (the three phases of a round) */
export const CONTRIBUTION_TYPES = {
  PROPOSAL: 'proposal',
  CRITIQUE: 'critique',
  REFINEMENT: 'refinement',
} as const;

/** Union type of all contribution types */
export type ContributionType = (typeof CONTRIBUTION_TYPES)[keyof typeof CONTRIBUTION_TYPES];

/** String literal constants for summarization methods (currently a single, length-based strategy) */
export const SUMMARIZATION_METHODS = {
  LENGTH_BASED: 'length-based',
} as const;

/** Union type of all summarization methods */
export type SummarizationMethod = (typeof SUMMARIZATION_METHODS)[keyof typeof SUMMARIZATION_METHODS];
51
+
52
/**
 * Metadata for a summarization operation performed by an agent.
 */
export interface SummarizationMetadata {
  /** Character count before summarization. */
  beforeChars: number;
  /** Character count after summarization. */
  afterChars: number;
  /** Summarization method used. */
  method: SummarizationMethod;
  /** When the summarization occurred. */
  timestamp: Date;
  /** Optional latency in milliseconds for the summarization LLM call. */
  latencyMs?: number;
  /** Optional number of tokens used in the summarization LLM call. */
  tokensUsed?: number;
  /** Optional model used for the summarization. */
  model?: string;
  /** Optional temperature used for the summarization. */
  temperature?: number;
  /** Optional provider used for the summarization. */
  provider?: typeof LLM_PROVIDERS[keyof typeof LLM_PROVIDERS];
}

/**
 * A summary generated by an agent for their perspective of the debate history.
 */
export interface DebateSummary {
  /** Unique identifier of the agent that created the summary. */
  agentId: string;
  /** The role of the agent that created the summary. */
  agentRole: AgentRole;
  /** The summarized content. */
  summary: string;
  /** Metadata about the summarization operation. */
  metadata: SummarizationMetadata;
}

/**
 * Result of preparing debate context for an agent.
 * Contains the prepared context and optional summary if summarization occurred.
 */
export interface ContextPreparationResult {
  /** The prepared context for the agent. */
  context: DebateContext;
  /** Optional summary if summarization was performed. */
  summary?: DebateSummary;
}

/**
 * Configuration for context summarization behavior.
 * Controls when and how agents summarize debate history to manage context size.
 *
 * NOTE(review): an identically-shaped SummarizationConfig is also declared in
 * config.types.ts — consider consolidating into a single definition to avoid drift.
 */
export interface SummarizationConfig {
  /** Whether summarization is enabled. */
  enabled: boolean;
  /** Character count threshold for triggering summarization. */
  threshold: number;
  /** Maximum length of generated summary in characters. */
  maxLength: number;
  /** Summarization method to use. */
  method: SummarizationMethod;
  /** Optional path to custom summarization prompt file. */
  promptPath?: string;
}

/**
 * Configuration controlling how a debate is executed.
 */
export interface DebateConfig {

  /** Number of complete rounds to execute (>= 1). */
  rounds: number;
  /** Termination condition; currently only 'fixed' is supported at runtime. */
  terminationCondition: { type: TerminationType; threshold?: number };
  /** Method used to synthesize the final solution. */
  synthesisMethod: SynthesisMethod;
  /** Whether to include full debate history in the context passed to agents and judge. */
  includeFullHistory: boolean;
  /** Maximum time allowed per round in milliseconds. */
  timeoutPerRound: number;
  /** Optional system-wide summarization configuration. Agents can override with their own settings. */
  summarization?: SummarizationConfig;
  /** Whether to run a one-time interactive clarifications phase before the debate starts. */
  interactiveClarifications?: boolean;
  /** Maximum number of clarification questions to accept per agent (default 5). */
  clarificationsMaxPerAgent?: number;
}
114
+
115
/**
 * In-memory (and persisted) state for a debate execution.
 */
export interface DebateState {
  /** Unique debate identifier. */
  id: string;
  /** Problem statement under discussion. */
  problem: string;
  /** Optional additional context for the problem. */
  context?: string;
  /** Current status of the debate. */
  status: DebateStatus;
  /** The currently active round number (1-indexed, 0 when no rounds have started). */
  currentRound: number;
  /** All executed rounds and their contributions. */
  rounds: DebateRound[];
  /** Final solution, if completed. */
  finalSolution?: Solution;
  /** Optional summary generated by the judge for synthesis. */
  judgeSummary?: DebateSummary;
  /** Creation timestamp. */
  createdAt: Date;
  /** Last updated timestamp. */
  updatedAt: Date;
  /**
   * Provenance of system prompts used by agents and judge for this debate (persisted once per debate).
   */
  promptSources?: {
    agents: AgentPromptMetadata[];
    judge: JudgePromptMetadata;
  };
  /** Optional clarifications collected from agents and answered by the user before round 1. */
  clarifications?: AgentClarifications[];
}

/**
 * A single debate round containing contributions for all phases.
 */
export interface DebateRound {
  /** Round index (1-indexed). */
  roundNumber: number;
  /** All contributions made within this round. */
  contributions: Contribution[];
  /** Optional summaries keyed by agentId for this round. */
  summaries?: Record<string, DebateSummary>;
  /** Timestamp when the round was created. */
  timestamp: Date;
}

/**
 * A single contribution from an agent within a round.
 */
export interface Contribution {
  /** Unique identifier of the contributing agent. */
  agentId: string;
  /** The role of the contributing agent. */
  agentRole: AgentRole;
  /** The contribution type (proposal, critique, or refinement). */
  type: ContributionType;
  /** The main textual content. */
  content: string;
  /** The agent id this critique targets (only for critiques). */
  targetAgentId?: string;
  metadata: {
    /** Optional number of tokens used. */
    tokensUsed?: number;
    /** Optional latency in milliseconds. */
    latencyMs?: number;
    /** Optional model used for the contribution. */
    model?: string;
  };
}

/**
 * Final synthesized solution returned by the judge.
 */
export interface Solution {
  /** Summary description of the solution. */
  description: string;
  /** Optional implementation guidance or snippet. */
  implementation?: string;
  /** Trade-offs considered. */
  tradeoffs: string[];
  /** Concrete recommendations. */
  recommendations: string[];
  /** Confidence score (0-100). */
  confidence: number;
  /** Judge agent id that performed the synthesis. */
  synthesizedBy: string;
}

/**
 * Top-level result of a debate run including the final solution and metadata.
 */
export interface DebateResult {
  /** Debate identifier for correlating with persisted state. */
  debateId: string;
  /** Final solution. */
  solution: Solution;
  /** Executed rounds and their contributions. */
  rounds: DebateRound[];
  /** Aggregate metadata about the debate execution. */
  metadata: {
    /** Number of rounds actually executed. */
    totalRounds: number;
    /** Optional total tokens used across all contributions (if computed). */
    totalTokens?: number;
    /** Total duration in milliseconds. */
    durationMs: number;
  };
}
192
+
193
/**
 * Context object provided to agents and judge.
 */
export interface DebateContext {

  /** Problem statement. */
  problem: string;
  /** Optional additional context for the current request. */
  context?: string;
  /** Optional full history of rounds when enabled. */
  history?: DebateRound[];
  /** Whether to fall back to full history when no summary is found. */
  includeFullHistory?: boolean;
  /** Optional clarifications to include in prompts (grouped by agent). */
  clarifications?: AgentClarifications[];
}

/**
 * A single clarification item: a question and its answer (or "NA").
 */
export interface ClarificationItem {
  /** Unique identifier of the clarification item. */
  id: string;
  /** The clarifying question text. */
  question: string;
  /** The user's answer to the question. */
  answer: string; // may be "NA"
}
213
+
214
+ /**
215
+ * Group of clarifications produced by a single agent.
216
+ */
217
+ export interface AgentClarifications {
218
+ agentId: string;
219
+ agentName: string;
220
+ role: import('./agent.types').AgentRole;
221
+ items: ClarificationItem[];
222
+ }
223
+
224
/**
 * A single clarifying question as produced by an agent before the debate.
 */
export interface ClarificationQuestion {
  /** (Optional) Identifier for the question. */
  id?: string;
  /** The question text. */
  text: string;
}

/**
 * Structured response returned by an agent when asked for clarifying questions.
 */
export interface ClarificationQuestionsResponse {
  /** The list of clarifying questions the agent wants answered. */
  questions: ClarificationQuestion[];
}