@wundam/orchex 1.0.0-rc.27 → 1.0.0-rc.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -13,7 +13,7 @@ Your AI assistant does tasks one at a time. Orchex makes it do 10 at once — sa
  - **`orchex run`** — Describe what you want, get parallel execution. Auto-generates plans, previews waves, executes with ownership enforcement.
  - **`orchex learn`** — The advanced path. Paste a markdown plan, get executable parallel streams with dependency inference and anti-pattern detection.
  - **Self-Healing** — Categorized error analysis with targeted fix streams. Not blind retry. Model validation before execution prevents wasted API calls.
- - **Multi-LLM** — OpenAI, Gemini, Claude, DeepSeek, Ollama, AWS Bedrock. Dynamic model registry auto-discovers available models. Key-aware routing prevents "model not found" errors.
+ - **Multi-LLM** — OpenAI, Gemini, Claude, DeepSeek, Kimi (Moonshot AI), Ollama, AWS Bedrock. Dynamic model registry auto-discovers available models. Key-aware routing prevents "model not found" errors.
  - **BYOK** — Bring your own API key from any supported provider. You control costs.

  ## Prerequisites
@@ -24,6 +24,7 @@ Your AI assistant does tasks one at a time. Orchex makes it do 10 at once — sa
  - `OPENAI_API_KEY` for OpenAI (GPT-4.1, o1, o3)
  - `GEMINI_API_KEY` for Google Gemini
  - `DEEPSEEK_API_KEY` for DeepSeek (V3, Coder, R1)
+ - `KIMI_API_KEY` for Kimi / Moonshot AI (K2, moonshot-v1; `MOONSHOT_API_KEY` alias accepted)
  - Configure Ollama for local models
  - `AWS_ACCESS_KEY_ID` + `AWS_SECRET_ACCESS_KEY` for AWS Bedrock

package/dist/artifacts.js CHANGED
@@ -420,6 +420,10 @@ export async function writeArtifact(projectDir, streamId, artifact) {
  await fs.writeFile(artifactPath, JSON.stringify(artifact, null, 2), 'utf-8');
  return artifactPath;
  }
+ // Artifact-application trust boundary. Plan-level ownership is enforced
+ // upstream by validatePlan (src/intelligence/plan-contract.ts); this
+ // function guards the different trust boundary where LLM-generated
+ // artifacts try to apply their output.
  /**
  * Check whether all file operations fall within the stream's owns patterns.
  *
@@ -1,5 +1,17 @@
  import { extractArtifact } from './artifacts.js';
  import { validateOwnership } from './utils/ownership-validator.js';
+ function parseProviderErrorBody(body) {
+ try {
+ const parsed = JSON.parse(body);
+ const message = parsed?.error?.message ?? parsed?.message;
+ if (typeof message === 'string')
+ return message;
+ }
+ catch {
+ // Not JSON — return null
+ }
+ return null;
+ }
  /**
  * Returns actionable error messages for cloud API failures.
  * Status-code-specific guidance tells users exactly how to fix each error.
@@ -18,6 +30,16 @@ function formatCloudError(status, body) {
  const modelHint = modelMatch ? ` "${modelMatch[1]}"` : '';
  return `Model${modelHint} not found (404)${detail}.\n\nRun \`orchex config --model <model>\` to use a different model.`;
  }
+ case 400: {
+ const providerMsg = parseProviderErrorBody(body);
+ if (providerMsg && /credit balance|insufficient|billing/i.test(providerMsg)) {
+ return `LLM provider billing error (400): ${providerMsg}\n\nTop up credits at your provider's billing page, or switch providers with \`orchex config --provider <provider>\`.`;
+ }
+ if (providerMsg) {
+ return `LLM provider error (400): ${providerMsg}`;
+ }
+ return `Cloud API error: 400${detail}`;
+ }
  case 429:
  return `Quota exceeded (429)${detail}.\n\nYou've used all cloud runs for this period. Upgrade at https://orchex.dev/pricing`;
  default:
@@ -180,11 +202,18 @@ export class CloudExecutor {
  };
  }
  if (job.status === 'failed') {
+ const rawError = job.error ?? 'Cloud execution failed';
+ const providerMsg = parseProviderErrorBody(rawError);
+ const formattedError = providerMsg
+ ? (/credit balance|insufficient|billing/i.test(providerMsg)
+ ? `LLM provider billing error: ${providerMsg}\n\nTop up credits at your provider's billing page, or switch providers with \`orchex config --provider <provider>\`.`
+ : `LLM provider error: ${providerMsg}`)
+ : rawError;
  return {
  success: false,
  rawResponse: job.output ?? '',
  tokensUsed: job.tokensUsed ?? { input: 0, output: 0 },
- error: job.error ?? 'Cloud execution failed',
+ error: formattedError,
  };
  }
  // Job still pending/running - wait with adaptive interval
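
A minimal sketch of the new parsing path, using only names visible in the hunks above; the sample body is hypothetical, not taken from any real provider response.

    // Hypothetical provider error body (shape assumed).
    const sampleBody = JSON.stringify({ error: { message: 'Insufficient credit balance for this request.' } });

    function parseProviderErrorBody(body: string): string | null {
      try {
        const parsed = JSON.parse(body);
        const message = parsed?.error?.message ?? parsed?.message;
        if (typeof message === 'string') return message;
      } catch {
        // Not JSON — fall through to null
      }
      return null;
    }

    const msg = parseProviderErrorBody(sampleBody);
    // 'Insufficient credit balance ...' matches /credit balance|insufficient|billing/i,
    // so the 400 branch returns the billing-specific guidance; other parsed messages
    // become "LLM provider error (400): ...", and non-JSON bodies keep the generic
    // "Cloud API error: 400" text.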
package/dist/config.d.ts CHANGED
@@ -1,6 +1,6 @@
  import { z } from 'zod';
  export declare const PRODUCTION_URL = "https://orchex.dev";
- export declare const LLMProviderSchema: z.ZodEnum<["anthropic", "openai", "gemini", "ollama", "deepseek", "bedrock"]>;
+ export declare const LLMProviderSchema: z.ZodEnum<["anthropic", "openai", "gemini", "ollama", "deepseek", "bedrock", "kimi"]>;
  export type LLMProvider = z.infer<typeof LLMProviderSchema>;
  /**
  * Detect the LLM provider based on available environment variables.
@@ -51,7 +51,7 @@ export declare const ConfigSchema: z.ZodObject<{
  /** User's subscription tier (synced from cloud on login) */
  tier: z.ZodDefault<z.ZodEnum<["free", "pro", "team", "enterprise"]>>;
  /** LLM provider (auto-detected if not set) */
- provider: z.ZodOptional<z.ZodEnum<["anthropic", "openai", "gemini", "ollama", "deepseek", "bedrock"]>>;
+ provider: z.ZodOptional<z.ZodEnum<["anthropic", "openai", "gemini", "ollama", "deepseek", "bedrock", "kimi"]>>;
  /** LLM model (uses provider default if not set) */
  model: z.ZodOptional<z.ZodString>;
  /** Cached trial runs remaining (synced from cloud on login) */
@@ -77,13 +77,13 @@ export declare const ConfigSchema: z.ZodObject<{
  endpoint?: string | undefined;
  };
  apiKey?: string | undefined;
- provider?: "anthropic" | "openai" | "gemini" | "ollama" | "deepseek" | "bedrock" | undefined;
+ provider?: "anthropic" | "openai" | "gemini" | "ollama" | "deepseek" | "bedrock" | "kimi" | undefined;
  model?: string | undefined;
  trialRunsRemaining?: number | undefined;
  accountCreatedAt?: string | undefined;
  }, {
  apiKey?: string | undefined;
- provider?: "anthropic" | "openai" | "gemini" | "ollama" | "deepseek" | "bedrock" | undefined;
+ provider?: "anthropic" | "openai" | "gemini" | "ollama" | "deepseek" | "bedrock" | "kimi" | undefined;
  mode?: "local" | "cloud" | undefined;
  apiUrl?: string | undefined;
  tier?: "free" | "pro" | "team" | "enterprise" | undefined;
package/dist/config.js CHANGED
@@ -10,7 +10,7 @@ export const PRODUCTION_URL = 'https://orchex.dev';
  // ============================================================================
  // LLM Provider Configuration
  // ============================================================================
- export const LLMProviderSchema = z.enum(['anthropic', 'openai', 'gemini', 'ollama', 'deepseek', 'bedrock']);
+ export const LLMProviderSchema = z.enum(['anthropic', 'openai', 'gemini', 'ollama', 'deepseek', 'bedrock', 'kimi']);
  /**
  * Detect the LLM provider based on available environment variables.
  * Priority: ORCHEX_PROVIDER env var > first available API key
@@ -18,7 +18,7 @@ export const LLMProviderSchema = z.enum(['anthropic', 'openai', 'gemini', 'ollam
  export function detectProvider() {
  // Explicit provider override
  const explicit = process.env.ORCHEX_PROVIDER?.toLowerCase();
- if (explicit && ['anthropic', 'openai', 'gemini', 'ollama', 'deepseek', 'bedrock'].includes(explicit)) {
+ if (explicit && ['anthropic', 'openai', 'gemini', 'ollama', 'deepseek', 'bedrock', 'kimi'].includes(explicit)) {
  return explicit;
  }
  // Auto-detect from available API keys (priority order)
@@ -30,6 +30,8 @@ export function detectProvider() {
  return 'gemini';
  if (process.env.DEEPSEEK_API_KEY)
  return 'deepseek';
+ if (process.env.KIMI_API_KEY || process.env.MOONSHOT_API_KEY)
+ return 'kimi';
  if (process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY)
  return 'bedrock';
  if (process.env.OLLAMA_BASE_URL || process.env.OLLAMA_HOST)
@@ -63,6 +65,8 @@ export function getProviderApiKey(provider) {
  return process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY;
  case 'deepseek':
  return process.env.DEEPSEEK_API_KEY;
+ case 'kimi':
+ return process.env.KIMI_API_KEY || process.env.MOONSHOT_API_KEY;
  case 'bedrock':
  return undefined; // Bedrock uses AWS credentials, not API key
  case 'ollama':
@@ -76,6 +80,8 @@ export function getProviderBaseUrl(provider) {
  switch (provider) {
  case 'deepseek':
  return process.env.DEEPSEEK_BASE_URL || 'https://api.deepseek.com/v1';
+ case 'kimi':
+ return process.env.KIMI_BASE_URL || 'https://api.moonshot.ai/v1';
  case 'ollama':
  return process.env.OLLAMA_BASE_URL || process.env.OLLAMA_HOST || 'http://localhost:11434';
  default:
@@ -93,6 +99,7 @@ export const DEFAULT_MODELS = {
  deepseek: 'deepseek-coder',
  bedrock: 'claude-3.5-sonnet',
  ollama: 'llama3.3:70b',
+ kimi: 'kimi-k2-0905-preview',
  };
  /**
  * Get the configured provider.
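
A short usage sketch of the new provider wiring, assuming the exports shown above (detectProvider, getProviderApiKey, getProviderBaseUrl, DEFAULT_MODELS) and a placeholder key value.

    import { detectProvider, getProviderApiKey, getProviderBaseUrl, DEFAULT_MODELS } from './config.js';

    process.env.KIMI_API_KEY = 'sk-placeholder'; // illustrative value only

    // With no higher-priority key set, auto-detection now resolves to 'kimi'
    // (checked after the earlier provider keys in the priority list, before bedrock and ollama).
    const provider = detectProvider();            // 'kimi'
    const apiKey = getProviderApiKey(provider);   // KIMI_API_KEY, falling back to MOONSHOT_API_KEY
    const baseUrl = getProviderBaseUrl(provider); // KIMI_BASE_URL or 'https://api.moonshot.ai/v1'
    const model = DEFAULT_MODELS.kimi;            // 'kimi-k2-0905-preview'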
package/dist/cost.js CHANGED
@@ -10,6 +10,7 @@ export const PROVIDER_RATES = {
  openai: { input: 0.03, output: 0.06 }, // GPT-4 Turbo
  gemini: { input: 0.00075, output: 0.003 }, // Gemini 1.5 Pro, 2026 pricing
  deepseek: { input: 0.00014, output: 0.0006 }, // Deepseek-VL
+ kimi: { input: 0.0006, output: 0.0025 }, // Kimi K2 rate (Q1 2026 public)
  ollama: { input: 0, output: 0 }, // Local, free
  };
  import { getModelCosts } from './intelligence/index.js';
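
For scale, a back-of-the-envelope cost with the new kimi entry, assuming PROVIDER_RATES is expressed in USD per 1K tokens (consistent with the openai and deepseek figures above).

    const kimiRate = { input: 0.0006, output: 0.0025 }; // from PROVIDER_RATES above
    const usage = { input: 12_000, output: 3_000 };     // example token counts

    const costUsd =
      (usage.input / 1000) * kimiRate.input +   // 12 * 0.0006 = 0.0072
      (usage.output / 1000) * kimiRate.output;  // 3  * 0.0025 = 0.0075
    // costUsd ≈ 0.0147 USD for this example run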
@@ -91,9 +91,9 @@ export { Semaphore };
  * Default models for each provider.
  * Re-exported from config.ts for backward compatibility.
  */
- export declare const DEFAULT_MODELS: Record<"anthropic" | "openai" | "gemini" | "ollama" | "deepseek" | "bedrock", string>;
+ export declare const DEFAULT_MODELS: Record<"anthropic" | "openai" | "gemini" | "ollama" | "deepseek" | "bedrock" | "kimi", string>;
  /**
  * Map a generic model name to provider-specific model.
  * Allows users to specify "claude-sonnet-4-5" and it gets translated appropriately.
  */
- export declare function mapModelToProvider(requestedModel: string, provider: 'anthropic' | 'openai' | 'gemini' | 'ollama' | 'deepseek'): string;
+ export declare function mapModelToProvider(requestedModel: string, provider: 'anthropic' | 'openai' | 'gemini' | 'ollama' | 'deepseek' | 'kimi'): string;
@@ -198,6 +198,9 @@ export function mapModelToProvider(requestedModel, provider) {
  if (requestedModel.startsWith('deepseek-')) {
  return requestedModel;
  }
+ if (requestedModel.startsWith('kimi-') || requestedModel.startsWith('moonshot-')) {
+ return requestedModel;
+ }
  if (requestedModel.includes(':') || requestedModel.startsWith('llama') || requestedModel.startsWith('mistral')) {
  return requestedModel;
  }
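
Per the new branch, kimi- and moonshot- prefixed names pass through unchanged. A sketch of the expected mapping (signature from the .d.ts change above; the import path is assumed).

    import { mapModelToProvider } from './index.js'; // path assumed

    mapModelToProvider('kimi-k2-0905-preview', 'kimi'); // → 'kimi-k2-0905-preview' (unchanged)
    mapModelToProvider('moonshot-v1-128k', 'kimi');     // → 'moonshot-v1-128k' (unchanged)
    mapModelToProvider('deepseek-coder', 'deepseek');   // → 'deepseek-coder' (existing passthrough)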
@@ -12,6 +12,7 @@ export { GeminiExecutor } from './gemini-executor.js';
  export { OllamaExecutor } from './ollama-executor.js';
  export { DeepseekExecutor } from './deepseek-executor.js';
  export { BedrockExecutor } from './bedrock-executor.js';
+ export { KimiExecutor } from './kimi-executor.js';
  export * from './base.js';
  /**
  * Options for creating an executor.
@@ -11,6 +11,7 @@ import { GeminiExecutor } from './gemini-executor.js';
  import { OllamaExecutor } from './ollama-executor.js';
  import { DeepseekExecutor } from './deepseek-executor.js';
  import { BedrockExecutor } from './bedrock-executor.js';
+ import { KimiExecutor } from './kimi-executor.js';
  // Re-export individual executors
  export { ClaudeExecutor } from '../claude-executor.js';
  export { OpenAIExecutor } from './openai-executor.js';
@@ -18,6 +19,7 @@ export { GeminiExecutor } from './gemini-executor.js';
  export { OllamaExecutor } from './ollama-executor.js';
  export { DeepseekExecutor } from './deepseek-executor.js';
  export { BedrockExecutor } from './bedrock-executor.js';
+ export { KimiExecutor } from './kimi-executor.js';
  // Re-export base utilities
  export * from './base.js';
  /**
@@ -90,6 +92,15 @@ export function createExecutor(options = {}) {
  timeoutMs: options.timeoutMs,
  maxConcurrency: options.maxConcurrency,
  }));
+ case 'kimi':
+ return new KimiExecutor(defined({
+ apiKey: options.apiKey ?? getProviderApiKey('kimi'),
+ baseUrl: options.baseUrl ?? getProviderBaseUrl('kimi'),
+ maxRetries: options.maxRetries,
+ retryDelayMs: options.retryDelayMs,
+ timeoutMs: options.timeoutMs,
+ maxConcurrency: options.maxConcurrency,
+ }));
  case 'bedrock':
  return new BedrockExecutor(defined({
  maxRetries: options.maxRetries,
@@ -98,7 +109,7 @@ export function createExecutor(options = {}) {
  maxConcurrency: options.maxConcurrency,
  }));
  default:
- throw new Error(`Unknown provider: ${provider}. Supported: anthropic, openai, gemini, ollama, deepseek, bedrock`);
+ throw new Error(`Unknown provider: ${provider}. Supported: anthropic, openai, gemini, ollama, deepseek, bedrock, kimi`);
  }
  }
  /**
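
A sketch of constructing the new executor through the factory, assuming options.provider selects the switch case shown above.

    import { createExecutor } from './executors/index.js'; // path assumed from the imports above

    // With KIMI_API_KEY (or MOONSHOT_API_KEY) set, apiKey and baseUrl fall back to
    // getProviderApiKey('kimi') / getProviderBaseUrl('kimi').
    const executor = createExecutor({ provider: 'kimi', maxConcurrency: 4 });

    // Any unrecognized value now fails with:
    //   Unknown provider: <provider>. Supported: anthropic, openai, gemini, ollama, deepseek, bedrock, kimi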
@@ -116,6 +127,7 @@ export function getProviderStatus() {
  ollama: 'Ollama (local)',
  deepseek: 'DeepSeek',
  bedrock: 'AWS Bedrock',
+ kimi: 'Kimi (Moonshot AI)',
  };
  if (provider === 'ollama') {
  return {
@@ -162,6 +174,8 @@ export function listConfiguredProviders() {
  configured.push('gemini');
  if (process.env.DEEPSEEK_API_KEY)
  configured.push('deepseek');
+ if (process.env.KIMI_API_KEY || process.env.MOONSHOT_API_KEY)
+ configured.push('kimi');
  if (process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY)
  configured.push('bedrock');
  if (process.env.OLLAMA_BASE_URL || process.env.OLLAMA_HOST)
@@ -189,6 +203,7 @@ export async function getFullStatus() {
  ollama: 'Ollama (local)',
  deepseek: 'DeepSeek',
  bedrock: 'AWS Bedrock',
+ kimi: 'Kimi (Moonshot AI)',
  };
  if (provider === 'ollama') {
  return {
@@ -0,0 +1,18 @@
+ import type { ExecutorStrategy, ExecutionRequest, ExecutionResult } from '../types.js';
+ import { ExecutorConfig } from './base.js';
+ export interface KimiExecutorConfig extends ExecutorConfig {
+ apiKey?: string;
+ baseUrl?: string;
+ }
+ export declare class KimiExecutor implements ExecutorStrategy {
+ readonly provider = "kimi";
+ private config;
+ private semaphore;
+ private apiKey;
+ private baseUrl;
+ constructor(config?: Partial<KimiExecutorConfig>);
+ execute(request: ExecutionRequest): Promise<ExecutionResult>;
+ private executeWithRetry;
+ private callApi;
+ private buildMessages;
+ }
@@ -0,0 +1,136 @@
+ import { DEFAULT_EXECUTOR_CONFIG, Semaphore, sleep, isRetryableError, calculateBackoff, extractArtifactDetailed, buildOpenAIMessages, DEFAULT_MODELS, withCircuitBreaker, classifyExecutionError, } from './base.js';
+ const KIMI_BASE_URL = 'https://api.moonshot.ai/v1';
+ const KIMI_MAX_OUTPUT_TOKENS = 8192;
+ export class KimiExecutor {
+ provider = 'kimi';
+ config;
+ semaphore;
+ apiKey;
+ baseUrl;
+ constructor(config = {}) {
+ this.config = { ...DEFAULT_EXECUTOR_CONFIG, ...config };
+ this.apiKey = config.apiKey ?? process.env.KIMI_API_KEY ?? process.env.MOONSHOT_API_KEY ?? '';
+ this.baseUrl = config.baseUrl ?? process.env.KIMI_BASE_URL ?? KIMI_BASE_URL;
+ this.semaphore = new Semaphore(this.config.maxConcurrency);
+ if (!this.apiKey) {
+ throw new Error('Kimi API key required. Set KIMI_API_KEY environment variable.');
+ }
+ }
+ async execute(request) {
+ const release = await this.semaphore.acquire();
+ try {
+ return await withCircuitBreaker(this.provider, request.model, () => this.executeWithRetry(request));
+ }
+ finally {
+ release();
+ }
+ }
+ async executeWithRetry(request) {
+ let lastError;
+ for (let attempt = 0; attempt <= this.config.maxRetries; attempt++) {
+ if (attempt > 0 && lastError) {
+ const classified = classifyExecutionError(lastError);
+ const delay = classified.retryAfterMs ?? calculateBackoff(attempt, this.config.retryDelayMs);
+ await sleep(delay);
+ }
+ try {
+ return await this.callApi(request);
+ }
+ catch (error) {
+ lastError = error;
+ if (!isRetryableError(error)) {
+ return {
+ success: false,
+ rawResponse: '',
+ tokensUsed: { input: 0, output: 0 },
+ error: lastError.message,
+ };
+ }
+ if (attempt === this.config.maxRetries) {
+ break;
+ }
+ }
+ }
+ return {
+ success: false,
+ rawResponse: '',
+ tokensUsed: { input: 0, output: 0 },
+ error: `Failed after ${this.config.maxRetries + 1} attempts: ${lastError?.message}`,
+ };
+ }
+ async callApi(request) {
+ // Kimi models: kimi-k2-*, kimi-latest, moonshot-v1-*
+ const model = (request.model.startsWith('kimi-') || request.model.startsWith('moonshot-'))
+ ? request.model
+ : DEFAULT_MODELS.kimi;
+ const effectiveTimeout = request.timeoutMs ?? this.config.timeoutMs;
+ const messages = this.buildMessages(request);
+ const controller = new AbortController();
+ const timeoutId = setTimeout(() => controller.abort(), effectiveTimeout);
+ try {
+ const response = await fetch(`${this.baseUrl}/chat/completions`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': `Bearer ${this.apiKey}`,
+ },
+ body: JSON.stringify({
+ model,
+ messages,
+ max_tokens: Math.min(request.maxTokens, KIMI_MAX_OUTPUT_TOKENS),
+ stream: false,
+ }),
+ signal: controller.signal,
+ });
+ if (!response.ok) {
+ const errorBody = await response.text().catch(() => 'Unknown error');
+ const error = new Error(`Kimi API error: ${response.status} ${errorBody}`);
+ error.status = response.status;
+ throw error;
+ }
+ const data = await response.json();
+ const rawResponse = data.choices?.[0]?.message?.content ?? '';
+ const { artifact, diagnostic } = extractArtifactDetailed(rawResponse);
+ if (!artifact) {
+ const errorDetail = diagnostic.jsonRepaired
+ ? `Artifact JSON was repaired but still invalid (strategy: ${diagnostic.strategy})`
+ : diagnostic.error ?? 'No valid orchex-artifact block found in response';
+ return {
+ success: false,
+ rawResponse,
+ tokensUsed: {
+ input: data.usage?.prompt_tokens ?? 0,
+ output: data.usage?.completion_tokens ?? 0,
+ },
+ error: errorDetail,
+ };
+ }
+ return {
+ success: true,
+ rawResponse,
+ artifact,
+ tokensUsed: {
+ input: data.usage?.prompt_tokens ?? 0,
+ output: data.usage?.completion_tokens ?? 0,
+ },
+ };
+ }
+ catch (error) {
+ if (error.name === 'AbortError') {
+ return {
+ success: false,
+ rawResponse: '',
+ tokensUsed: { input: 0, output: 0 },
+ error: `API call timed out after ${effectiveTimeout}ms`,
+ };
+ }
+ throw error;
+ }
+ finally {
+ clearTimeout(timeoutId);
+ }
+ }
+ buildMessages(request) {
+ return buildOpenAIMessages(request);
+ }
+ }
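
A usage sketch for the new executor based on the declarations above; the request object is intentionally partial, since only model, maxTokens, and timeoutMs are visible in this diff.

    import { KimiExecutor } from './executors/kimi-executor.js'; // path assumed

    // Throws at construction time if neither KIMI_API_KEY / MOONSHOT_API_KEY nor
    // an explicit apiKey is available.
    const kimi = new KimiExecutor({ maxConcurrency: 2 });

    const result = await kimi.execute({
      model: 'kimi-k2-0905-preview', // non kimi-/moonshot- names fall back to DEFAULT_MODELS.kimi
      maxTokens: 4096,               // capped at KIMI_MAX_OUTPUT_TOKENS (8192)
      timeoutMs: 120_000,
      // remaining ExecutionRequest fields (prompt/messages) omitted here
    } as any); // partial request, for illustration only

    if (!result.success) console.error(result.error);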
package/dist/index.js CHANGED
@@ -6,6 +6,7 @@ import { ORCHEX_INSTRUCTIONS } from './mcp-instructions.js';
  import { registerResources } from './mcp-resources.js';
  import { broadcaster } from './execution-broadcaster.js';
  import { loadConfig, saveConfig, maskConfigForDisplay, resolveApiUrl, PRODUCTION_URL, LLMProviderSchema } from './config.js';
+ import { analyzeError } from './intelligence/index.js';
  import { buildVerificationMessage, buildStatusMessage, parseLoginApiResponse, } from './login-helpers.js';
  /** Opens browser cross-platform. Errors are silently ignored — URL already printed to terminal. */
  function openBrowser(url) {
@@ -335,11 +336,11 @@ async function handleConfigCommand(args) {
  updates.mode = 'local';
  break;
  case '--provider':
+ const validProviders = LLMProviderSchema.options;
  if (!value) {
- console.error('Error: --provider requires a value (anthropic, openai, gemini, deepseek, ollama)');
+ console.error(`Error: --provider requires a value (${validProviders.join(', ')})`);
  process.exit(1);
  }
- const validProviders = LLMProviderSchema.options;
  if (!validProviders.includes(value)) {
  console.error(`Error: Invalid provider "${value}". Valid: ${validProviders.join(', ')}`);
  process.exit(1);
@@ -619,6 +620,17 @@ async function handleRunCommand(args) {
  }
  // Init and execute
  console.log(`\nInitializing orchestration: "${plan.title}"\n`);
+ // Plan-contract boundary (Session 13): fail fast on invalid streams before
+ // they reach the legacy 5-layer validation. Additive — existing layers stay
+ // in place as defense-in-depth until Session 14 consolidates them.
+ {
+ const { validatePlan, formatPlanValidationErrors } = await import('./intelligence/index.js');
+ const validation = validatePlan(streamDefs, projectDir);
+ if (!validation.ok) {
+ console.error('Plan validation failed:\n' + formatPlanValidationErrors(validation.errors));
+ process.exit(1);
+ }
+ }
  await initOrchestration(projectDir, plan.title, streamDefs);
  const allResponses = [];
  let executionError;
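
In isolation, the fail-fast boundary added above looks roughly like this; the stream definition fields shown are illustrative, not the full plan contract.

    const { validatePlan, formatPlanValidationErrors } = await import('./intelligence/index.js');

    const streamDefs = [
      { id: 'stream-a', owns: ['src/api/**'], tasks: ['Wire up the kimi provider'] }, // shape assumed
    ];

    const validation = validatePlan(streamDefs, process.cwd());
    if (!validation.ok) {
      console.error('Plan validation failed:\n' + formatPlanValidationErrors(validation.errors));
      process.exit(1);
    }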
@@ -638,9 +650,13 @@
  process.stdout.write(` ✓ ${event.data.streamId} — complete${dur}\n`);
  break;
  }
- case 'stream_failed':
- process.stdout.write(` ✗ ${event.data.streamId} failed: ${String(event.data.error).slice(0, 100)}\n`);
+ case 'stream_failed': {
+ const errStr = String(event.data.error);
+ const analysis = analyzeError(errStr);
+ const categoryTag = analysis.category !== 'unknown' ? ` [${analysis.category}]` : '';
+ process.stdout.write(` ✗ ${event.data.streamId} — failed${categoryTag}: ${errStr.slice(0, 200)}\n`);
  break;
+ }
  case 'stream_rate_limited':
  process.stdout.write(` ⏳ ${event.data.streamId} — rate limited (retry in ${Math.round((event.data.retryAfterMs ?? 0) / 1000)}s)\n`);
  break;
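
A sketch of the categorized failure line, using analyzeError as in the handler above; the category name shown is illustrative, only 'unknown' is confirmed by this diff.

    import { analyzeError } from './intelligence/index.js';

    const errStr = 'Kimi API error: 429 rate limit exceeded'; // example error text
    const analysis = analyzeError(errStr);

    // Categorized errors gain a tag, e.g. " [rate-limit]" (hypothetical category name);
    // errors analyzed as 'unknown' keep the plain "failed:" line.
    const categoryTag = analysis.category !== 'unknown' ? ` [${analysis.category}]` : '';
    process.stdout.write(` ✗ stream-a — failed${categoryTag}: ${errStr.slice(0, 200)}\n`);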
@@ -1,10 +1,12 @@
- export { gatherProjectContext, generatePlan } from './auto-planner.js';
+ export { gatherProjectContext, generatePlan, extractSpecPaths } from './auto-planner.js';
  export { parsePlanDocument, getSectionsAtLevel, isUnpopulatedTemplate } from './plan-parser.js';
  export { extractDeliverables, processDeliverables, formatDeliverablesReport } from './deliverable-extractor.js';
  export type { Deliverable } from './deliverable-extractor.js';
  export { buildDependencyGraph, formatDependencyReport } from './dependency-inferrer.js';
  export { generateStreams, formatStreamsForReview, toInitFormat, extractPrerequisites } from './stream-generator.js';
- export { formatPlanPreview, formatPlanPreviewText, type ModelDecision } from './plan-preview.js';
+ export { validatePlan, formatPlanValidationErrors, StreamDefinitionSchema as PlanContractStreamSchema } from './plan-contract.js';
+ export type { PlanValidationError, ValidatePlanResult, StreamDefinition as PlanContractStream } from './plan-contract.js';
+ export { formatPlanPreview, formatPlanPreviewText, generatePlanPreview, type ModelDecision, type PlanPreview, type GeneratePlanPreviewInput, type GeneratePlanPreviewResult, } from './plan-preview.js';
  export { detectSequentialEdits, autoFixSequentialEdits } from './sequential-diagnostics.js';
  export type { SequentialEditDiagnostic } from './sequential-diagnostics.js';
  export { createDiagnostics, detectOwnershipConflicts } from './diagnostics.js';