@krr2020/taskflow-core 0.1.0-beta.4 → 0.1.0-beta.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +1 -1
  2. package/dist/cli/index.js +41 -3
  3. package/dist/commands/base.d.ts +41 -0
  4. package/dist/commands/base.js +141 -0
  5. package/dist/commands/configure.d.ts +29 -0
  6. package/dist/commands/configure.js +187 -0
  7. package/dist/commands/init.js +15 -1
  8. package/dist/commands/prd/create.d.ts +1 -1
  9. package/dist/commands/prd/create.js +26 -8
  10. package/dist/commands/tasks/generate.d.ts +1 -1
  11. package/dist/commands/tasks/generate.js +81 -55
  12. package/dist/commands/upgrade.js +35 -2
  13. package/dist/commands/workflow/check.d.ts +16 -0
  14. package/dist/commands/workflow/check.js +355 -32
  15. package/dist/commands/workflow/do.js +157 -62
  16. package/dist/index.d.ts +4 -0
  17. package/dist/index.js +6 -0
  18. package/dist/lib/config-paths.js +6 -1
  19. package/dist/lib/file-validator.d.ts +119 -0
  20. package/dist/lib/file-validator.js +291 -0
  21. package/dist/lib/log-parser.d.ts +91 -0
  22. package/dist/lib/log-parser.js +178 -0
  23. package/dist/lib/retrospective.d.ts +27 -0
  24. package/dist/lib/retrospective.js +110 -0
  25. package/dist/lib/types.d.ts +8 -0
  26. package/dist/lib/types.js +12 -8
  27. package/dist/llm/base.d.ts +52 -0
  28. package/dist/llm/base.js +35 -0
  29. package/dist/llm/factory.d.ts +39 -0
  30. package/dist/llm/factory.js +102 -0
  31. package/dist/llm/index.d.ts +7 -0
  32. package/dist/llm/index.js +7 -0
  33. package/dist/llm/model-selector.d.ts +71 -0
  34. package/dist/llm/model-selector.js +139 -0
  35. package/dist/llm/providers/anthropic.d.ts +31 -0
  36. package/dist/llm/providers/anthropic.js +116 -0
  37. package/dist/llm/providers/index.d.ts +6 -0
  38. package/dist/llm/providers/index.js +6 -0
  39. package/dist/llm/providers/ollama.d.ts +28 -0
  40. package/dist/llm/providers/ollama.js +91 -0
  41. package/dist/llm/providers/openai-compatible.d.ts +30 -0
  42. package/dist/llm/providers/openai-compatible.js +93 -0
  43. package/dist/schemas/config.d.ts +82 -0
  44. package/dist/schemas/config.js +35 -0
  45. package/package.json +43 -43
  46. package/dist/lib/package-manager.d.ts +0 -17
  47. package/dist/lib/package-manager.js +0 -53
@@ -180,3 +180,113 @@ export function printRetroAddUsage() {
180
180
  console.log(`\n${colors.highlight("Example:")}`);
181
181
  console.log(`${colors.muted('taskflow retro add --category "Type Error" --pattern "Cannot find module" --solution "Check import path exists" --criticality "High"')}`);
182
182
  }
183
+ // ============================================================================
184
+ // Auto-Update Functions
185
+ // ============================================================================
186
/**
 * Load the raw retrospective file content for the given reference directory.
 * Returns an empty string when no retrospective file exists yet, so callers
 * can treat "no file" and "empty file" uniformly.
 */
export function readRetrospectiveBeforeWork(refDir) {
    const filePath = getRetrospectiveFilePath(refDir);
    return fs.existsSync(filePath) ? fs.readFileSync(filePath, "utf-8") : "";
}
196
/**
 * Derive candidate error patterns from parsed errors that are not yet
 * recorded in the retrospective for `refDir`.
 *
 * Errors are bucketed by error code (or the first 50 characters of the
 * message when no code is present); at most one candidate pattern is
 * produced per bucket, and buckets matching an existing retrospective item
 * are skipped.
 */
export function extractNewPatterns(errors, refDir) {
    const knownItems = loadRetrospective(refDir);
    // Bucket errors by code, falling back to a message prefix.
    const buckets = new Map();
    for (const err of errors) {
        const groupKey = err.code || err.message.substring(0, 50);
        const bucket = buckets.get(groupKey);
        if (bucket) {
            bucket.push(err);
        }
        else {
            buckets.set(groupKey, [err]);
        }
    }
    const results = [];
    for (const bucket of buckets.values()) {
        const representative = bucket[0];
        if (!representative)
            continue;
        const pattern = representative.code || representative.message;
        // Stored patterns may be regexes (with '|' escaped, presumably for
        // markdown tables); fall back to a case-insensitive substring check
        // when the stored pattern is not a valid regex.
        const isKnown = knownItems.some((item) => {
            try {
                const regex = new RegExp(item.pattern.replace(/\\\|/g, "|"), "i");
                return regex.test(pattern);
            }
            catch {
                return item.pattern.toLowerCase().includes(pattern.toLowerCase());
            }
        });
        if (isKnown)
            continue;
        // Classify by error source; anything unrecognized counts as Runtime.
        const msg = representative.message;
        let category = "Runtime";
        if (representative.code?.startsWith("TS")) {
            category = "Type Error";
        }
        else if (msg.includes("eslint") || msg.includes("lint")) {
            category = "Lint";
        }
        else if (msg.includes("test")) {
            category = "Test";
        }
        // Map severity onto criticality; unknown severities stay Medium.
        let criticality = "Medium";
        if (representative.severity === "error") {
            criticality = "High";
        }
        else if (representative.severity === "warning") {
            criticality = "Low";
        }
        results.push({
            category,
            pattern,
            solution: "Review error message and fix the underlying issue",
            criticality,
            errorCode: representative.code,
            affectedFiles: bucket.map((e) => e.file).filter(Boolean),
        });
    }
    return results;
}
263
/**
 * Persist each candidate pattern via addRetrospectiveEntry and return the
 * ids assigned to the newly written entries, in input order.
 */
export function appendNewPatternsToRetrospective(refDir, patterns) {
    return patterns.map((p) => addRetrospectiveEntry(refDir, p.category, p.pattern, p.solution, p.criticality));
}
275
/**
 * Render a detected-but-not-yet-saved pattern as a multi-line, colorized
 * summary suitable for terminal display.
 */
export function formatNewPatternForDisplay(pattern) {
    const out = [
        `${colors.highlight("New Error Pattern Detected:")}`,
        ` Category: ${colors.muted(pattern.category)}`,
        ` Pattern: ${colors.warning(pattern.pattern)}`,
    ];
    if (pattern.errorCode) {
        out.push(` Code: ${colors.error(pattern.errorCode)}`);
    }
    out.push(` Suggested Solution: ${colors.success(pattern.solution)}`);
    out.push(` Criticality: ${colors.state(pattern.criticality)}`);
    if (pattern.affectedFiles.length > 0) {
        // Show at most three files; an ellipsis signals truncation.
        out.push(` Affected Files: ${colors.muted(pattern.affectedFiles.slice(0, 3).join(", "))}${pattern.affectedFiles.length > 3 ? "..." : ""}`);
    }
    return out.join("\n");
}
@@ -281,6 +281,14 @@ export declare const TaskflowConfigSchema: z.ZodObject<{
281
281
  commands: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
282
282
  }, z.core.$strip>>;
283
283
  ai: z.ZodOptional<z.ZodObject<{
284
+ enabled: z.ZodDefault<z.ZodBoolean>;
285
+ provider: z.ZodOptional<z.ZodString>;
286
+ models: z.ZodOptional<z.ZodObject<{
287
+ planning: z.ZodOptional<z.ZodString>;
288
+ execution: z.ZodOptional<z.ZodString>;
289
+ analysis: z.ZodOptional<z.ZodString>;
290
+ default: z.ZodOptional<z.ZodString>;
291
+ }, z.core.$strip>>;
284
292
  autoContinueTask: z.ZodDefault<z.ZodBoolean>;
285
293
  clearContextOnComplete: z.ZodDefault<z.ZodBoolean>;
286
294
  }, z.core.$strip>>;
package/dist/lib/types.js CHANGED
@@ -154,14 +154,18 @@ export const TaskflowConfigSchema = z.object({
154
154
  .optional(),
155
155
  ai: z
156
156
  .object({
157
- autoContinueTask: z
158
- .boolean()
159
- .default(false)
160
- .describe("Automatically continue to next task without user confirmation"),
161
- clearContextOnComplete: z
162
- .boolean()
163
- .default(true)
164
- .describe("Clear AI model context after task completion"),
157
+ enabled: z.boolean().default(false),
158
+ provider: z.string().optional(),
159
+ models: z
160
+ .object({
161
+ planning: z.string().optional(),
162
+ execution: z.string().optional(),
163
+ analysis: z.string().optional(),
164
+ default: z.string().optional(),
165
+ })
166
+ .optional(),
167
+ autoContinueTask: z.boolean().default(false),
168
+ clearContextOnComplete: z.boolean().default(true),
165
169
  })
166
170
  .optional(),
167
171
  });
@@ -0,0 +1,52 @@
1
/**
 * Base LLM Provider interface
 * All LLM providers must implement this interface
 */
/** A single chat message exchanged with an LLM. */
export interface LLMMessage {
    role: "system" | "user" | "assistant";
    content: string;
}
/** Optional sampling parameters forwarded to a generation request. */
export interface LLMGenerationOptions {
    maxTokens?: number;
    temperature?: number;
    topP?: number;
    topK?: number;
}
/** Result of a completed generation call. */
export interface LLMGenerationResult {
    content: string;
    model: string;
    tokensUsed?: number;
    finishReason?: string;
}
/** Supported provider backends. */
export declare enum LLMProviderType {
    OpenAICompatible = "openai-compatible",
    Anthropic = "anthropic",
    Ollama = "ollama"
}
/** Workflow phases that may be served by different models. */
export declare enum Phase {
    Planning = "planning",
    Execution = "execution",
    Analysis = "analysis"
}
/**
 * LLM Provider interface
 * Defines the contract for all LLM provider implementations
 */
export declare abstract class LLMProvider {
    readonly type: LLMProviderType;
    readonly model: string;
    constructor(type: LLMProviderType, model: string);
    /**
     * Generate text from the LLM
     */
    abstract generate(messages: LLMMessage[], options?: LLMGenerationOptions): Promise<LLMGenerationResult>;
    /**
     * Check if the provider is properly configured
     */
    abstract isConfigured(): boolean;
    /**
     * Get the model name for the specified phase
     * Override if provider has phase-specific models
     */
    getModelForPhase(_phase: Phase): string;
}
@@ -0,0 +1,35 @@
1
/**
 * Base LLM provider runtime support: provider/phase enums plus the
 * LLMProvider base class shared by all concrete providers.
 */
export var LLMProviderType;
(function (T) {
    T["OpenAICompatible"] = "openai-compatible";
    T["Anthropic"] = "anthropic";
    T["Ollama"] = "ollama";
})(LLMProviderType || (LLMProviderType = {}));
export var Phase;
(function (P) {
    P["Planning"] = "planning";
    P["Execution"] = "execution";
    P["Analysis"] = "analysis";
})(Phase || (Phase = {}));
/**
 * Base class for LLM provider implementations. Concrete providers supply
 * generate() and isConfigured(); subclasses with phase-specific models may
 * override getModelForPhase().
 */
export class LLMProvider {
    constructor(type, model) {
        this.type = type;
        this.model = model;
    }
    // Default behaviour: a single model serves every phase.
    getModelForPhase(_phase) {
        return this.model;
    }
}
@@ -0,0 +1,39 @@
1
/**
 * LLM Provider Factory
 * Factory for creating LLM providers and model selectors
 */
import { type LLMGenerationOptions, type LLMGenerationResult, type LLMMessage, type LLMProvider, LLMProviderType } from "./base.js";
import { type AIConfig, ModelSelector } from "./model-selector.js";
export type { LLMMessage, LLMGenerationOptions, LLMGenerationResult };
export { LLMProvider, LLMProviderType } from "./base.js";
export { type AIConfig, ModelSelector } from "./model-selector.js";
export { AnthropicProvider, OllamaProvider, OpenAICompatibleProvider, } from "./providers/index.js";
/**
 * Provider factory namespace
 * Functions for creating LLM providers and model selectors
 */
export declare const ProviderFactory: {
    /**
     * Create a model selector from configuration
     */
    createSelector(config: AIConfig): ModelSelector;
    /**
     * Create a single provider (backward compatible)
     */
    createProvider(type: LLMProviderType, model: string, apiKey?: string, baseUrl?: string): LLMProvider;
    /**
     * Test if a provider is configured and working
     * Never rejects; failures are reported in the result's `error` field.
     */
    testProvider(provider: LLMProvider): Promise<{
        success: boolean;
        error?: string;
    }>;
    /**
     * Get available providers
     */
    getAvailableProviders(): string[];
    /**
     * Get default model for provider
     */
    getDefaultModel(providerType: LLMProviderType): string;
};
@@ -0,0 +1,102 @@
1
/**
 * LLM Provider Factory
 * Factory for creating LLM providers and model selectors
 */
import { LLMProviderType, } from "./base.js";
import { ModelSelector } from "./model-selector.js";
import { AnthropicProvider } from "./providers/anthropic.js";
import { OllamaProvider } from "./providers/ollama.js";
import { OpenAICompatibleProvider } from "./providers/openai-compatible.js";
export { LLMProvider, LLMProviderType } from "./base.js";
export { ModelSelector } from "./model-selector.js";
export { AnthropicProvider, OllamaProvider, OpenAICompatibleProvider, } from "./providers/index.js";
/**
 * Provider factory namespace
 * Functions for creating LLM providers and model selectors
 */
export const ProviderFactory = {
    /** Build a per-phase ModelSelector from an AIConfig. */
    createSelector(config) {
        return new ModelSelector(config);
    },
    /**
     * Instantiate a single provider of the given type (backward compatible).
     * apiKey/baseUrl are forwarded only when truthy so each provider's
     * fromEnv() can apply its own fallbacks.
     */
    createProvider(type, model, apiKey, baseUrl) {
        switch (type) {
            case LLMProviderType.OpenAICompatible:
                return OpenAICompatibleProvider.fromEnv({
                    model,
                    ...(apiKey ? { apiKey } : {}),
                    ...(baseUrl ? { baseUrl } : {}),
                });
            case LLMProviderType.Anthropic:
                return AnthropicProvider.fromEnv({
                    model,
                    ...(apiKey ? { apiKey } : {}),
                });
            case LLMProviderType.Ollama:
                return OllamaProvider.fromEnv({
                    model,
                    ...(baseUrl ? { baseUrl } : {}),
                });
            default:
                throw new Error(`Unknown provider type: ${type}`);
        }
    },
    /**
     * Smoke-test a provider: verify configuration, then issue a tiny
     * generation request. Never throws; failures surface in the result.
     */
    async testProvider(provider) {
        try {
            if (!provider.isConfigured()) {
                return { success: false, error: "Provider not configured" };
            }
            // Simple test request
            await provider.generate([
                {
                    role: "user",
                    content: "Hello, please respond with just 'OK'.",
                },
            ], { maxTokens: 10 });
            return { success: true };
        }
        catch (error) {
            return {
                success: false,
                error: error instanceof Error ? error.message : String(error),
            };
        }
    },
    /** List every supported provider identifier. */
    getAvailableProviders() {
        return [
            LLMProviderType.OpenAICompatible,
            LLMProviderType.Anthropic,
            LLMProviderType.Ollama,
        ];
    },
    /** Default model name shipped for each provider type. */
    getDefaultModel(providerType) {
        switch (providerType) {
            case LLMProviderType.OpenAICompatible:
                return "gpt-4o-mini";
            case LLMProviderType.Anthropic:
                return "claude-3-5-sonnet-20241022";
            case LLMProviderType.Ollama:
                return "llama2";
            default:
                throw new Error(`Unknown provider type: ${providerType}`);
        }
    },
};
@@ -0,0 +1,7 @@
1
+ /**
2
+ * LLM package exports
3
+ */
4
+ export * from "./base.js";
5
+ export * from "./factory.js";
6
+ export * from "./model-selector.js";
7
+ export * from "./providers/index.js";
@@ -0,0 +1,7 @@
1
+ /**
2
+ * LLM package exports
3
+ */
4
+ export * from "./base.js";
5
+ export * from "./factory.js";
6
+ export * from "./model-selector.js";
7
+ export * from "./providers/index.js";
@@ -0,0 +1,71 @@
1
/**
 * Model Selector
 * Selects the appropriate model for each phase (planning, execution, analysis)
 */
import { type LLMProvider, LLMProviderType, Phase } from "./base.js";
/** One resolved provider instance per workflow phase. */
export interface ModelSelection {
    planning: LLMProvider;
    execution: LLMProvider;
    analysis: LLMProvider;
}
/**
 * AI configuration consumed by ModelSelector.
 * `models.default` is the fallback model; per-phase model names and
 * per-phase provider/API-key overrides are optional.
 */
export interface AIConfig {
    enabled: boolean;
    provider: LLMProviderType;
    apiKey?: string;
    models: {
        default: string;
        planning?: string;
        execution?: string;
        analysis?: string;
    };
    planningProvider?: LLMProviderType;
    planningApiKey?: string;
    executionProvider?: LLMProviderType;
    executionApiKey?: string;
    analysisProvider?: LLMProviderType;
    analysisApiKey?: string;
    ollamaBaseUrl?: string;
    openaiBaseUrl?: string;
}
/**
 * Model Selector class
 * Manages per-phase model selection and provider instantiation
 */
export declare class ModelSelector {
    private selection;
    private modelNames;
    constructor(config: AIConfig);
    /**
     * Get the provider for a specific phase
     */
    getProvider(phase: Phase): LLMProvider;
    /**
     * Get the model name for a specific phase
     */
    getModelName(phase: Phase): string;
    /**
     * Check if any provider is configured
     */
    isConfigured(): boolean;
    /**
     * Create provider from configuration
     */
    private createProvider;
    /**
     * Create model selection from configuration
     */
    private createSelection;
    /**
     * Get base URL for provider
     */
    private getBaseUrlForProvider;
    /**
     * Create ModelSelector from minimal config (backward compatible)
     */
    static fromSimpleConfig(config: {
        enabled: boolean;
        provider: LLMProviderType;
        apiKey?: string;
        model?: string;
    }): ModelSelector;
}
@@ -0,0 +1,139 @@
1
/**
 * Model Selector
 * Selects the appropriate model for each phase (planning, execution, analysis)
 */
import { LLMProviderType, Phase } from "./base.js";
import { AnthropicProvider } from "./providers/anthropic.js";
import { OllamaProvider } from "./providers/ollama.js";
import { OpenAICompatibleProvider } from "./providers/openai-compatible.js";
/**
 * Model Selector class
 * Manages per-phase model selection and provider instantiation
 */
export class ModelSelector {
    constructor(config) {
        this.selection = this.createSelection(config);
        // Per-phase model names fall back to the default model.
        const { models } = config;
        this.modelNames = {
            planning: models.planning || models.default,
            execution: models.execution || models.default,
            analysis: models.analysis || models.default,
        };
    }
    /**
     * Provider serving the given phase.
     * Phase enum values double as selection keys; any unknown phase maps to
     * the planning provider.
     */
    getProvider(phase) {
        return this.selection[phase] ?? this.selection[Phase.Planning];
    }
    /**
     * Model name used for the given phase (planning model when unknown).
     */
    getModelName(phase) {
        return this.modelNames[phase] ?? this.modelNames[Phase.Planning];
    }
    /**
     * True when at least one phase provider reports itself configured.
     */
    isConfigured() {
        const { planning, execution, analysis } = this.selection;
        return [planning, execution, analysis].some((provider) => provider.isConfigured());
    }
    /**
     * Instantiate a provider of the given type. apiKey/baseUrl are forwarded
     * only when truthy so each provider's fromEnv() can apply its fallbacks.
     */
    createProvider(providerType, model, apiKey, baseUrl) {
        switch (providerType) {
            case LLMProviderType.OpenAICompatible:
                return OpenAICompatibleProvider.fromEnv({
                    model,
                    ...(apiKey ? { apiKey } : {}),
                    ...(baseUrl ? { baseUrl } : {}),
                });
            case LLMProviderType.Anthropic:
                return AnthropicProvider.fromEnv({
                    model,
                    ...(apiKey ? { apiKey } : {}),
                });
            case LLMProviderType.Ollama:
                return OllamaProvider.fromEnv({
                    model,
                    ...(baseUrl ? { baseUrl } : {}),
                });
            default:
                throw new Error(`Unknown provider type: ${providerType}`);
        }
    }
    /**
     * Resolve one provider per phase. Each phase uses its override provider
     * when configured; otherwise it shares the default provider instance.
     */
    createSelection(config) {
        const fallback = this.createProvider(config.provider, config.models.default, config.apiKey, this.getBaseUrlForProvider(config, config.provider));
        const phaseProvider = (providerType, model, apiKey) => providerType
            ? this.createProvider(providerType, model || config.models.default, apiKey, this.getBaseUrlForProvider(config, providerType))
            : fallback;
        return {
            planning: phaseProvider(config.planningProvider, config.models.planning, config.planningApiKey),
            execution: phaseProvider(config.executionProvider, config.models.execution, config.executionApiKey),
            analysis: phaseProvider(config.analysisProvider, config.models.analysis, config.analysisApiKey),
        };
    }
    /**
     * Base URL override for the given provider type, when one applies.
     */
    getBaseUrlForProvider(config, providerType) {
        switch (providerType) {
            case LLMProviderType.Ollama:
                return config.ollamaBaseUrl;
            case LLMProviderType.OpenAICompatible:
                return config.openaiBaseUrl;
            default:
                return undefined;
        }
    }
    /**
     * Create ModelSelector from minimal config (backward compatible)
     */
    static fromSimpleConfig(config) {
        return new ModelSelector({
            enabled: config.enabled,
            provider: config.provider,
            apiKey: config.apiKey ?? "",
            models: {
                default: config.model || "gpt-4o-mini",
            },
        });
    }
}
@@ -0,0 +1,31 @@
1
/**
 * Anthropic Claude LLM Provider
 * Supports Claude models via Anthropic API
 */
import { type LLMGenerationOptions, type LLMGenerationResult, type LLMMessage, LLMProvider } from "../base.js";
/** Configuration required to talk to the Anthropic API. */
export interface AnthropicConfig {
    apiKey: string;
    model: string;
    maxTokens?: number;
}
export declare class AnthropicProvider extends LLMProvider {
    private config;
    private readonly DEFAULT_MAX_TOKENS;
    constructor(config: AnthropicConfig);
    /**
     * Generate text using Anthropic Claude API
     */
    generate(messages: LLMMessage[], options?: LLMGenerationOptions): Promise<LLMGenerationResult>;
    /**
     * Check if provider is configured
     */
    isConfigured(): boolean;
    /**
     * Create provider from environment variables
     * Explicit config values take precedence; missing values presumably come
     * from the environment — confirm against the implementation.
     */
    static fromEnv(config: {
        apiKey?: string;
        model?: string;
        maxTokens?: number;
    }): AnthropicProvider;
}