@krr2020/taskflow-core 0.1.0-beta.3 → 0.1.0-beta.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/cli/index.js +42 -4
- package/dist/commands/base.d.ts +41 -0
- package/dist/commands/base.js +141 -0
- package/dist/commands/configure.d.ts +29 -0
- package/dist/commands/configure.js +187 -0
- package/dist/commands/init.js +21 -7
- package/dist/commands/prd/create.d.ts +1 -1
- package/dist/commands/prd/create.js +29 -11
- package/dist/commands/prd/generate-arch.d.ts +1 -1
- package/dist/commands/prd/generate-arch.js +6 -5
- package/dist/commands/retro/list.js +6 -5
- package/dist/commands/tasks/generate.d.ts +1 -1
- package/dist/commands/tasks/generate.js +83 -56
- package/dist/commands/upgrade.js +49 -16
- package/dist/commands/workflow/check.d.ts +17 -0
- package/dist/commands/workflow/check.js +482 -35
- package/dist/commands/workflow/commit.js +117 -60
- package/dist/commands/workflow/do.d.ts +1 -0
- package/dist/commands/workflow/do.js +206 -13
- package/dist/commands/workflow/next.js +4 -4
- package/dist/commands/workflow/resume.js +9 -6
- package/dist/commands/workflow/start.js +11 -11
- package/dist/index.d.ts +4 -0
- package/dist/index.js +6 -0
- package/dist/lib/config-paths.d.ts +15 -15
- package/dist/lib/config-paths.js +20 -15
- package/dist/lib/file-validator.d.ts +119 -0
- package/dist/lib/file-validator.js +291 -0
- package/dist/lib/git.js +4 -2
- package/dist/lib/log-parser.d.ts +91 -0
- package/dist/lib/log-parser.js +178 -0
- package/dist/lib/retrospective.d.ts +27 -0
- package/dist/lib/retrospective.js +111 -1
- package/dist/lib/types.d.ts +19 -6
- package/dist/lib/types.js +20 -1
- package/dist/lib/validation.d.ts +0 -3
- package/dist/lib/validation.js +1 -15
- package/dist/llm/base.d.ts +52 -0
- package/dist/llm/base.js +35 -0
- package/dist/llm/factory.d.ts +39 -0
- package/dist/llm/factory.js +102 -0
- package/dist/llm/index.d.ts +7 -0
- package/dist/llm/index.js +7 -0
- package/dist/llm/model-selector.d.ts +71 -0
- package/dist/llm/model-selector.js +139 -0
- package/dist/llm/providers/anthropic.d.ts +31 -0
- package/dist/llm/providers/anthropic.js +116 -0
- package/dist/llm/providers/index.d.ts +6 -0
- package/dist/llm/providers/index.js +6 -0
- package/dist/llm/providers/ollama.d.ts +28 -0
- package/dist/llm/providers/ollama.js +91 -0
- package/dist/llm/providers/openai-compatible.d.ts +30 -0
- package/dist/llm/providers/openai-compatible.js +93 -0
- package/dist/schemas/config.d.ts +82 -0
- package/dist/schemas/config.js +35 -0
- package/dist/schemas/task.d.ts +2 -2
- package/dist/state-machine.d.ts +12 -0
- package/dist/state-machine.js +2 -2
- package/package.json +1 -1
- package/dist/lib/package-manager.d.ts +0 -17
- package/dist/lib/package-manager.js +0 -53
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Provider Factory
|
|
3
|
+
* Factory for creating LLM providers and model selectors
|
|
4
|
+
*/
|
|
5
|
+
import { LLMProviderType, } from "./base.js";
|
|
6
|
+
import { ModelSelector } from "./model-selector.js";
|
|
7
|
+
import { AnthropicProvider } from "./providers/anthropic.js";
|
|
8
|
+
import { OllamaProvider } from "./providers/ollama.js";
|
|
9
|
+
import { OpenAICompatibleProvider } from "./providers/openai-compatible.js";
|
|
10
|
+
export { LLMProvider, LLMProviderType } from "./base.js";
|
|
11
|
+
export { ModelSelector } from "./model-selector.js";
|
|
12
|
+
export { AnthropicProvider, OllamaProvider, OpenAICompatibleProvider, } from "./providers/index.js";
|
|
13
|
+
/**
 * Provider factory namespace
 * Functions for creating LLM providers and model selectors
 */
export const ProviderFactory = {
    /**
     * Create a model selector from configuration
     */
    createSelector(config) {
        return new ModelSelector(config);
    },
    /**
     * Create a single provider (backward compatible)
     * Unset apiKey/baseUrl are left out of the config so each provider's
     * fromEnv() can fall back to its environment variables.
     */
    createProvider(type, model, apiKey, baseUrl) {
        if (type === LLMProviderType.OpenAICompatible) {
            const providerConfig = { model };
            if (apiKey) {
                providerConfig.apiKey = apiKey;
            }
            if (baseUrl) {
                providerConfig.baseUrl = baseUrl;
            }
            return OpenAICompatibleProvider.fromEnv(providerConfig);
        }
        if (type === LLMProviderType.Anthropic) {
            const providerConfig = { model };
            if (apiKey) {
                providerConfig.apiKey = apiKey;
            }
            return AnthropicProvider.fromEnv(providerConfig);
        }
        if (type === LLMProviderType.Ollama) {
            const providerConfig = { model };
            if (baseUrl) {
                providerConfig.baseUrl = baseUrl;
            }
            return OllamaProvider.fromEnv(providerConfig);
        }
        throw new Error(`Unknown provider type: ${type}`);
    },
    /**
     * Test if a provider is configured and working
     * Issues one tiny generation request; never throws — failures are
     * reported through the returned { success, error } object.
     */
    async testProvider(provider) {
        try {
            if (!provider.isConfigured()) {
                return { success: false, error: "Provider not configured" };
            }
            // Simple test request
            const probe = [
                {
                    role: "user",
                    content: "Hello, please respond with just 'OK'.",
                },
            ];
            await provider.generate(probe, { maxTokens: 10 });
            return { success: true };
        }
        catch (error) {
            const message = error instanceof Error ? error.message : String(error);
            return { success: false, error: message };
        }
    },
    /**
     * Get available providers
     */
    getAvailableProviders() {
        return [
            LLMProviderType.OpenAICompatible,
            LLMProviderType.Anthropic,
            LLMProviderType.Ollama,
        ];
    },
    /**
     * Get default model for provider
     */
    getDefaultModel(providerType) {
        const defaults = {
            [LLMProviderType.OpenAICompatible]: "gpt-4o-mini",
            [LLMProviderType.Anthropic]: "claude-3-5-sonnet-20241022",
            [LLMProviderType.Ollama]: "llama2",
        };
        const model = defaults[providerType];
        if (model === undefined) {
            throw new Error(`Unknown provider type: ${providerType}`);
        }
        return model;
    },
};
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model Selector
|
|
3
|
+
* Selects the appropriate model for each phase (planning, execution, analysis)
|
|
4
|
+
*/
|
|
5
|
+
import { type LLMProvider, LLMProviderType, Phase } from "./base.js";
|
|
6
|
+
/**
 * Resolved providers, one per workflow phase.
 * All three fields are always populated; phases without an explicit
 * per-phase override share the default provider instance.
 */
export interface ModelSelection {
    /** Provider used during the planning phase. */
    planning: LLMProvider;
    /** Provider used during the execution phase. */
    execution: LLMProvider;
    /** Provider used during the analysis phase. */
    analysis: LLMProvider;
}
|
|
11
|
+
/**
 * AI configuration consumed by ModelSelector: a single default
 * provider/model plus optional per-phase overrides.
 */
export interface AIConfig {
    /** Whether AI features are enabled (not consulted by ModelSelector itself). */
    enabled: boolean;
    /** Default provider used for every phase without an override. */
    provider: LLMProviderType;
    /** API key for the default provider; may be a "${VAR}" placeholder expanded from the environment. */
    apiKey?: string;
    /** Model names; `default` is the fallback whenever a phase-specific name is unset. */
    models: {
        default: string;
        planning?: string;
        execution?: string;
        analysis?: string;
    };
    /** Optional provider override for the planning phase. */
    planningProvider?: LLMProviderType;
    /** API key used when `planningProvider` is set. */
    planningApiKey?: string;
    /** Optional provider override for the execution phase. */
    executionProvider?: LLMProviderType;
    /** API key used when `executionProvider` is set. */
    executionApiKey?: string;
    /** Optional provider override for the analysis phase. */
    analysisProvider?: LLMProviderType;
    /** API key used when `analysisProvider` is set. */
    analysisApiKey?: string;
    /** Base URL for Ollama providers (provider defaults to http://localhost:11434 when omitted). */
    ollamaBaseUrl?: string;
    /** Base URL for OpenAI-compatible providers. */
    openaiBaseUrl?: string;
}
|
|
30
|
+
/**
 * Model Selector class
 * Manages per-phase model selection and provider instantiation
 */
export declare class ModelSelector {
    /** Resolved provider instances, one per phase. */
    private selection;
    /** Resolved model names (phase override or the default), one per phase. */
    private modelNames;
    constructor(config: AIConfig);
    /**
     * Get the provider for a specific phase
     */
    getProvider(phase: Phase): LLMProvider;
    /**
     * Get the model name for a specific phase
     */
    getModelName(phase: Phase): string;
    /**
     * Check if any provider is configured
     * True when at least one of the three phase providers reports itself configured.
     */
    isConfigured(): boolean;
    /**
     * Create provider from configuration
     */
    private createProvider;
    /**
     * Create model selection from configuration
     */
    private createSelection;
    /**
     * Get base URL for provider
     */
    private getBaseUrlForProvider;
    /**
     * Create ModelSelector from minimal config (backward compatible)
     * Wraps a single provider/model into a full AIConfig; the model
     * falls back to "gpt-4o-mini" when omitted.
     */
    static fromSimpleConfig(config: {
        enabled: boolean;
        provider: LLMProviderType;
        apiKey?: string;
        model?: string;
    }): ModelSelector;
}
|
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model Selector
|
|
3
|
+
* Selects the appropriate model for each phase (planning, execution, analysis)
|
|
4
|
+
*/
|
|
5
|
+
import { LLMProviderType, Phase } from "./base.js";
|
|
6
|
+
import { AnthropicProvider } from "./providers/anthropic.js";
|
|
7
|
+
import { OllamaProvider } from "./providers/ollama.js";
|
|
8
|
+
import { OpenAICompatibleProvider } from "./providers/openai-compatible.js";
|
|
9
|
+
/**
 * Model Selector class
 * Manages per-phase model selection and provider instantiation
 */
export class ModelSelector {
    selection;
    modelNames;
    constructor(config) {
        this.selection = this.createSelection(config);
        const fallbackModel = config.models.default;
        this.modelNames = {
            planning: config.models.planning || fallbackModel,
            execution: config.models.execution || fallbackModel,
            analysis: config.models.analysis || fallbackModel,
        };
    }
    /**
     * Map a phase to the key of the per-phase lookup tables.
     * Unknown phases fall back to planning (same as the previous
     * switch defaults).
     */
    phaseKey(phase) {
        switch (phase) {
            case Phase.Execution:
                return "execution";
            case Phase.Analysis:
                return "analysis";
            case Phase.Planning:
            default:
                return "planning";
        }
    }
    /**
     * Get the provider for a specific phase
     */
    getProvider(phase) {
        return this.selection[this.phaseKey(phase)];
    }
    /**
     * Get the model name for a specific phase
     */
    getModelName(phase) {
        return this.modelNames[this.phaseKey(phase)];
    }
    /**
     * Check if any provider is configured
     */
    isConfigured() {
        const providers = [
            this.selection.planning,
            this.selection.execution,
            this.selection.analysis,
        ];
        return providers.some((provider) => provider.isConfigured());
    }
    /**
     * Create provider from configuration
     */
    createProvider(providerType, model, apiKey, baseUrl) {
        if (providerType === LLMProviderType.OpenAICompatible) {
            const providerConfig = { model };
            if (apiKey) {
                providerConfig.apiKey = apiKey;
            }
            if (baseUrl) {
                providerConfig.baseUrl = baseUrl;
            }
            return OpenAICompatibleProvider.fromEnv(providerConfig);
        }
        if (providerType === LLMProviderType.Anthropic) {
            const providerConfig = { model };
            if (apiKey) {
                providerConfig.apiKey = apiKey;
            }
            return AnthropicProvider.fromEnv(providerConfig);
        }
        if (providerType === LLMProviderType.Ollama) {
            const providerConfig = { model };
            if (baseUrl) {
                providerConfig.baseUrl = baseUrl;
            }
            return OllamaProvider.fromEnv(providerConfig);
        }
        throw new Error(`Unknown provider type: ${providerType}`);
    }
    /**
     * Create model selection from configuration
     * Phases without an explicit provider override share the default
     * provider instance.
     */
    createSelection(config) {
        const fallback = this.createProvider(config.provider, config.models.default, config.apiKey, this.getBaseUrlForProvider(config, config.provider));
        const forPhase = (providerType, model, apiKey) => {
            if (!providerType) {
                return fallback;
            }
            return this.createProvider(providerType, model || config.models.default, apiKey, this.getBaseUrlForProvider(config, providerType));
        };
        return {
            planning: forPhase(config.planningProvider, config.models.planning, config.planningApiKey),
            execution: forPhase(config.executionProvider, config.models.execution, config.executionApiKey),
            analysis: forPhase(config.analysisProvider, config.models.analysis, config.analysisApiKey),
        };
    }
    /**
     * Get base URL for provider
     */
    getBaseUrlForProvider(config, providerType) {
        switch (providerType) {
            case LLMProviderType.Ollama:
                return config.ollamaBaseUrl;
            case LLMProviderType.OpenAICompatible:
                return config.openaiBaseUrl;
            default:
                return undefined;
        }
    }
    /**
     * Create ModelSelector from minimal config (backward compatible)
     */
    static fromSimpleConfig(config) {
        const fullConfig = {
            enabled: config.enabled,
            provider: config.provider,
            apiKey: config.apiKey ?? "",
            models: {
                default: config.model || "gpt-4o-mini",
            },
        };
        return new ModelSelector(fullConfig);
    }
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Anthropic Claude LLM Provider
|
|
3
|
+
* Supports Claude models via Anthropic API
|
|
4
|
+
*/
|
|
5
|
+
import { type LLMGenerationOptions, type LLMGenerationResult, type LLMMessage, LLMProvider } from "../base.js";
|
|
6
|
+
/**
 * Configuration for the Anthropic Claude provider.
 */
export interface AnthropicConfig {
    /** Anthropic API key (sent as the `x-api-key` request header). */
    apiKey: string;
    /** Claude model identifier, e.g. "claude-3-5-sonnet-20241022". */
    model: string;
    /** Maximum tokens per response; defaults to 4096 when omitted. */
    maxTokens?: number;
}
|
|
11
|
+
export declare class AnthropicProvider extends LLMProvider {
    private config;
    /** Fallback for `maxTokens` when the config omits it (4096). */
    private readonly DEFAULT_MAX_TOKENS;
    constructor(config: AnthropicConfig);
    /**
     * Generate text using Anthropic Claude API
     * System messages are lifted into the request's top-level `system` field.
     * @throws Error when the provider is unconfigured or the API responds
     *   with a non-2xx status.
     */
    generate(messages: LLMMessage[], options?: LLMGenerationOptions): Promise<LLMGenerationResult>;
    /**
     * Check if provider is configured
     * True when an API key is present.
     */
    isConfigured(): boolean;
    /**
     * Create provider from environment variables
     * Falls back to ANTHROPIC_API_KEY / ANTHROPIC_MODEL / ANTHROPIC_MAX_TOKENS;
     * "${VAR}" placeholders in the key are expanded from the environment.
     */
    static fromEnv(config: {
        apiKey?: string;
        model?: string;
        maxTokens?: number;
    }): AnthropicProvider;
}
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Anthropic Claude LLM Provider
|
|
3
|
+
* Supports Claude models via Anthropic API
|
|
4
|
+
*/
|
|
5
|
+
import { LLMProvider, LLMProviderType, } from "../base.js";
|
|
6
|
+
export class AnthropicProvider extends LLMProvider {
    config;
    DEFAULT_MAX_TOKENS = 4096;
    /**
     * @param config - apiKey, model and optional maxTokens.
     * A shallow copy is stored so the caller's object is never mutated
     * (the previous implementation wrote maxTokens back into the argument).
     */
    constructor(config) {
        super(LLMProviderType.Anthropic, config.model);
        this.config = {
            ...config,
            maxTokens: config.maxTokens || this.DEFAULT_MAX_TOKENS,
        };
    }
    /**
     * Generate text using Anthropic Claude API
     * @param messages - chat transcript; system messages are lifted into the
     *   request's top-level `system` field as Anthropic requires.
     * @param options - optional sampling overrides (maxTokens, temperature, topP, topK).
     * @returns content, model, total token usage and finish reason.
     * @throws Error when not configured or the API returns a non-2xx status.
     */
    async generate(messages, options) {
        if (!this.isConfigured()) {
            throw new Error("Anthropic provider is not configured properly");
        }
        // Extract system messages (Anthropic separates the system prompt).
        // Fix: collect ALL system messages instead of silently keeping only
        // the last one.
        const systemParts = [];
        const apiMessages = [];
        for (const msg of messages) {
            if (msg.role === "system") {
                systemParts.push(msg.content);
            }
            else {
                apiMessages.push(msg);
            }
        }
        const requestBody = {
            model: this.config.model,
            messages: apiMessages,
            max_tokens: options?.maxTokens || this.config.maxTokens,
            stream: false,
        };
        const systemMessage = systemParts.join("\n\n");
        if (systemMessage) {
            requestBody.system = systemMessage;
        }
        if (options?.temperature !== undefined) {
            requestBody.temperature = options.temperature;
        }
        if (options?.topP !== undefined) {
            requestBody.top_p = options.topP;
        }
        if (options?.topK !== undefined) {
            requestBody.top_k = options.topK;
        }
        const response = await fetch("https://api.anthropic.com/v1/messages", {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "x-api-key": this.config.apiKey,
                "anthropic-version": "2023-06-01",
            },
            body: JSON.stringify(requestBody),
        });
        if (!response.ok) {
            const error = await response.text();
            throw new Error(`Anthropic API error: ${response.status} - ${error}`);
        }
        const data = (await response.json());
        const contentItem = data.content[0];
        if (!contentItem) {
            throw new Error("No content returned from Anthropic API");
        }
        return {
            content: contentItem.text,
            model: data.model,
            tokensUsed: (data.usage?.input_tokens ?? 0) + (data.usage?.output_tokens ?? 0),
            finishReason: data.stop_reason,
        };
    }
    /**
     * Check if provider is configured
     * @returns true when an API key is present.
     */
    isConfigured() {
        return !!this.config.apiKey;
    }
    /**
     * Create provider from environment variables
     * Fix: "${VAR}" placeholders are expanded BEFORE the missing-key check,
     * so a placeholder that expands to an empty value is reported instead of
     * being silently accepted (the previous version validated the raw,
     * unexpanded string).
     */
    static fromEnv(config) {
        const apiKey = expandEnvVar(config.apiKey || process.env.ANTHROPIC_API_KEY || "");
        const model = config.model ||
            process.env.ANTHROPIC_MODEL ||
            "claude-3-5-sonnet-20241022";
        const maxTokensValue = config.maxTokens ||
            Number.parseInt(process.env.ANTHROPIC_MAX_TOKENS || "4096", 10);
        if (!apiKey) {
            console.warn("Warning: Anthropic provider missing API key");
        }
        const maxTokens = Number.isNaN(maxTokensValue) ? undefined : maxTokensValue;
        return new AnthropicProvider({
            apiKey,
            model,
            maxTokens: maxTokens ?? 4096,
        });
    }
}
|
|
102
|
+
/**
 * Expand environment variable in string (e.g., "${VAR_NAME}" -> actual value)
 * Returns the input unchanged when it is falsy, is not a lone "${...}"
 * placeholder, or names a variable that is not set.
 */
function expandEnvVar(value) {
    if (!value) {
        return value;
    }
    const match = /^\$\{([^}]+)\}$/.exec(value);
    const name = match?.[1];
    if (!name) {
        return value;
    }
    return process.env[name] ?? value;
}
|
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Provider exports
|
|
3
|
+
*/
|
|
4
|
+
export { type AnthropicConfig, AnthropicProvider } from "./anthropic.js";
|
|
5
|
+
export { type OllamaConfig, OllamaProvider } from "./ollama.js";
|
|
6
|
+
export { type OpenAICompatibleConfig, OpenAICompatibleProvider, } from "./openai-compatible.js";
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Ollama LLM Provider
|
|
3
|
+
* Supports local LLM models via Ollama API
|
|
4
|
+
*/
|
|
5
|
+
import { type LLMGenerationOptions, type LLMGenerationResult, type LLMMessage, LLMProvider } from "../base.js";
|
|
6
|
+
/**
 * Configuration for the local Ollama provider.
 */
export interface OllamaConfig {
    /** Ollama server base URL, e.g. "http://localhost:11434". */
    baseUrl: string;
    /** Local model name, e.g. "llama2". */
    model: string;
}
|
|
10
|
+
export declare class OllamaProvider extends LLMProvider {
    private config;
    constructor(config: OllamaConfig);
    /**
     * Generate text using Ollama API
     * The chat transcript is flattened into a single "role: content" prompt
     * for the /api/generate endpoint.
     * @throws Error when the provider is unconfigured or the API responds
     *   with a non-2xx status.
     */
    generate(messages: LLMMessage[], options?: LLMGenerationOptions): Promise<LLMGenerationResult>;
    /**
     * Check if provider is configured
     * True when both baseUrl and model are set.
     */
    isConfigured(): boolean;
    /**
     * Create provider from environment variables
     * Falls back to OLLAMA_BASE_URL / OLLAMA_MODEL, then to
     * "http://localhost:11434" / "llama2".
     */
    static fromEnv(config: {
        baseUrl?: string;
        model?: string;
    }): OllamaProvider;
}
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Ollama LLM Provider
|
|
3
|
+
* Supports local LLM models via Ollama API
|
|
4
|
+
*/
|
|
5
|
+
import { LLMProvider, LLMProviderType, } from "../base.js";
|
|
6
|
+
export class OllamaProvider extends LLMProvider {
    config;
    constructor(config) {
        super(LLMProviderType.Ollama, config.model);
        this.config = config;
    }
    /**
     * Generate text using Ollama API
     * The chat transcript is flattened into a single "role: content" prompt
     * for the /api/generate endpoint.
     */
    async generate(messages, options) {
        if (!this.isConfigured()) {
            throw new Error("Ollama provider is not configured properly");
        }
        // Convert messages to Ollama's single-prompt format
        const transcript = [];
        for (const message of messages) {
            transcript.push(`${message.role}: ${message.content}`);
        }
        const payload = {
            model: this.config.model,
            prompt: transcript.join("\n\n"),
            stream: false,
        };
        if (options?.temperature !== undefined) {
            payload.temperature = options.temperature;
        }
        if (options?.topP !== undefined) {
            payload.top_p = options.topP;
        }
        if (options?.topK !== undefined) {
            payload.top_k = options.topK;
        }
        if (options?.maxTokens) {
            payload.num_predict = options.maxTokens;
        }
        const response = await fetch(`${this.config.baseUrl}/api/generate`, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
            },
            body: JSON.stringify(payload),
        });
        if (!response.ok) {
            const detail = await response.text();
            throw new Error(`Ollama API error: ${response.status} - ${detail}`);
        }
        const data = (await response.json());
        return {
            content: data.response,
            model: data.model,
            tokensUsed: data.eval_count,
            finishReason: data.done ? "stop" : "length",
        };
    }
    /**
     * Check if provider is configured
     */
    isConfigured() {
        return !!(this.config.baseUrl && this.config.model);
    }
    /**
     * Create provider from environment variables
     */
    static fromEnv(config) {
        const resolvedBaseUrl = config.baseUrl || process.env.OLLAMA_BASE_URL || "http://localhost:11434";
        const resolvedModel = config.model || process.env.OLLAMA_MODEL || "llama2";
        return new OllamaProvider({
            baseUrl: expandEnvVar(resolvedBaseUrl),
            model: expandEnvVar(resolvedModel),
        });
    }
}
|
|
77
|
+
/**
 * Expand environment variable in string (e.g., "${VAR_NAME}" -> actual value)
 * Anything that is not a lone "${...}" placeholder — or that names an unset
 * variable — is returned untouched.
 */
function expandEnvVar(value) {
    if (!value) {
        return value;
    }
    const [, name] = value.match(/^\$\{([^}]+)\}$/) ?? [];
    if (!name) {
        return value;
    }
    const resolved = process.env[name];
    return resolved === undefined ? value : resolved;
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI-compatible LLM Provider
|
|
3
|
+
* Supports: OpenAI, Azure OpenAI, Together AI, Groq, DeepSeek, and any OpenAI-compatible API
|
|
4
|
+
*/
|
|
5
|
+
import { type LLMGenerationOptions, type LLMGenerationResult, type LLMMessage, LLMProvider } from "../base.js";
|
|
6
|
+
/**
 * Configuration for any OpenAI-compatible chat endpoint
 * (OpenAI, Azure OpenAI, Together AI, Groq, DeepSeek, ...).
 */
export interface OpenAICompatibleConfig {
    /** API base URL of the endpoint. */
    baseUrl: string;
    /** API key / bearer token for the endpoint. */
    apiKey: string;
    /** Model identifier understood by the endpoint. */
    model: string;
}
|
|
11
|
+
export declare class OpenAICompatibleProvider extends LLMProvider {
    private config;
    constructor(config: OpenAICompatibleConfig);
    /**
     * Generate text using OpenAI-compatible API
     */
    generate(messages: LLMMessage[], options?: LLMGenerationOptions): Promise<LLMGenerationResult>;
    /**
     * Check if provider is configured
     */
    isConfigured(): boolean;
    /**
     * Create provider from environment variables
     * NOTE(review): implementation file not visible here; presumably falls
     * back to OPENAI_*-style environment variables like the sibling
     * providers do — confirm against openai-compatible.js.
     */
    static fromEnv(config: {
        baseUrl?: string;
        apiKey?: string;
        model?: string;
    }): OpenAICompatibleProvider;
}
|