qa360 1.4.5 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/commands/ai.d.ts +41 -0
- package/dist/commands/ai.js +499 -0
- package/dist/commands/ask.js +12 -12
- package/dist/commands/coverage.d.ts +8 -0
- package/dist/commands/coverage.js +252 -0
- package/dist/commands/explain.d.ts +27 -0
- package/dist/commands/explain.js +630 -0
- package/dist/commands/flakiness.d.ts +73 -0
- package/dist/commands/flakiness.js +435 -0
- package/dist/commands/generate.d.ts +66 -0
- package/dist/commands/generate.js +438 -0
- package/dist/commands/init.d.ts +56 -9
- package/dist/commands/init.js +217 -10
- package/dist/commands/monitor.d.ts +27 -0
- package/dist/commands/monitor.js +225 -0
- package/dist/commands/ollama.d.ts +40 -0
- package/dist/commands/ollama.js +301 -0
- package/dist/commands/pack.d.ts +37 -9
- package/dist/commands/pack.js +240 -141
- package/dist/commands/regression.d.ts +8 -0
- package/dist/commands/regression.js +340 -0
- package/dist/commands/repair.d.ts +26 -0
- package/dist/commands/repair.js +307 -0
- package/dist/commands/retry.d.ts +43 -0
- package/dist/commands/retry.js +275 -0
- package/dist/commands/run.d.ts +8 -3
- package/dist/commands/run.js +45 -31
- package/dist/commands/slo.d.ts +8 -0
- package/dist/commands/slo.js +327 -0
- package/dist/core/adapters/playwright-native-api.d.ts +183 -0
- package/dist/core/adapters/playwright-native-api.js +461 -0
- package/dist/core/adapters/playwright-ui.d.ts +7 -0
- package/dist/core/adapters/playwright-ui.js +29 -1
- package/dist/core/ai/anthropic-provider.d.ts +50 -0
- package/dist/core/ai/anthropic-provider.js +211 -0
- package/dist/core/ai/deepseek-provider.d.ts +81 -0
- package/dist/core/ai/deepseek-provider.js +254 -0
- package/dist/core/ai/index.d.ts +60 -0
- package/dist/core/ai/index.js +18 -0
- package/dist/core/ai/llm-client.d.ts +45 -0
- package/dist/core/ai/llm-client.js +7 -0
- package/dist/core/ai/mock-provider.d.ts +49 -0
- package/dist/core/ai/mock-provider.js +121 -0
- package/dist/core/ai/ollama-provider.d.ts +78 -0
- package/dist/core/ai/ollama-provider.js +192 -0
- package/dist/core/ai/openai-provider.d.ts +48 -0
- package/dist/core/ai/openai-provider.js +188 -0
- package/dist/core/ai/provider-factory.d.ts +160 -0
- package/dist/core/ai/provider-factory.js +269 -0
- package/dist/core/auth/api-key-provider.d.ts +16 -0
- package/dist/core/auth/api-key-provider.js +63 -0
- package/dist/core/auth/aws-iam-provider.d.ts +35 -0
- package/dist/core/auth/aws-iam-provider.js +177 -0
- package/dist/core/auth/azure-ad-provider.d.ts +15 -0
- package/dist/core/auth/azure-ad-provider.js +99 -0
- package/dist/core/auth/basic-auth-provider.d.ts +26 -0
- package/dist/core/auth/basic-auth-provider.js +111 -0
- package/dist/core/auth/gcp-adc-provider.d.ts +27 -0
- package/dist/core/auth/gcp-adc-provider.js +126 -0
- package/dist/core/auth/index.d.ts +238 -0
- package/dist/core/auth/index.js +82 -0
- package/dist/core/auth/jwt-provider.d.ts +19 -0
- package/dist/core/auth/jwt-provider.js +160 -0
- package/dist/core/auth/manager.d.ts +84 -0
- package/dist/core/auth/manager.js +230 -0
- package/dist/core/auth/oauth2-provider.d.ts +17 -0
- package/dist/core/auth/oauth2-provider.js +114 -0
- package/dist/core/auth/totp-provider.d.ts +31 -0
- package/dist/core/auth/totp-provider.js +134 -0
- package/dist/core/auth/ui-login-provider.d.ts +26 -0
- package/dist/core/auth/ui-login-provider.js +198 -0
- package/dist/core/cache/index.d.ts +7 -0
- package/dist/core/cache/index.js +6 -0
- package/dist/core/cache/lru-cache.d.ts +203 -0
- package/dist/core/cache/lru-cache.js +397 -0
- package/dist/core/coverage/analyzer.d.ts +101 -0
- package/dist/core/coverage/analyzer.js +415 -0
- package/dist/core/coverage/collector.d.ts +74 -0
- package/dist/core/coverage/collector.js +459 -0
- package/dist/core/coverage/config.d.ts +37 -0
- package/dist/core/coverage/config.js +156 -0
- package/dist/core/coverage/index.d.ts +11 -0
- package/dist/core/coverage/index.js +15 -0
- package/dist/core/coverage/types.d.ts +267 -0
- package/dist/core/coverage/types.js +6 -0
- package/dist/core/coverage/vault.d.ts +95 -0
- package/dist/core/coverage/vault.js +405 -0
- package/dist/core/dashboard/assets.d.ts +6 -0
- package/dist/core/dashboard/assets.js +690 -0
- package/dist/core/dashboard/index.d.ts +6 -0
- package/dist/core/dashboard/index.js +5 -0
- package/dist/core/dashboard/server.d.ts +72 -0
- package/dist/core/dashboard/server.js +354 -0
- package/dist/core/dashboard/types.d.ts +70 -0
- package/dist/core/dashboard/types.js +5 -0
- package/dist/core/discoverer/index.d.ts +115 -0
- package/dist/core/discoverer/index.js +250 -0
- package/dist/core/flakiness/index.d.ts +228 -0
- package/dist/core/flakiness/index.js +384 -0
- package/dist/core/generation/code-formatter.d.ts +111 -0
- package/dist/core/generation/code-formatter.js +307 -0
- package/dist/core/generation/code-generator.d.ts +144 -0
- package/dist/core/generation/code-generator.js +293 -0
- package/dist/core/generation/generator.d.ts +40 -0
- package/dist/core/generation/generator.js +76 -0
- package/dist/core/generation/index.d.ts +30 -0
- package/dist/core/generation/index.js +28 -0
- package/dist/core/generation/pack-generator.d.ts +107 -0
- package/dist/core/generation/pack-generator.js +416 -0
- package/dist/core/generation/prompt-builder.d.ts +132 -0
- package/dist/core/generation/prompt-builder.js +672 -0
- package/dist/core/generation/source-analyzer.d.ts +213 -0
- package/dist/core/generation/source-analyzer.js +657 -0
- package/dist/core/generation/test-optimizer.d.ts +117 -0
- package/dist/core/generation/test-optimizer.js +328 -0
- package/dist/core/generation/types.d.ts +214 -0
- package/dist/core/generation/types.js +4 -0
- package/dist/core/index.d.ts +23 -1
- package/dist/core/index.js +39 -0
- package/dist/core/pack/validator.js +31 -1
- package/dist/core/pack-v2/index.d.ts +9 -0
- package/dist/core/pack-v2/index.js +8 -0
- package/dist/core/pack-v2/loader.d.ts +62 -0
- package/dist/core/pack-v2/loader.js +231 -0
- package/dist/core/pack-v2/migrator.d.ts +56 -0
- package/dist/core/pack-v2/migrator.js +455 -0
- package/dist/core/pack-v2/validator.d.ts +61 -0
- package/dist/core/pack-v2/validator.js +577 -0
- package/dist/core/regression/detector.d.ts +107 -0
- package/dist/core/regression/detector.js +497 -0
- package/dist/core/regression/index.d.ts +9 -0
- package/dist/core/regression/index.js +11 -0
- package/dist/core/regression/trend-analyzer.d.ts +102 -0
- package/dist/core/regression/trend-analyzer.js +345 -0
- package/dist/core/regression/types.d.ts +222 -0
- package/dist/core/regression/types.js +7 -0
- package/dist/core/regression/vault.d.ts +87 -0
- package/dist/core/regression/vault.js +289 -0
- package/dist/core/repair/engine/fixer.d.ts +24 -0
- package/dist/core/repair/engine/fixer.js +226 -0
- package/dist/core/repair/engine/suggestion-engine.d.ts +18 -0
- package/dist/core/repair/engine/suggestion-engine.js +187 -0
- package/dist/core/repair/index.d.ts +10 -0
- package/dist/core/repair/index.js +13 -0
- package/dist/core/repair/repairer.d.ts +90 -0
- package/dist/core/repair/repairer.js +284 -0
- package/dist/core/repair/types.d.ts +91 -0
- package/dist/core/repair/types.js +6 -0
- package/dist/core/repair/utils/error-analyzer.d.ts +28 -0
- package/dist/core/repair/utils/error-analyzer.js +264 -0
- package/dist/core/retry/flakiness-integration.d.ts +60 -0
- package/dist/core/retry/flakiness-integration.js +228 -0
- package/dist/core/retry/index.d.ts +14 -0
- package/dist/core/retry/index.js +16 -0
- package/dist/core/retry/retry-engine.d.ts +80 -0
- package/dist/core/retry/retry-engine.js +296 -0
- package/dist/core/retry/types.d.ts +178 -0
- package/dist/core/retry/types.js +52 -0
- package/dist/core/retry/vault.d.ts +77 -0
- package/dist/core/retry/vault.js +304 -0
- package/dist/core/runner/e2e-helpers.d.ts +102 -0
- package/dist/core/runner/e2e-helpers.js +153 -0
- package/dist/core/runner/phase3-runner.d.ts +101 -2
- package/dist/core/runner/phase3-runner.js +559 -24
- package/dist/core/self-healing/assertion-healer.d.ts +97 -0
- package/dist/core/self-healing/assertion-healer.js +371 -0
- package/dist/core/self-healing/engine.d.ts +122 -0
- package/dist/core/self-healing/engine.js +538 -0
- package/dist/core/self-healing/index.d.ts +10 -0
- package/dist/core/self-healing/index.js +11 -0
- package/dist/core/self-healing/selector-healer.d.ts +103 -0
- package/dist/core/self-healing/selector-healer.js +372 -0
- package/dist/core/self-healing/types.d.ts +152 -0
- package/dist/core/self-healing/types.js +6 -0
- package/dist/core/slo/config.d.ts +107 -0
- package/dist/core/slo/config.js +360 -0
- package/dist/core/slo/index.d.ts +11 -0
- package/dist/core/slo/index.js +15 -0
- package/dist/core/slo/sli-calculator.d.ts +92 -0
- package/dist/core/slo/sli-calculator.js +364 -0
- package/dist/core/slo/slo-tracker.d.ts +148 -0
- package/dist/core/slo/slo-tracker.js +379 -0
- package/dist/core/slo/types.d.ts +281 -0
- package/dist/core/slo/types.js +7 -0
- package/dist/core/slo/vault.d.ts +102 -0
- package/dist/core/slo/vault.js +427 -0
- package/dist/core/tui/index.d.ts +7 -0
- package/dist/core/tui/index.js +6 -0
- package/dist/core/tui/monitor.d.ts +92 -0
- package/dist/core/tui/monitor.js +271 -0
- package/dist/core/tui/renderer.d.ts +33 -0
- package/dist/core/tui/renderer.js +218 -0
- package/dist/core/tui/types.d.ts +63 -0
- package/dist/core/tui/types.js +5 -0
- package/dist/core/types/pack-v2.d.ts +425 -0
- package/dist/core/types/pack-v2.js +8 -0
- package/dist/core/vault/index.d.ts +116 -0
- package/dist/core/vault/index.js +400 -5
- package/dist/core/watch/index.d.ts +7 -0
- package/dist/core/watch/index.js +6 -0
- package/dist/core/watch/watch-mode.d.ts +213 -0
- package/dist/core/watch/watch-mode.js +389 -0
- package/dist/index.js +68 -68
- package/dist/utils/config.d.ts +5 -0
- package/dist/utils/config.js +136 -0
- package/package.json +5 -1
- package/dist/core/adapters/playwright-api.d.ts +0 -82
- package/dist/core/adapters/playwright-api.js +0 -264
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
/**
 * Anthropic LLM Provider
 *
 * Provider for the Anthropic Messages API (https://api.anthropic.com).
 * Supports Claude 3.5 Sonnet, Claude 3 Opus, and Claude 3 Haiku.
 *
 * Environment variables:
 * - ANTHROPIC_API_KEY: Required API key
 * - ANTHROPIC_BASE_URL: Optional base URL (default: https://api.anthropic.com)
 * - ANTHROPIC_MODEL: Model to use (default: claude-3-5-sonnet-20241022)
 */
export class AnthropicProvider {
    name = 'anthropic';
    // Recommended models for code generation
    models = [
        'claude-3-5-sonnet-20241022', // Most capable for code
        'claude-3-5-sonnet-20240620', // Previous Sonnet version
        'claude-3-opus-20240229', // Most capable overall
        'claude-3-haiku-20240307', // Fastest, cheapest
    ];
    apiKey;
    baseURL;
    defaultModel;
    timeout;
    version = '2023-06-01'; // API version header
    /**
     * Accepts optional overrides for apiKey, baseURL, model, and timeout.
     * Each falls back to its ANTHROPIC_* environment variable and then to a
     * built-in default. No validation happens here; a missing key is only
     * reported when a request is attempted.
     */
    constructor(config = {}) {
        this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY || '';
        this.baseURL = config.baseURL || process.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com';
        this.defaultModel = config.model || process.env.ANTHROPIC_MODEL || 'claude-3-5-sonnet-20241022';
        this.timeout = config.timeout || 60000; // 1 minute default
    }
    /**
     * Probe the API with a minimal one-token request (5s timeout).
     * Returns false when no key is configured or on a network error.
     * Any response other than 401/403 counts as "available": Anthropic may
     * return 400 for this probe body yet still be reachable.
     * Note: this sends a real (1 max_tokens) request, so it may incur a
     * trivial cost.
     */
    async isAvailable() {
        // Check if API key is present
        if (!this.apiKey) {
            return false;
        }
        // Try a minimal API call
        try {
            const response = await fetch(`${this.baseURL}/v1/messages`, {
                method: 'POST',
                headers: {
                    'x-api-key': this.apiKey,
                    'anthropic-version': this.version,
                    'content-type': 'application/json',
                },
                signal: AbortSignal.timeout(5000),
                body: JSON.stringify({
                    model: this.defaultModel,
                    max_tokens: 1,
                    messages: [{ role: 'user', content: 'Hi' }]
                })
            });
            // Anthropic may return 400 for malformed request but still be available
            // We consider it available if we get any response (not a network error)
            return response.status !== 401 && response.status !== 403;
        }
        catch {
            return false;
        }
    }
    /**
     * Run a single (non-streaming) completion.
     *
     * Throws AnthropicError when the key is missing, on a non-2xx HTTP
     * status, or when the response carries an `error` object.
     * Returns { content, usage, model, finishReason }.
     */
    async generate(request) {
        if (!this.apiKey) {
            throw new AnthropicError('ANTHROPIC_API_KEY is required', {
                suggestion: 'Set ANTHROPIC_API_KEY environment variable or pass apiKey in config'
            });
        }
        const messages = this.buildMessages(request);
        const response = await fetch(`${this.baseURL}/v1/messages`, {
            method: 'POST',
            headers: {
                'x-api-key': this.apiKey,
                'anthropic-version': this.version,
                'content-type': 'application/json',
            },
            signal: AbortSignal.timeout(this.timeout),
            body: JSON.stringify({
                model: this.defaultModel,
                messages,
                system: request.systemPrompt,
                temperature: request.temperature ?? 0.7,
                max_tokens: request.maxTokens ?? 4096,
            })
        });
        if (!response.ok) {
            const error = await response.text().catch(() => 'Unknown error');
            throw new AnthropicError(`Anthropic request failed: ${response.status} ${error}`, {
                status: response.status,
                response: error
            });
        }
        const data = await response.json();
        if (data.error) {
            throw new AnthropicError(`Anthropic API error: ${data.error.message}`, {
                type: data.error.type
            });
        }
        const content = this.extractContent(data.content);
        return {
            content,
            usage: {
                promptTokens: data.usage?.input_tokens || 0,
                completionTokens: data.usage?.output_tokens || 0,
                totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0),
            },
            model: data.model,
            // FIX: 'stop_sequence' is also a normal completion per the
            // Messages API; it was previously misreported as 'length'.
            // Only truncation (max_tokens) maps to 'length'.
            finishReason: data.stop_reason === 'end_turn' || data.stop_reason === 'stop_sequence'
                ? 'stop'
                : 'length'
        };
    }
    /**
     * Stream a completion as an async iterable of text chunks, parsed from
     * the API's server-sent events (content_block_delta deltas).
     * NOTE(review): unlike generate(), no timeout signal is applied here —
     * presumably to avoid aborting long-running streams; confirm intent.
     */
    async *stream(request) {
        if (!this.apiKey) {
            throw new AnthropicError('ANTHROPIC_API_KEY is required');
        }
        const messages = this.buildMessages(request);
        const response = await fetch(`${this.baseURL}/v1/messages`, {
            method: 'POST',
            headers: {
                'x-api-key': this.apiKey,
                'anthropic-version': this.version,
                'content-type': 'application/json',
            },
            body: JSON.stringify({
                model: this.defaultModel,
                messages,
                system: request.systemPrompt,
                temperature: request.temperature ?? 0.7,
                max_tokens: request.maxTokens ?? 4096,
                stream: true,
            })
        });
        if (!response.ok) {
            throw new AnthropicError(`Anthropic stream failed: ${response.status}`);
        }
        const reader = response.body?.getReader();
        if (!reader) {
            throw new AnthropicError('No response body');
        }
        const decoder = new TextDecoder();
        let buffer = '';
        while (true) {
            const { done, value } = await reader.read();
            if (done)
                break;
            buffer += decoder.decode(value, { stream: true });
            // SSE frames are newline-delimited; keep the trailing partial
            // line in the buffer until the next chunk completes it.
            const lines = buffer.split('\n');
            buffer = lines.pop() || '';
            for (const line of lines) {
                if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    // Defensive: '[DONE]' is the OpenAI sentinel; Anthropic
                    // signals completion via a message_stop event instead.
                    if (data === '[DONE]') {
                        return;
                    }
                    try {
                        const parsed = JSON.parse(data);
                        if (parsed.type === 'content_block_delta') {
                            const content = parsed.delta?.text;
                            if (content) {
                                yield content;
                            }
                        }
                    }
                    catch {
                        // Ignore invalid JSON
                    }
                }
            }
        }
    }
    /**
     * Approximate token count (~4 characters per token for English text);
     * avoids a tokenizer dependency.
     */
    countTokens(text) {
        return Math.ceil(text.length / 4);
    }
    // Anthropic takes the system prompt as a top-level `system` field, so
    // the messages array carries only the user prompt.
    buildMessages(request) {
        return [{ role: 'user', content: request.prompt }];
    }
    // Anthropic returns content as an array of blocks; join the text blocks
    // and ignore others (e.g. tool_use). Non-array input is stringified,
    // with null/undefined becoming ''.
    extractContent(content) {
        if (Array.isArray(content)) {
            return content
                .filter((block) => block?.type === 'text')
                .map(block => block.text)
                .join('');
        }
        return String(content ?? '');
    }
}
|
|
187
|
+
/**
 * Error type raised for all Anthropic provider failures (missing API key,
 * non-2xx HTTP responses, API-level error payloads). The optional
 * `details` bag carries context such as HTTP status or raw response text.
 */
export class AnthropicError extends Error {
    code = 'ANTHROPIC_ERROR';
    details;
    constructor(message, details) {
        super(message);
        this.details = details;
        this.name = 'AnthropicError';
    }
}
|
|
199
|
+
/**
 * Factory helper: build an AnthropicProvider from the given configuration
 * (environment defaults apply for anything omitted).
 */
export function createAnthropicProvider(config) {
    const provider = new AnthropicProvider(config);
    return provider;
}
|
|
205
|
+
/**
 * Quick availability check: resolves true when an API key is configured
 * and the Anthropic API answers without an auth error.
 */
export async function checkAnthropicSetup() {
    return new AnthropicProvider().isAvailable();
}
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
/**
 * DeepSeek LLM Provider
 *
 * Provider for DeepSeek API (https://api.deepseek.com).
 * DeepSeek offers GPT-4 level performance at 95% lower cost.
 *
 * Environment variables:
 * - DEEPSEEK_API_KEY: Required API key
 * - DEEPSEEK_BASE_URL: Optional base URL (default: https://api.deepseek.com/v1)
 * - DEEPSEEK_MODEL: Model to use (default: deepseek-chat)
 *
 * Pricing (as of 2025):
 * - deepseek-chat: $0.14/M input tokens, $0.28/M output tokens
 * - deepseek-coder: $0.14/M input tokens, $0.28/M output tokens
 *
 * Compared to GPT-4:
 * - GPT-4: ~$30/M input, ~$60/M output
 * - Savings: ~99.5% cheaper
 */
import type { LLMProvider, GenerationRequest, GenerationResponse } from './index.js';
/** Constructor options; each field falls back to its DEEPSEEK_* env var. */
export interface DeepSeekConfig {
    apiKey?: string;
    baseURL?: string;
    model?: string;
    /** Request timeout in milliseconds. */
    timeout?: number;
}
/**
 * DeepSeek provider for cost-effective AI inference
 *
 * DeepSeek V3 matches GPT-4 performance at a fraction of the cost.
 * Perfect for test generation, code analysis, and quality assurance tasks.
 */
export declare class DeepSeekProvider implements LLMProvider {
    name: string;
    models: string[];
    private readonly apiKey;
    private readonly baseURL;
    private readonly defaultModel;
    private readonly timeout;
    constructor(config?: DeepSeekConfig);
    isAvailable(): Promise<boolean>;
    generate(request: GenerationRequest): Promise<GenerationResponse>;
    stream(request: GenerationRequest): AsyncIterable<string>;
    countTokens(text: string): number;
    private buildMessages;
    /**
     * Get cost estimate for a generation
     * DeepSeek pricing: $0.14/M input, $0.28/M output
     */
    estimateCost(inputTokens: number, outputTokens: number): number;
    /**
     * Compare cost to GPT-4
     * GPT-4: ~$30/M input, ~$60/M output
     */
    compareGPT4(inputTokens: number, outputTokens: number): {
        deepseekCost: number;
        gpt4Cost: number;
        savings: number;
        savingsPercent: number;
    };
}
/**
 * DeepSeek-specific error
 */
export declare class DeepSeekError extends Error {
    code: string;
    details?: Record<string, unknown>;
    constructor(message: string, details?: Record<string, unknown>);
}
/**
 * Create DeepSeek provider with default settings
 */
export declare function createDeepSeekProvider(config?: DeepSeekConfig): DeepSeekProvider;
/**
 * Check if DeepSeek is available and configured
 */
export declare function checkDeepSeekSetup(): Promise<boolean>;
/**
 * Get DeepSeek API key setup instructions
 */
export declare function getDeepSeekSetupInstructions(): string;
|
|
@@ -0,0 +1,254 @@
|
|
|
1
|
+
/**
 * DeepSeek LLM Provider
 *
 * Provider for the DeepSeek API (https://api.deepseek.com), an
 * OpenAI-compatible chat-completions endpoint.
 * DeepSeek offers GPT-4 level performance at 95% lower cost.
 *
 * Environment variables:
 * - DEEPSEEK_API_KEY: Required API key
 * - DEEPSEEK_BASE_URL: Optional base URL (default: https://api.deepseek.com;
 *   note: '/v1' is appended per endpoint by this class, so do not include it)
 * - DEEPSEEK_MODEL: Model to use (default: deepseek-chat)
 *
 * Pricing (as of 2025):
 * - deepseek-chat: $0.14/M input tokens, $0.28/M output tokens
 * - deepseek-coder: $0.14/M input tokens, $0.28/M output tokens
 *
 * Compared to GPT-4:
 * - GPT-4: ~$30/M input, ~$60/M output
 * - Savings: ~99.5% cheaper
 */
export class DeepSeekProvider {
    name = 'deepseek';
    // Available DeepSeek models
    models = [
        'deepseek-chat', // General purpose, GPT-4 level
        'deepseek-coder', // Specialized for code tasks
        'deepseek-reasoner', // Enhanced reasoning capabilities
    ];
    apiKey;
    baseURL;
    defaultModel;
    timeout;
    /**
     * Accepts optional overrides for apiKey, baseURL, model, and timeout.
     * Each falls back to its DEEPSEEK_* environment variable and then to a
     * built-in default. A missing key is only reported when a request is
     * attempted.
     */
    constructor(config = {}) {
        this.apiKey = config.apiKey || process.env.DEEPSEEK_API_KEY || '';
        this.baseURL = config.baseURL || process.env.DEEPSEEK_BASE_URL || 'https://api.deepseek.com';
        this.defaultModel = config.model || process.env.DEEPSEEK_MODEL || 'deepseek-chat';
        this.timeout = config.timeout || 60000; // 1 minute default
    }
    /**
     * Cheap availability probe: GET /v1/models with a 5s timeout.
     * Returns false when no key is configured, on network failure, or on
     * any non-2xx response.
     */
    async isAvailable() {
        // Check if API key is present
        if (!this.apiKey) {
            return false;
        }
        // Try a minimal API call
        try {
            const response = await fetch(`${this.baseURL}/v1/models`, {
                method: 'GET',
                headers: {
                    'Authorization': `Bearer ${this.apiKey}`,
                },
                signal: AbortSignal.timeout(5000),
            });
            return response.ok;
        }
        catch {
            return false;
        }
    }
    /**
     * Run a single (non-streaming) chat completion.
     *
     * Throws DeepSeekError when the key is missing, on a non-2xx HTTP
     * status, or when no choices come back.
     * Returns { content, usage, model, finishReason }.
     *
     * NOTE(review): request.jsonMode is accepted by the GenerationRequest
     * interface but not forwarded here (DeepSeek supports OpenAI-style
     * response_format json_object) — confirm before wiring it up.
     */
    async generate(request) {
        if (!this.apiKey) {
            throw new DeepSeekError('DEEPSEEK_API_KEY is required', {
                suggestion: 'Set DEEPSEEK_API_KEY environment variable or pass apiKey in config',
                getApiKey: 'Get your API key at https://platform.deepseek.com/api_keys'
            });
        }
        const messages = this.buildMessages(request);
        const response = await fetch(`${this.baseURL}/v1/chat/completions`, {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${this.apiKey}`,
                'Content-Type': 'application/json',
            },
            signal: AbortSignal.timeout(this.timeout),
            body: JSON.stringify({
                model: this.defaultModel,
                messages,
                temperature: request.temperature ?? 0.7,
                max_tokens: request.maxTokens ?? 4096,
            })
        });
        if (!response.ok) {
            const errorText = await response.text().catch(() => 'Unknown error');
            throw new DeepSeekError(`DeepSeek request failed: ${response.status} ${errorText}`, {
                status: response.status,
                response: errorText
            });
        }
        const data = await response.json();
        const choice = data.choices?.[0];
        if (!choice) {
            throw new DeepSeekError('No choices returned from DeepSeek');
        }
        return {
            content: choice.message.content,
            usage: {
                promptTokens: data.usage?.prompt_tokens || 0,
                completionTokens: data.usage?.completion_tokens || 0,
                totalTokens: data.usage?.total_tokens || 0,
            },
            model: data.model,
            // FIX: previously 'content_filter' was collapsed into 'length',
            // contradicting the declared GenerationResponse finishReason
            // union. Preserve it; anything else non-'stop' maps to 'length'.
            finishReason: choice.finish_reason === 'stop'
                ? 'stop'
                : choice.finish_reason === 'content_filter'
                    ? 'content_filter'
                    : 'length'
        };
    }
    /**
     * Stream a chat completion as an async iterable of text chunks, parsed
     * from OpenAI-style server-sent events terminated by a '[DONE]' frame.
     * NOTE(review): unlike generate(), no timeout signal is applied here —
     * presumably to avoid aborting long-running streams; confirm intent.
     */
    async *stream(request) {
        if (!this.apiKey) {
            throw new DeepSeekError('DEEPSEEK_API_KEY is required');
        }
        const messages = this.buildMessages(request);
        const response = await fetch(`${this.baseURL}/v1/chat/completions`, {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${this.apiKey}`,
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                model: this.defaultModel,
                messages,
                temperature: request.temperature ?? 0.7,
                max_tokens: request.maxTokens ?? 4096,
                stream: true,
            })
        });
        if (!response.ok) {
            throw new DeepSeekError(`DeepSeek stream failed: ${response.status}`);
        }
        const reader = response.body?.getReader();
        if (!reader) {
            throw new DeepSeekError('No response body');
        }
        const decoder = new TextDecoder();
        let buffer = '';
        while (true) {
            const { done, value } = await reader.read();
            if (done)
                break;
            buffer += decoder.decode(value, { stream: true });
            // SSE frames are newline-delimited; keep the trailing partial
            // line in the buffer until the next chunk completes it.
            const lines = buffer.split('\n');
            buffer = lines.pop() || '';
            for (const line of lines) {
                if (line.startsWith('data: ')) {
                    const data = line.slice(6);
                    if (data === '[DONE]') {
                        return;
                    }
                    try {
                        const parsed = JSON.parse(data);
                        const content = parsed.choices?.[0]?.delta?.content;
                        if (content) {
                            yield content;
                        }
                    }
                    catch {
                        // Ignore invalid JSON
                    }
                }
            }
        }
    }
    /**
     * Approximate token count.
     * DeepSeek uses the same tokenizer as GPT-4; roughly 4 chars per token.
     */
    countTokens(text) {
        return Math.ceil(text.length / 4);
    }
    // OpenAI-compatible message array: optional system message first, then
    // the user prompt.
    buildMessages(request) {
        const messages = [];
        if (request.systemPrompt) {
            messages.push({ role: 'system', content: request.systemPrompt });
        }
        messages.push({ role: 'user', content: request.prompt });
        return messages;
    }
    /**
     * Get cost estimate (USD) for a generation.
     * DeepSeek pricing: $0.14/M input, $0.28/M output
     */
    estimateCost(inputTokens, outputTokens) {
        const inputCost = (inputTokens / 1_000_000) * 0.14;
        const outputCost = (outputTokens / 1_000_000) * 0.28;
        return inputCost + outputCost;
    }
    /**
     * Compare cost to GPT-4 for the same token counts.
     * GPT-4: ~$30/M input, ~$60/M output
     * Returns absolute costs, absolute savings, and savings as a percent
     * of the GPT-4 cost.
     */
    compareGPT4(inputTokens, outputTokens) {
        const deepseekCost = this.estimateCost(inputTokens, outputTokens);
        const gpt4Cost = (inputTokens / 1_000_000) * 30 + (outputTokens / 1_000_000) * 60;
        const savings = gpt4Cost - deepseekCost;
        const savingsPercent = (savings / gpt4Cost) * 100;
        return {
            deepseekCost,
            gpt4Cost,
            savings,
            savingsPercent
        };
    }
}
|
|
203
|
+
/**
 * Error type raised for all DeepSeek provider failures (missing API key,
 * non-2xx HTTP responses, empty choice lists). The optional `details`
 * bag carries context such as HTTP status or raw response text.
 */
export class DeepSeekError extends Error {
    code = 'DEEPSEEK_ERROR';
    details;
    constructor(message, details) {
        super(message);
        this.details = details;
        this.name = 'DeepSeekError';
    }
}
|
|
215
|
+
/**
 * Factory helper: build a DeepSeekProvider from the given configuration
 * (environment defaults apply for anything omitted).
 */
export function createDeepSeekProvider(config) {
    const provider = new DeepSeekProvider(config);
    return provider;
}
|
|
221
|
+
/**
 * Quick availability check: resolves true when an API key is configured
 * and the DeepSeek models endpoint answers successfully.
 */
export async function checkDeepSeekSetup() {
    return new DeepSeekProvider().isAvailable();
}
|
|
228
|
+
/**
 * Get DeepSeek API key setup instructions.
 *
 * Returns a multi-line help string (meant for terminal output) covering
 * key creation, configuration via env var or `qa360 secrets`, available
 * models, and pricing.
 */
export function getDeepSeekSetupInstructions() {
    // NOTE(review): this template is emitted verbatim to users; the exact
    // leading whitespace inside it is part of the output. The original
    // indentation was lost in extraction — confirm against the published
    // package before changing anything here.
    return `
DeepSeek API Setup:

1. Get your API key:
https://platform.deepseek.com/api_keys

2. Set environment variable:
export DEEPSEEK_API_KEY=your_api_key_here

3. Or configure in QA360:
qa360 secrets add DEEPSEEK_API_KEY

Available models:
- deepseek-chat (General purpose, GPT-4 level)
- deepseek-coder (Code specialized)
- deepseek-reasoner (Enhanced reasoning)

Pricing (as of 2025):
- Input: $0.14 per million tokens
- Output: $0.28 per million tokens
- ~99.5% cheaper than GPT-4
`;
}
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* QA360 AI Module - LLM Providers
|
|
3
|
+
*
|
|
4
|
+
* Multi-provider AI integration for test generation and analysis.
|
|
5
|
+
* Supports DeepSeek (best value), Ollama (local), OpenAI, Anthropic, and Mock providers.
|
|
6
|
+
*
|
|
7
|
+
* Default behavior: Automatically selects the first available provider
|
|
8
|
+
* using the factory pattern. DeepSeek is preferred for best value/cost ratio.
|
|
9
|
+
*/
|
|
10
|
+
// Concrete providers: each module re-exports its class, factory, setup check,
// config type, and (where defined) its error class.
export { OllamaProvider, createOllamaProvider, checkOllamaSetup, type OllamaConfig } from './ollama-provider.js';
export { OpenAIProvider, createOpenAIProvider, checkOpenAISetup, type OpenAIConfig, OpenAIError } from './openai-provider.js';
export { AnthropicProvider, createAnthropicProvider, checkAnthropicSetup, type AnthropicConfig, AnthropicError } from './anthropic-provider.js';
export { DeepSeekProvider, createDeepSeekProvider, checkDeepSeekSetup, getDeepSeekSetupInstructions, type DeepSeekConfig, DeepSeekError } from './deepseek-provider.js';
export { MockProvider, createMockProvider, createStandardMockProvider, MOCK_RESPONSES, type MockProviderConfig } from './mock-provider.js';
// Factory layer: automatic provider selection and fallback chaining.
export { FallbackProvider, createLLMProvider, createBest, createResilientLLMProvider, getProviderStatus, getProviderInfo, detectBestProvider, hasAnyProvider, type ProviderFactoryConfig, type ProviderType, type ProviderSelection, type ProviderInfo, PROVIDER_CHAIN, } from './provider-factory.js';
|
|
16
|
+
/**
 * Base interface for LLM providers.
 *
 * Implemented by each concrete provider exported from this module so
 * callers can switch providers without changing application code.
 */
export interface LLMProvider {
    /** Provider identifier. */
    name: string;
    /** Model identifiers this provider can serve. */
    models: string[];
    /** Generate a completion for the given request. */
    generate(request: GenerationRequest): Promise<GenerationResponse>;
    /**
     * Streamed generation (optional) — yields content chunks as strings.
     */
    stream?(request: GenerationRequest): AsyncIterable<string>;
    /**
     * Count tokens in text (approximate)
     */
    countTokens(text: string): number;
    /**
     * Check if provider is available
     * (presumably: API key configured / service reachable — varies per provider)
     */
    isAvailable(): Promise<boolean>;
}
|
|
36
|
+
/**
 * Request for LLM generation.
 */
export interface GenerationRequest {
    /** User prompt text. */
    prompt: string;
    /** Optional system prompt (omitted → provider decides). */
    systemPrompt?: string;
    /** Optional cap on generated tokens. */
    maxTokens?: number;
    /** Optional sampling temperature. */
    temperature?: number;
    /** When true, request JSON-formatted output from the provider. */
    jsonMode?: boolean;
}
|
|
46
|
+
/**
 * Response from LLM.
 */
export interface GenerationResponse {
    /** Generated text content. */
    content: string;
    /** Token accounting for the call. */
    usage: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
    /** Model that produced this response. */
    model: string;
    /** Why generation ended: natural stop, length cap, or content filter. */
    finishReason: 'stop' | 'length' | 'content_filter';
}
|
|
59
|
+
// NOTE(review): these Ollama symbols are exported separately from the first
// ollama-provider export line above — consider consolidating into one export.
export type { ModelInfo, PullProgress } from './ollama-provider.js';
export { OllamaError } from './ollama-provider.js';
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* QA360 AI Module - LLM Providers
|
|
3
|
+
*
|
|
4
|
+
* Multi-provider AI integration for test generation and analysis.
|
|
5
|
+
* Supports DeepSeek (best value), Ollama (local), OpenAI, Anthropic, and Mock providers.
|
|
6
|
+
*
|
|
7
|
+
* Default behavior: Automatically selects the first available provider
|
|
8
|
+
* using the factory pattern. DeepSeek is preferred for best value/cost ratio.
|
|
9
|
+
*/
|
|
10
|
+
// Re-export all providers (runtime values only; the matching types live in the .d.ts)
export { OllamaProvider, createOllamaProvider, checkOllamaSetup } from './ollama-provider.js';
export { OpenAIProvider, createOpenAIProvider, checkOpenAISetup, OpenAIError } from './openai-provider.js';
export { AnthropicProvider, createAnthropicProvider, checkAnthropicSetup, AnthropicError } from './anthropic-provider.js';
export { DeepSeekProvider, createDeepSeekProvider, checkDeepSeekSetup, getDeepSeekSetupInstructions, DeepSeekError } from './deepseek-provider.js';
export { MockProvider, createMockProvider, createStandardMockProvider, MOCK_RESPONSES } from './mock-provider.js';
// Factory exports
export { FallbackProvider, createLLMProvider, createBest, createResilientLLMProvider, getProviderStatus, getProviderInfo, detectBestProvider, hasAnyProvider, PROVIDER_CHAIN, } from './provider-factory.js';
// NOTE(review): OllamaError is exported separately from the other Ollama
// symbols on the first line — consider merging into that export.
export { OllamaError } from './ollama-provider.js';
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM Client Interface
|
|
3
|
+
*
|
|
4
|
+
* Abstract interface for LLM providers (Ollama, OpenAI, Anthropic, etc.).
|
|
5
|
+
* Allows switching between providers without changing application code.
|
|
6
|
+
*/
|
|
7
|
+
/** A single chat message exchanged with an LLM. */
export interface LLMMessage {
    /** Chat role of the message author. */
    role: 'system' | 'user' | 'assistant';
    /** Message text. */
    content: string;
}
|
|
11
|
+
/**
 * Response returned by an ILLMClient call.
 *
 * NOTE(review): fields here use snake_case (finish_reason, prompt_tokens)
 * while the provider-level GenerationResponse uses camelCase — an
 * inconsistency worth unifying in a future major version.
 */
export interface LLMResponse {
    /** Generated text content. */
    content: string;
    /** Model that produced the response. */
    model: string;
    /** Why generation ended (optional). */
    finish_reason?: 'stop' | 'length' | 'content_filter';
    /** Token accounting (optional). */
    usage?: {
        prompt_tokens: number;
        completion_tokens: number;
        total_tokens: number;
    };
}
|
|
21
|
+
/** Per-client / per-call configuration for an ILLMClient. All fields optional. */
export interface LLMClientConfig {
    /** Provider API key. */
    apiKey?: string;
    /** Override for the provider's API base URL. */
    baseURL?: string;
    /** Model identifier to use. */
    model?: string;
    /** Request timeout — units not specified here; presumably milliseconds, TODO confirm against implementations. */
    timeout?: number;
    /** Maximum retry attempts. */
    maxRetries?: number;
}
|
|
28
|
+
// NOTE(review): the `I` prefix is non-idiomatic TypeScript; renaming would
// break existing consumers, so it is kept as-is.
export interface ILLMClient {
    /**
     * Provider name (ollama, openai, anthropic, mock)
     */
    readonly provider: string;
    /**
     * Check if the provider is available and configured
     */
    available(): Promise<boolean>;
    /**
     * Generate a completion from a prompt
     */
    complete(prompt: string, options?: LLMClientConfig): Promise<LLMResponse>;
    /**
     * Generate a completion from a chat (messages)
     */
    chat(messages: LLMMessage[], options?: LLMClientConfig): Promise<LLMResponse>;
}
|