qa360 1.4.5 → 2.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (209)
  1. package/README.md +1 -1
  2. package/dist/commands/ai.d.ts +41 -0
  3. package/dist/commands/ai.js +499 -0
  4. package/dist/commands/ask.js +12 -12
  5. package/dist/commands/coverage.d.ts +8 -0
  6. package/dist/commands/coverage.js +252 -0
  7. package/dist/commands/explain.d.ts +27 -0
  8. package/dist/commands/explain.js +630 -0
  9. package/dist/commands/flakiness.d.ts +73 -0
  10. package/dist/commands/flakiness.js +435 -0
  11. package/dist/commands/generate.d.ts +66 -0
  12. package/dist/commands/generate.js +438 -0
  13. package/dist/commands/init.d.ts +56 -9
  14. package/dist/commands/init.js +217 -10
  15. package/dist/commands/monitor.d.ts +27 -0
  16. package/dist/commands/monitor.js +225 -0
  17. package/dist/commands/ollama.d.ts +40 -0
  18. package/dist/commands/ollama.js +301 -0
  19. package/dist/commands/pack.d.ts +37 -9
  20. package/dist/commands/pack.js +240 -141
  21. package/dist/commands/regression.d.ts +8 -0
  22. package/dist/commands/regression.js +340 -0
  23. package/dist/commands/repair.d.ts +26 -0
  24. package/dist/commands/repair.js +307 -0
  25. package/dist/commands/retry.d.ts +43 -0
  26. package/dist/commands/retry.js +275 -0
  27. package/dist/commands/run.d.ts +8 -3
  28. package/dist/commands/run.js +45 -31
  29. package/dist/commands/slo.d.ts +8 -0
  30. package/dist/commands/slo.js +327 -0
  31. package/dist/core/adapters/playwright-native-api.d.ts +183 -0
  32. package/dist/core/adapters/playwright-native-api.js +461 -0
  33. package/dist/core/adapters/playwright-ui.d.ts +7 -0
  34. package/dist/core/adapters/playwright-ui.js +29 -1
  35. package/dist/core/ai/anthropic-provider.d.ts +50 -0
  36. package/dist/core/ai/anthropic-provider.js +211 -0
  37. package/dist/core/ai/deepseek-provider.d.ts +81 -0
  38. package/dist/core/ai/deepseek-provider.js +254 -0
  39. package/dist/core/ai/index.d.ts +60 -0
  40. package/dist/core/ai/index.js +18 -0
  41. package/dist/core/ai/llm-client.d.ts +45 -0
  42. package/dist/core/ai/llm-client.js +7 -0
  43. package/dist/core/ai/mock-provider.d.ts +49 -0
  44. package/dist/core/ai/mock-provider.js +121 -0
  45. package/dist/core/ai/ollama-provider.d.ts +78 -0
  46. package/dist/core/ai/ollama-provider.js +192 -0
  47. package/dist/core/ai/openai-provider.d.ts +48 -0
  48. package/dist/core/ai/openai-provider.js +188 -0
  49. package/dist/core/ai/provider-factory.d.ts +160 -0
  50. package/dist/core/ai/provider-factory.js +269 -0
  51. package/dist/core/auth/api-key-provider.d.ts +16 -0
  52. package/dist/core/auth/api-key-provider.js +63 -0
  53. package/dist/core/auth/aws-iam-provider.d.ts +35 -0
  54. package/dist/core/auth/aws-iam-provider.js +177 -0
  55. package/dist/core/auth/azure-ad-provider.d.ts +15 -0
  56. package/dist/core/auth/azure-ad-provider.js +99 -0
  57. package/dist/core/auth/basic-auth-provider.d.ts +26 -0
  58. package/dist/core/auth/basic-auth-provider.js +111 -0
  59. package/dist/core/auth/gcp-adc-provider.d.ts +27 -0
  60. package/dist/core/auth/gcp-adc-provider.js +126 -0
  61. package/dist/core/auth/index.d.ts +238 -0
  62. package/dist/core/auth/index.js +82 -0
  63. package/dist/core/auth/jwt-provider.d.ts +19 -0
  64. package/dist/core/auth/jwt-provider.js +160 -0
  65. package/dist/core/auth/manager.d.ts +84 -0
  66. package/dist/core/auth/manager.js +230 -0
  67. package/dist/core/auth/oauth2-provider.d.ts +17 -0
  68. package/dist/core/auth/oauth2-provider.js +114 -0
  69. package/dist/core/auth/totp-provider.d.ts +31 -0
  70. package/dist/core/auth/totp-provider.js +134 -0
  71. package/dist/core/auth/ui-login-provider.d.ts +26 -0
  72. package/dist/core/auth/ui-login-provider.js +198 -0
  73. package/dist/core/cache/index.d.ts +7 -0
  74. package/dist/core/cache/index.js +6 -0
  75. package/dist/core/cache/lru-cache.d.ts +203 -0
  76. package/dist/core/cache/lru-cache.js +397 -0
  77. package/dist/core/coverage/analyzer.d.ts +101 -0
  78. package/dist/core/coverage/analyzer.js +415 -0
  79. package/dist/core/coverage/collector.d.ts +74 -0
  80. package/dist/core/coverage/collector.js +459 -0
  81. package/dist/core/coverage/config.d.ts +37 -0
  82. package/dist/core/coverage/config.js +156 -0
  83. package/dist/core/coverage/index.d.ts +11 -0
  84. package/dist/core/coverage/index.js +15 -0
  85. package/dist/core/coverage/types.d.ts +267 -0
  86. package/dist/core/coverage/types.js +6 -0
  87. package/dist/core/coverage/vault.d.ts +95 -0
  88. package/dist/core/coverage/vault.js +405 -0
  89. package/dist/core/dashboard/assets.d.ts +6 -0
  90. package/dist/core/dashboard/assets.js +690 -0
  91. package/dist/core/dashboard/index.d.ts +6 -0
  92. package/dist/core/dashboard/index.js +5 -0
  93. package/dist/core/dashboard/server.d.ts +72 -0
  94. package/dist/core/dashboard/server.js +354 -0
  95. package/dist/core/dashboard/types.d.ts +70 -0
  96. package/dist/core/dashboard/types.js +5 -0
  97. package/dist/core/discoverer/index.d.ts +115 -0
  98. package/dist/core/discoverer/index.js +250 -0
  99. package/dist/core/flakiness/index.d.ts +228 -0
  100. package/dist/core/flakiness/index.js +384 -0
  101. package/dist/core/generation/code-formatter.d.ts +111 -0
  102. package/dist/core/generation/code-formatter.js +307 -0
  103. package/dist/core/generation/code-generator.d.ts +144 -0
  104. package/dist/core/generation/code-generator.js +293 -0
  105. package/dist/core/generation/generator.d.ts +40 -0
  106. package/dist/core/generation/generator.js +76 -0
  107. package/dist/core/generation/index.d.ts +30 -0
  108. package/dist/core/generation/index.js +28 -0
  109. package/dist/core/generation/pack-generator.d.ts +107 -0
  110. package/dist/core/generation/pack-generator.js +416 -0
  111. package/dist/core/generation/prompt-builder.d.ts +132 -0
  112. package/dist/core/generation/prompt-builder.js +672 -0
  113. package/dist/core/generation/source-analyzer.d.ts +213 -0
  114. package/dist/core/generation/source-analyzer.js +657 -0
  115. package/dist/core/generation/test-optimizer.d.ts +117 -0
  116. package/dist/core/generation/test-optimizer.js +328 -0
  117. package/dist/core/generation/types.d.ts +214 -0
  118. package/dist/core/generation/types.js +4 -0
  119. package/dist/core/index.d.ts +23 -1
  120. package/dist/core/index.js +39 -0
  121. package/dist/core/pack/validator.js +31 -1
  122. package/dist/core/pack-v2/index.d.ts +9 -0
  123. package/dist/core/pack-v2/index.js +8 -0
  124. package/dist/core/pack-v2/loader.d.ts +62 -0
  125. package/dist/core/pack-v2/loader.js +231 -0
  126. package/dist/core/pack-v2/migrator.d.ts +56 -0
  127. package/dist/core/pack-v2/migrator.js +455 -0
  128. package/dist/core/pack-v2/validator.d.ts +61 -0
  129. package/dist/core/pack-v2/validator.js +577 -0
  130. package/dist/core/regression/detector.d.ts +107 -0
  131. package/dist/core/regression/detector.js +497 -0
  132. package/dist/core/regression/index.d.ts +9 -0
  133. package/dist/core/regression/index.js +11 -0
  134. package/dist/core/regression/trend-analyzer.d.ts +102 -0
  135. package/dist/core/regression/trend-analyzer.js +345 -0
  136. package/dist/core/regression/types.d.ts +222 -0
  137. package/dist/core/regression/types.js +7 -0
  138. package/dist/core/regression/vault.d.ts +87 -0
  139. package/dist/core/regression/vault.js +289 -0
  140. package/dist/core/repair/engine/fixer.d.ts +24 -0
  141. package/dist/core/repair/engine/fixer.js +226 -0
  142. package/dist/core/repair/engine/suggestion-engine.d.ts +18 -0
  143. package/dist/core/repair/engine/suggestion-engine.js +187 -0
  144. package/dist/core/repair/index.d.ts +10 -0
  145. package/dist/core/repair/index.js +13 -0
  146. package/dist/core/repair/repairer.d.ts +90 -0
  147. package/dist/core/repair/repairer.js +284 -0
  148. package/dist/core/repair/types.d.ts +91 -0
  149. package/dist/core/repair/types.js +6 -0
  150. package/dist/core/repair/utils/error-analyzer.d.ts +28 -0
  151. package/dist/core/repair/utils/error-analyzer.js +264 -0
  152. package/dist/core/retry/flakiness-integration.d.ts +60 -0
  153. package/dist/core/retry/flakiness-integration.js +228 -0
  154. package/dist/core/retry/index.d.ts +14 -0
  155. package/dist/core/retry/index.js +16 -0
  156. package/dist/core/retry/retry-engine.d.ts +80 -0
  157. package/dist/core/retry/retry-engine.js +296 -0
  158. package/dist/core/retry/types.d.ts +178 -0
  159. package/dist/core/retry/types.js +52 -0
  160. package/dist/core/retry/vault.d.ts +77 -0
  161. package/dist/core/retry/vault.js +304 -0
  162. package/dist/core/runner/e2e-helpers.d.ts +102 -0
  163. package/dist/core/runner/e2e-helpers.js +153 -0
  164. package/dist/core/runner/phase3-runner.d.ts +101 -2
  165. package/dist/core/runner/phase3-runner.js +559 -24
  166. package/dist/core/self-healing/assertion-healer.d.ts +97 -0
  167. package/dist/core/self-healing/assertion-healer.js +371 -0
  168. package/dist/core/self-healing/engine.d.ts +122 -0
  169. package/dist/core/self-healing/engine.js +538 -0
  170. package/dist/core/self-healing/index.d.ts +10 -0
  171. package/dist/core/self-healing/index.js +11 -0
  172. package/dist/core/self-healing/selector-healer.d.ts +103 -0
  173. package/dist/core/self-healing/selector-healer.js +372 -0
  174. package/dist/core/self-healing/types.d.ts +152 -0
  175. package/dist/core/self-healing/types.js +6 -0
  176. package/dist/core/slo/config.d.ts +107 -0
  177. package/dist/core/slo/config.js +360 -0
  178. package/dist/core/slo/index.d.ts +11 -0
  179. package/dist/core/slo/index.js +15 -0
  180. package/dist/core/slo/sli-calculator.d.ts +92 -0
  181. package/dist/core/slo/sli-calculator.js +364 -0
  182. package/dist/core/slo/slo-tracker.d.ts +148 -0
  183. package/dist/core/slo/slo-tracker.js +379 -0
  184. package/dist/core/slo/types.d.ts +281 -0
  185. package/dist/core/slo/types.js +7 -0
  186. package/dist/core/slo/vault.d.ts +102 -0
  187. package/dist/core/slo/vault.js +427 -0
  188. package/dist/core/tui/index.d.ts +7 -0
  189. package/dist/core/tui/index.js +6 -0
  190. package/dist/core/tui/monitor.d.ts +92 -0
  191. package/dist/core/tui/monitor.js +271 -0
  192. package/dist/core/tui/renderer.d.ts +33 -0
  193. package/dist/core/tui/renderer.js +218 -0
  194. package/dist/core/tui/types.d.ts +63 -0
  195. package/dist/core/tui/types.js +5 -0
  196. package/dist/core/types/pack-v2.d.ts +425 -0
  197. package/dist/core/types/pack-v2.js +8 -0
  198. package/dist/core/vault/index.d.ts +116 -0
  199. package/dist/core/vault/index.js +400 -5
  200. package/dist/core/watch/index.d.ts +7 -0
  201. package/dist/core/watch/index.js +6 -0
  202. package/dist/core/watch/watch-mode.d.ts +213 -0
  203. package/dist/core/watch/watch-mode.js +389 -0
  204. package/dist/index.js +68 -68
  205. package/dist/utils/config.d.ts +5 -0
  206. package/dist/utils/config.js +136 -0
  207. package/package.json +5 -1
  208. package/dist/core/adapters/playwright-api.d.ts +0 -82
  209. package/dist/core/adapters/playwright-api.js +0 -264
/**
 * Mock LLM Provider
 *
 * Deterministic LLM provider for testing and fallback.
 * Returns predefined responses without making network calls.
 */
import type { LLMProvider, GenerationRequest, GenerationResponse } from './index.js';
export interface MockProviderConfig {
    /** Map of normalized prompt keys to canned responses. */
    responses?: Record<string, string>;
    /** Response returned when no predefined key matches. */
    defaultResponse?: string;
    /** Simulated latency in milliseconds before responding. */
    delay?: number;
}
export declare class MockProvider implements LLMProvider {
    name: string;
    models: string[];
    private readonly responses;
    private readonly defaultResponse;
    private readonly delay;
    constructor(config?: MockProviderConfig);
    /** Always resolves true — the mock needs no external service. */
    isAvailable(): Promise<boolean>;
    generate(request: GenerationRequest): Promise<GenerationResponse>;
    stream(request: GenerationRequest): AsyncIterable<string>;
    countTokens(text: string): number;
    private getRequestKey;
    /**
     * Set a predefined response for a specific prompt
     */
    setResponse(promptPattern: string, response: string): void;
    /**
     * Clear all predefined responses
     */
    clearResponses(): void;
}
/**
 * Create a mock provider with predefined responses
 */
export declare function createMockProvider(config?: MockProviderConfig): MockProvider;
/**
 * Default mock responses for common QA360 prompts
 */
export declare const MOCK_RESPONSES: {
    testGeneration: string;
    codeAnalysis: string;
    testSuggestions: string;
};
/**
 * Create a mock provider with standard QA360 mock responses
 */
export declare function createStandardMockProvider(): MockProvider;
/**
 * Mock LLM Provider
 *
 * Deterministic LLM provider for testing and fallback.
 * Returns predefined responses without making network calls.
 */
export class MockProvider {
    name = 'mock';
    models = ['mock-model', 'mock-coder', 'mock-analyst'];
    responses;
    defaultResponse;
    delay;
    /**
     * @param {object} [config] - Optional configuration.
     * @param {Record<string,string>} [config.responses] - Predefined prompt-key -> response map.
     * @param {string} [config.defaultResponse] - Returned when no predefined key matches.
     * @param {number} [config.delay] - Simulated latency in milliseconds (0 = none).
     */
    constructor(config = {}) {
        // Defensive copy: setResponse()/clearResponses() mutate this map, and we
        // must never mutate a caller-owned object (e.g. the shared MOCK_RESPONSES
        // constant passed in by createStandardMockProvider()).
        this.responses = { ...(config.responses || {}) };
        this.defaultResponse = config.defaultResponse || 'Mock LLM response';
        this.delay = config.delay || 0;
    }
    /** Mock is always available — no network or local service required. */
    async isAvailable() {
        return true;
    }
    /**
     * Return the canned response for the request (or the default), with token
     * usage approximated from character counts.
     */
    async generate(request) {
        if (this.delay > 0) {
            await new Promise(resolve => setTimeout(resolve, this.delay));
        }
        // Check for predefined response
        const key = this.getRequestKey(request);
        const content = this.responses[key] || this.defaultResponse;
        return {
            content,
            usage: {
                promptTokens: this.countTokens(request.prompt),
                completionTokens: this.countTokens(content),
                totalTokens: this.countTokens(request.prompt) + this.countTokens(content)
            },
            model: 'mock-model',
            finishReason: 'stop'
        };
    }
    /** Stream the generated content word-by-word with a small delay per chunk. */
    async *stream(request) {
        const response = await this.generate(request);
        const words = response.content.split(' ');
        for (const word of words) {
            await new Promise(resolve => setTimeout(resolve, 10)); // Small delay between chunks
            yield word + ' ';
        }
    }
    /** Approximate token count (roughly 4 characters per token). */
    countTokens(text) {
        return Math.ceil(text.length / 4);
    }
    /** Normalize a prompt into a lookup key: first 100 chars, collapsed whitespace. */
    getRequestKey(request) {
        const prompt = request.prompt.substring(0, 100);
        return prompt.replace(/\s+/g, ' ').trim();
    }
    /**
     * Set a predefined response for a specific prompt.
     * The pattern is normalized with the same rules applied to incoming prompts
     * (truncate to 100 chars, collapse whitespace) so that patterns longer than
     * 100 characters still match the lookup key used by generate().
     */
    setResponse(promptPattern, response) {
        const key = this.getRequestKey({ prompt: promptPattern });
        this.responses[key] = response;
    }
    /**
     * Clear all predefined responses
     */
    clearResponses() {
        Object.keys(this.responses).forEach(key => {
            delete this.responses[key];
        });
    }
}
/**
 * Create a mock provider with predefined responses
 *
 * Thin factory around the MockProvider constructor; the config is optional.
 */
export function createMockProvider(config) {
    const provider = new MockProvider(config);
    return provider;
}
/**
 * Default mock responses for common QA360 prompts
 */
export const MOCK_RESPONSES = {
    // Canned output for test-generation prompts: a single API smoke test.
    testGeneration: `{
  "tests": [
    {
      "name": "API Health Check",
      "type": "api_smoke",
      "target": "https://api.example.com/health",
      "method": "GET",
      "expectedStatus": 200
    }
  ]
}`,
    // Canned output for code-analysis prompts: one medium-severity lint issue.
    codeAnalysis: `{
  "issues": [
    {
      "severity": "medium",
      "rule": "no-unused-vars",
      "message": "Unused variable 'foo'",
      "location": { "file": "src/app.ts", "line": 42 }
    }
  ]
}`,
    // Canned output for test-suggestion prompts: one high-priority suggestion.
    testSuggestions: `{
  "suggestions": [
    {
      "type": "add_test",
      "description": "Add test for login function",
      "priority": "high"
    }
  ]
}`
};
/**
 * Create a mock provider with standard QA360 mock responses
 *
 * Passes a shallow copy of MOCK_RESPONSES so that later setResponse() /
 * clearResponses() calls on the returned provider cannot mutate the shared
 * module-level constant.
 */
export function createStandardMockProvider() {
    return new MockProvider({
        responses: { ...MOCK_RESPONSES },
        delay: 50 // Simulate 50ms latency
    });
}
/**
 * Ollama LLM Provider
 *
 * Local LLM provider using Ollama (https://ollama.com).
 * Runs locally with models like DeepSeek Coder, Llama, Mistral, etc.
 *
 * Environment variables:
 * - OLLAMA_BASE_URL: Optional base URL (default: http://localhost:11434)
 * - OLLAMA_MODEL: Model to use (default: deepseek-coder)
 */
import type { LLMProvider, GenerationRequest, GenerationResponse } from './index.js';
export interface OllamaConfig {
    /** Ollama server URL (default: http://localhost:11434). */
    baseUrl?: string;
    /** Default model name (default: deepseek-coder). */
    model?: string;
    /** Request timeout in milliseconds. */
    timeout?: number;
}
/**
 * Ollama-specific provider
 * Connects to local Ollama instance for AI capabilities
 */
export declare class OllamaProvider implements LLMProvider {
    name: string;
    models: string[];
    private readonly baseUrl;
    private readonly defaultModel;
    private readonly timeout;
    constructor(config?: OllamaConfig);
    /** Probes the server via GET /api/tags; resolves false on any failure. */
    isAvailable(): Promise<boolean>;
    generate(request: GenerationRequest): Promise<GenerationResponse>;
    stream(request: GenerationRequest): AsyncIterable<string>;
    countTokens(text: string): number;
    /**
     * List available models from Ollama
     */
    listModels(): Promise<ModelInfo[]>;
    /**
     * Pull a model from Ollama registry
     */
    pullModel(model: string, onProgress?: (progress: PullProgress) => Promise<void>): Promise<void>;
    private buildFullPrompt;
}
/**
 * Model information
 */
export interface ModelInfo {
    name: string;
    /** Size as reported by the /api/tags endpoint. */
    size: number;
    modifiedAt: string;
}
/**
 * Pull progress information
 */
export interface PullProgress {
    status: 'pulling' | 'verifying' | 'complete';
    digest?: string;
    total?: number;
    completed?: number;
}
/**
 * Ollama-specific error
 */
export declare class OllamaError extends Error {
    code: string;
    details?: Record<string, unknown>;
    constructor(message: string, details?: Record<string, unknown>);
}
/**
 * Create Ollama provider with default settings
 */
export declare function createOllamaProvider(config?: OllamaConfig): OllamaProvider;
/**
 * Check if Ollama is available and has the required model
 */
export declare function checkOllamaSetup(model?: string): Promise<{
    available: boolean;
    modelInstalled: boolean;
    models: string[];
}>;
/**
 * Ollama LLM Provider
 *
 * Local LLM provider using Ollama (https://ollama.com).
 * Runs locally with models like DeepSeek Coder, Llama, Mistral, etc.
 *
 * Environment variables:
 * - OLLAMA_BASE_URL: Optional base URL (default: http://localhost:11434)
 * - OLLAMA_MODEL: Model to use (default: deepseek-coder)
 */
/**
 * Ollama-specific provider
 * Connects to local Ollama instance for AI capabilities
 */
export class OllamaProvider {
    name = 'ollama';
    // Recommended models for code generation
    models = [
        'deepseek-coder', // Best for code generation
        'codellama', // Good for code
        'llama3.2', // General purpose
        'mistral', // Fast, good quality
        'qwen2.5-coder', // Alternative code model
    ];
    baseUrl;
    defaultModel;
    timeout;
    /**
     * @param {object} [config]
     * @param {string} [config.baseUrl] - Server URL; falls back to OLLAMA_BASE_URL env var.
     * @param {string} [config.model] - Default model; falls back to OLLAMA_MODEL env var.
     * @param {number} [config.timeout] - Request timeout in ms.
     */
    constructor(config = {}) {
        this.baseUrl = config.baseUrl || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
        this.defaultModel = config.model || process.env.OLLAMA_MODEL || 'deepseek-coder';
        this.timeout = config.timeout || 120000; // 2 minutes default
    }
    /**
     * Probe the local Ollama server (GET /api/tags, 5s timeout).
     * @returns {Promise<boolean>} true when the server answered OK.
     */
    async isAvailable() {
        try {
            const response = await fetch(`${this.baseUrl}/api/tags`, {
                signal: AbortSignal.timeout(5000),
            });
            return response.ok;
        }
        catch {
            return false;
        }
    }
    /**
     * Run a single non-streaming completion via POST /api/generate.
     * @throws {OllamaError} on HTTP failure or an API-reported error.
     */
    async generate(request) {
        const fullPrompt = this.buildFullPrompt(request);
        const response = await fetch(`${this.baseUrl}/api/generate`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            signal: AbortSignal.timeout(this.timeout),
            body: JSON.stringify({
                model: this.defaultModel,
                prompt: fullPrompt,
                stream: false,
                options: {
                    temperature: request.temperature ?? 0.7,
                    num_predict: request.maxTokens ?? 4096,
                }
            })
        });
        if (!response.ok) {
            throw new OllamaError(`Ollama request failed: ${response.status} ${response.statusText}`, { response: await response.text().catch(() => 'Unknown error') });
        }
        const data = await response.json();
        if (data.error) {
            throw new OllamaError(data.error);
        }
        return {
            content: data.response,
            usage: {
                promptTokens: data.prompt_eval_count || 0,
                completionTokens: data.eval_count || 0,
                totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
            },
            model: this.defaultModel,
            finishReason: data.done_reason || 'stop',
        };
    }
    /**
     * Stream a completion as text chunks. Ollama returns newline-delimited
     * JSON objects; chunks are yielded as they arrive.
     * @throws {OllamaError} on HTTP failure or a missing response body.
     */
    async *stream(request) {
        const fullPrompt = this.buildFullPrompt(request);
        const response = await fetch(`${this.baseUrl}/api/generate`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
                model: this.defaultModel,
                prompt: fullPrompt,
                stream: true,
                options: {
                    temperature: request.temperature ?? 0.7,
                    num_predict: request.maxTokens ?? 4096,
                }
            })
        });
        if (!response.ok) {
            throw new OllamaError(`Ollama stream failed: ${response.status}`);
        }
        const reader = response.body?.getReader();
        if (!reader) {
            throw new OllamaError('No response body');
        }
        const decoder = new TextDecoder();
        let buffer = '';
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    if (!line.trim())
                        continue;
                    let data;
                    try {
                        data = JSON.parse(line);
                    }
                    catch {
                        // Skip malformed NDJSON lines instead of aborting the whole
                        // stream (matches the tolerance of the OpenAI provider).
                        continue;
                    }
                    if (data.response) {
                        yield data.response;
                    }
                    if (data.done) {
                        return;
                    }
                }
            }
        }
        finally {
            // Always release the lock so the underlying stream can be cancelled/GC'd.
            reader.releaseLock();
        }
    }
    /** Approximate token count (roughly 4 chars per token). */
    countTokens(text) {
        return Math.ceil(text.length / 4);
    }
    /**
     * List available models from Ollama
     * @throws {OllamaError} when the /api/tags request fails.
     */
    async listModels() {
        const response = await fetch(`${this.baseUrl}/api/tags`);
        if (!response.ok) {
            throw new OllamaError('Failed to list models');
        }
        const data = await response.json();
        // Guard against a missing/absent "models" field in the payload.
        return (data.models ?? []).map((m) => ({
            name: m.name,
            size: m.size,
            modifiedAt: m.modified_at,
        }));
    }
    /**
     * Pull a model from Ollama registry
     *
     * Intentionally unimplemented: model downloads are long-running and are
     * expected to be performed via the Ollama CLI.
     * @throws {Error} always.
     */
    async pullModel(model, onProgress) {
        // This is a long-running operation, typically done via CLI
        // but we provide the method for completeness
        throw new Error('Use "ollama pull <model>" via CLI for model downloads');
    }
    /** Fold an optional system prompt into a single chat-style prompt string. */
    buildFullPrompt(request) {
        if (request.systemPrompt) {
            return `System: ${request.systemPrompt}\n\nUser: ${request.prompt}\n\nAssistant:`;
        }
        return request.prompt;
    }
}
/**
 * Ollama-specific error
 *
 * Error subtype raised by the Ollama provider; exposes a stable `code`
 * plus optional structured `details` for callers to inspect.
 */
export class OllamaError extends Error {
    constructor(message, details) {
        super(message);
        this.name = 'OllamaError';
        this.code = 'OLLAMA_ERROR';
        this.details = details;
    }
}
/**
 * Create Ollama provider with default settings
 *
 * Thin factory around the OllamaProvider constructor; environment
 * variables / built-in defaults fill any omitted config fields.
 */
export function createOllamaProvider(config) {
    const provider = new OllamaProvider(config);
    return provider;
}
/**
 * Check if Ollama is available and has the required model.
 *
 * Never throws: any failure while listing models after a successful
 * availability probe is reported as "available but no models" rather than
 * propagating as an exception from a status-check helper.
 *
 * @param {string} [model='deepseek-coder'] - Substring to look for in installed model names.
 * @returns {Promise<{available: boolean, modelInstalled: boolean, models: string[]}>}
 */
export async function checkOllamaSetup(model = 'deepseek-coder') {
    const provider = new OllamaProvider();
    const available = await provider.isAvailable();
    if (!available) {
        return { available: false, modelInstalled: false, models: [] };
    }
    let models;
    try {
        models = await provider.listModels();
    }
    catch {
        // Server answered the probe but model listing failed (e.g. transient
        // network error or unexpected payload) — degrade gracefully.
        return { available: true, modelInstalled: false, models: [] };
    }
    const modelNames = models.map(m => m.name);
    return {
        available: true,
        modelInstalled: modelNames.some(m => m.includes(model)),
        models: modelNames,
    };
}
/**
 * OpenAI LLM Provider
 *
 * Provider for OpenAI API (https://api.openai.com).
 * Supports GPT-4, GPT-4 Turbo, and GPT-3.5.
 *
 * Environment variables:
 * - OPENAI_API_KEY: Required API key
 * - OPENAI_BASE_URL: Optional base URL (default: https://api.openai.com/v1)
 * - OPENAI_MODEL: Model to use (default: gpt-4)
 */
import type { LLMProvider, GenerationRequest, GenerationResponse } from './index.js';
export interface OpenAIConfig {
    /** API key; falls back to the OPENAI_API_KEY env var. */
    apiKey?: string;
    /** API base URL (default: https://api.openai.com/v1). */
    baseURL?: string;
    /** Default model name (default: gpt-4). */
    model?: string;
    /** Request timeout in milliseconds. */
    timeout?: number;
}
export declare class OpenAIProvider implements LLMProvider {
    name: string;
    models: string[];
    private readonly apiKey;
    private readonly baseURL;
    private readonly defaultModel;
    private readonly timeout;
    constructor(config?: OpenAIConfig);
    /** Resolves false when no API key is set or the /models probe fails. */
    isAvailable(): Promise<boolean>;
    generate(request: GenerationRequest): Promise<GenerationResponse>;
    stream(request: GenerationRequest): AsyncIterable<string>;
    countTokens(text: string): number;
    private buildMessages;
}
/**
 * OpenAI-specific error
 */
export declare class OpenAIError extends Error {
    code: string;
    details?: Record<string, unknown>;
    constructor(message: string, details?: Record<string, unknown>);
}
/**
 * Create OpenAI provider with default settings
 */
export declare function createOpenAIProvider(config?: OpenAIConfig): OpenAIProvider;
/**
 * Check if OpenAI is available and configured
 */
export declare function checkOpenAISetup(): Promise<boolean>;
/**
 * OpenAI LLM Provider
 *
 * Provider for OpenAI API (https://api.openai.com).
 * Supports GPT-4, GPT-4 Turbo, and GPT-3.5.
 *
 * Environment variables:
 * - OPENAI_API_KEY: Required API key
 * - OPENAI_BASE_URL: Optional base URL (default: https://api.openai.com/v1)
 * - OPENAI_MODEL: Model to use (default: gpt-4)
 */
export class OpenAIProvider {
    name = 'openai';
    // Recommended models for code generation
    models = [
        'gpt-4', // Most capable
        'gpt-4-turbo', // Faster, cheaper
        'gpt-4o', // Latest GPT-4 Omni
        'gpt-4o-mini', // Smaller, faster
        'gpt-3.5-turbo', // Budget option
    ];
    apiKey;
    baseURL;
    defaultModel;
    timeout;
    /**
     * @param {object} [config]
     * @param {string} [config.apiKey] - Falls back to OPENAI_API_KEY env var.
     * @param {string} [config.baseURL] - Falls back to OPENAI_BASE_URL env var.
     * @param {string} [config.model] - Falls back to OPENAI_MODEL env var.
     * @param {number} [config.timeout] - Request timeout in ms.
     */
    constructor(config = {}) {
        this.apiKey = config.apiKey || process.env.OPENAI_API_KEY || '';
        this.baseURL = config.baseURL || process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1';
        this.defaultModel = config.model || process.env.OPENAI_MODEL || 'gpt-4';
        this.timeout = config.timeout || 60000; // 1 minute default
    }
    /**
     * Check key presence, then probe GET /models (5s timeout).
     * @returns {Promise<boolean>} true when the API answered OK.
     */
    async isAvailable() {
        // Check if API key is present
        if (!this.apiKey) {
            return false;
        }
        // Try a minimal API call
        try {
            const response = await fetch(`${this.baseURL}/models`, {
                method: 'GET',
                headers: {
                    'Authorization': `Bearer ${this.apiKey}`,
                },
                signal: AbortSignal.timeout(5000),
            });
            return response.ok;
        }
        catch {
            return false;
        }
    }
    /**
     * Run a single non-streaming chat completion.
     * @throws {OpenAIError} when the key is missing, the HTTP call fails,
     *   or the API returns no choices.
     */
    async generate(request) {
        if (!this.apiKey) {
            throw new OpenAIError('OPENAI_API_KEY is required', {
                suggestion: 'Set OPENAI_API_KEY environment variable or pass apiKey in config'
            });
        }
        const messages = this.buildMessages(request);
        const response = await fetch(`${this.baseURL}/chat/completions`, {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${this.apiKey}`,
                'Content-Type': 'application/json',
            },
            signal: AbortSignal.timeout(this.timeout),
            body: JSON.stringify({
                model: this.defaultModel,
                messages,
                temperature: request.temperature ?? 0.7,
                max_tokens: request.maxTokens ?? 4096,
            })
        });
        if (!response.ok) {
            const error = await response.text().catch(() => 'Unknown error');
            throw new OpenAIError(`OpenAI request failed: ${response.status} ${error}`, {
                status: response.status,
                response: error
            });
        }
        const data = await response.json();
        const choice = data.choices?.[0];
        if (!choice) {
            throw new OpenAIError('No choices returned from OpenAI');
        }
        return {
            // message.content can be null in the OpenAI API (e.g. filtered or
            // tool-call responses) — normalize to an empty string.
            content: choice.message?.content ?? '',
            usage: {
                promptTokens: data.usage?.prompt_tokens || 0,
                completionTokens: data.usage?.completion_tokens || 0,
                totalTokens: data.usage?.total_tokens || 0,
            },
            model: data.model,
            // NOTE(review): any non-'stop' finish_reason is collapsed to 'length'
            // (presumably the only two values the GenerationResponse contract allows).
            finishReason: choice.finish_reason === 'stop' ? 'stop' : 'length'
        };
    }
    /**
     * Stream a chat completion as text chunks (server-sent events,
     * `data: ...` lines terminated by `[DONE]`).
     * @throws {OpenAIError} when the key is missing or the HTTP call fails.
     */
    async *stream(request) {
        if (!this.apiKey) {
            throw new OpenAIError('OPENAI_API_KEY is required');
        }
        const messages = this.buildMessages(request);
        const response = await fetch(`${this.baseURL}/chat/completions`, {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${this.apiKey}`,
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({
                model: this.defaultModel,
                messages,
                temperature: request.temperature ?? 0.7,
                max_tokens: request.maxTokens ?? 4096,
                stream: true,
            })
        });
        if (!response.ok) {
            throw new OpenAIError(`OpenAI stream failed: ${response.status}`);
        }
        const reader = response.body?.getReader();
        if (!reader) {
            throw new OpenAIError('No response body');
        }
        const decoder = new TextDecoder();
        let buffer = '';
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                buffer = lines.pop() || '';
                for (const line of lines) {
                    if (line.startsWith('data: ')) {
                        const data = line.slice(6);
                        if (data === '[DONE]') {
                            return;
                        }
                        try {
                            const parsed = JSON.parse(data);
                            const content = parsed.choices?.[0]?.delta?.content;
                            if (content) {
                                yield content;
                            }
                        }
                        catch {
                            // Ignore invalid JSON
                        }
                    }
                }
            }
        }
        finally {
            // Always release the lock so the underlying stream can be cancelled/GC'd.
            reader.releaseLock();
        }
    }
    /** Approximate token count (roughly 4 chars per token for English). */
    countTokens(text) {
        return Math.ceil(text.length / 4);
    }
    /** Build the chat message array: optional system message, then the user prompt. */
    buildMessages(request) {
        const messages = [];
        if (request.systemPrompt) {
            messages.push({ role: 'system', content: request.systemPrompt });
        }
        messages.push({ role: 'user', content: request.prompt });
        return messages;
    }
}
/**
 * OpenAI-specific error
 *
 * Error subtype raised by the OpenAI provider; exposes a stable `code`
 * plus optional structured `details` for callers to inspect.
 */
export class OpenAIError extends Error {
    constructor(message, details) {
        super(message);
        this.name = 'OpenAIError';
        this.code = 'OPENAI_ERROR';
        this.details = details;
    }
}
/**
 * Create OpenAI provider with default settings
 *
 * Thin factory around the OpenAIProvider constructor; environment
 * variables / built-in defaults fill any omitted config fields.
 */
export function createOpenAIProvider(config) {
    const provider = new OpenAIProvider(config);
    return provider;
}
/**
 * Check if OpenAI is available and configured
 *
 * Builds a provider purely from environment defaults and delegates to
 * its availability probe.
 */
export async function checkOpenAISetup() {
    return new OpenAIProvider().isAvailable();
}