@yasserkhanorg/e2e-agents 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +168 -0
- package/README.md +620 -0
- package/dist/agent/analysis.d.ts +62 -0
- package/dist/agent/analysis.d.ts.map +1 -0
- package/dist/agent/analysis.js +292 -0
- package/dist/agent/blast_radius.d.ts +4 -0
- package/dist/agent/blast_radius.d.ts.map +1 -0
- package/dist/agent/blast_radius.js +37 -0
- package/dist/agent/cache_utils.d.ts +38 -0
- package/dist/agent/cache_utils.d.ts.map +1 -0
- package/dist/agent/cache_utils.js +67 -0
- package/dist/agent/config.d.ts +148 -0
- package/dist/agent/config.d.ts.map +1 -0
- package/dist/agent/config.js +640 -0
- package/dist/agent/dependency_graph.d.ts +14 -0
- package/dist/agent/dependency_graph.d.ts.map +1 -0
- package/dist/agent/dependency_graph.js +227 -0
- package/dist/agent/feedback.d.ts +55 -0
- package/dist/agent/feedback.d.ts.map +1 -0
- package/dist/agent/feedback.js +257 -0
- package/dist/agent/flags.d.ts +23 -0
- package/dist/agent/flags.d.ts.map +1 -0
- package/dist/agent/flags.js +171 -0
- package/dist/agent/flow_catalog.d.ts +25 -0
- package/dist/agent/flow_catalog.d.ts.map +1 -0
- package/dist/agent/flow_catalog.js +106 -0
- package/dist/agent/flow_mapping.d.ts +10 -0
- package/dist/agent/flow_mapping.d.ts.map +1 -0
- package/dist/agent/flow_mapping.js +84 -0
- package/dist/agent/framework.d.ts +13 -0
- package/dist/agent/framework.d.ts.map +1 -0
- package/dist/agent/framework.js +149 -0
- package/dist/agent/gap_suggestions.d.ts +14 -0
- package/dist/agent/gap_suggestions.d.ts.map +1 -0
- package/dist/agent/gap_suggestions.js +101 -0
- package/dist/agent/generator.d.ts +10 -0
- package/dist/agent/generator.d.ts.map +1 -0
- package/dist/agent/generator.js +115 -0
- package/dist/agent/git.d.ts +11 -0
- package/dist/agent/git.d.ts.map +1 -0
- package/dist/agent/git.js +90 -0
- package/dist/agent/handoff.d.ts +22 -0
- package/dist/agent/handoff.d.ts.map +1 -0
- package/dist/agent/handoff.js +180 -0
- package/dist/agent/impact-analyzer.d.ts +114 -0
- package/dist/agent/impact-analyzer.d.ts.map +1 -0
- package/dist/agent/impact-analyzer.js +557 -0
- package/dist/agent/index.d.ts +21 -0
- package/dist/agent/index.d.ts.map +1 -0
- package/dist/agent/index.js +38 -0
- package/dist/agent/model-router.d.ts +57 -0
- package/dist/agent/model-router.d.ts.map +1 -0
- package/dist/agent/model-router.js +154 -0
- package/dist/agent/operational_insights.d.ts +41 -0
- package/dist/agent/operational_insights.d.ts.map +1 -0
- package/dist/agent/operational_insights.js +126 -0
- package/dist/agent/pipeline.d.ts +23 -0
- package/dist/agent/pipeline.d.ts.map +1 -0
- package/dist/agent/pipeline.js +609 -0
- package/dist/agent/plan.d.ts +91 -0
- package/dist/agent/plan.d.ts.map +1 -0
- package/dist/agent/plan.js +331 -0
- package/dist/agent/playwright_report.d.ts +8 -0
- package/dist/agent/playwright_report.d.ts.map +1 -0
- package/dist/agent/playwright_report.js +126 -0
- package/dist/agent/report-generator.d.ts +24 -0
- package/dist/agent/report-generator.d.ts.map +1 -0
- package/dist/agent/report-generator.js +250 -0
- package/dist/agent/report.d.ts +81 -0
- package/dist/agent/report.d.ts.map +1 -0
- package/dist/agent/report.js +147 -0
- package/dist/agent/runner.d.ts +7 -0
- package/dist/agent/runner.d.ts.map +1 -0
- package/dist/agent/runner.js +576 -0
- package/dist/agent/selectors.d.ts +10 -0
- package/dist/agent/selectors.d.ts.map +1 -0
- package/dist/agent/selectors.js +75 -0
- package/dist/agent/spec-bridge.d.ts +101 -0
- package/dist/agent/spec-bridge.d.ts.map +1 -0
- package/dist/agent/spec-bridge.js +273 -0
- package/dist/agent/spec-builder.d.ts +102 -0
- package/dist/agent/spec-builder.d.ts.map +1 -0
- package/dist/agent/spec-builder.js +273 -0
- package/dist/agent/subsystem_risk.d.ts +23 -0
- package/dist/agent/subsystem_risk.d.ts.map +1 -0
- package/dist/agent/subsystem_risk.js +207 -0
- package/dist/agent/telemetry.d.ts +84 -0
- package/dist/agent/telemetry.d.ts.map +1 -0
- package/dist/agent/telemetry.js +220 -0
- package/dist/agent/test_path.d.ts +2 -0
- package/dist/agent/test_path.d.ts.map +1 -0
- package/dist/agent/test_path.js +23 -0
- package/dist/agent/tests.d.ts +18 -0
- package/dist/agent/tests.d.ts.map +1 -0
- package/dist/agent/tests.js +106 -0
- package/dist/agent/traceability.d.ts +22 -0
- package/dist/agent/traceability.d.ts.map +1 -0
- package/dist/agent/traceability.js +183 -0
- package/dist/agent/traceability_capture.d.ts +18 -0
- package/dist/agent/traceability_capture.d.ts.map +1 -0
- package/dist/agent/traceability_capture.js +313 -0
- package/dist/agent/traceability_ingest.d.ts +21 -0
- package/dist/agent/traceability_ingest.d.ts.map +1 -0
- package/dist/agent/traceability_ingest.js +237 -0
- package/dist/agent/utils.d.ts +13 -0
- package/dist/agent/utils.d.ts.map +1 -0
- package/dist/agent/utils.js +152 -0
- package/dist/agent/validators/selector-validator.d.ts +74 -0
- package/dist/agent/validators/selector-validator.d.ts.map +1 -0
- package/dist/agent/validators/selector-validator.js +165 -0
- package/dist/anthropic_provider.d.ts +65 -0
- package/dist/anthropic_provider.d.ts.map +1 -0
- package/dist/anthropic_provider.js +332 -0
- package/dist/api.d.ts +48 -0
- package/dist/api.d.ts.map +1 -0
- package/dist/api.js +113 -0
- package/dist/base_provider.d.ts +53 -0
- package/dist/base_provider.d.ts.map +1 -0
- package/dist/base_provider.js +81 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +843 -0
- package/dist/custom_provider.d.ts +20 -0
- package/dist/custom_provider.d.ts.map +1 -0
- package/dist/custom_provider.js +276 -0
- package/dist/e2e-test-gen/index.d.ts +51 -0
- package/dist/e2e-test-gen/index.d.ts.map +1 -0
- package/dist/e2e-test-gen/index.js +57 -0
- package/dist/e2e-test-gen/spec_parser.d.ts +142 -0
- package/dist/e2e-test-gen/spec_parser.d.ts.map +1 -0
- package/dist/e2e-test-gen/spec_parser.js +786 -0
- package/dist/e2e-test-gen/types.d.ts +185 -0
- package/dist/e2e-test-gen/types.d.ts.map +1 -0
- package/dist/e2e-test-gen/types.js +4 -0
- package/dist/esm/agent/analysis.js +287 -0
- package/dist/esm/agent/blast_radius.js +34 -0
- package/dist/esm/agent/cache_utils.js +63 -0
- package/dist/esm/agent/config.js +637 -0
- package/dist/esm/agent/dependency_graph.js +224 -0
- package/dist/esm/agent/feedback.js +253 -0
- package/dist/esm/agent/flags.js +160 -0
- package/dist/esm/agent/flow_catalog.js +103 -0
- package/dist/esm/agent/flow_mapping.js +81 -0
- package/dist/esm/agent/framework.js +145 -0
- package/dist/esm/agent/gap_suggestions.js +98 -0
- package/dist/esm/agent/generator.js +112 -0
- package/dist/esm/agent/git.js +87 -0
- package/dist/esm/agent/handoff.js +177 -0
- package/dist/esm/agent/impact-analyzer.js +548 -0
- package/dist/esm/agent/index.js +22 -0
- package/dist/esm/agent/model-router.js +150 -0
- package/dist/esm/agent/operational_insights.js +123 -0
- package/dist/esm/agent/pipeline.js +605 -0
- package/dist/esm/agent/plan.js +324 -0
- package/dist/esm/agent/playwright_report.js +123 -0
- package/dist/esm/agent/report-generator.js +247 -0
- package/dist/esm/agent/report.js +144 -0
- package/dist/esm/agent/runner.js +572 -0
- package/dist/esm/agent/selectors.js +71 -0
- package/dist/esm/agent/spec-bridge.js +267 -0
- package/dist/esm/agent/spec-builder.js +267 -0
- package/dist/esm/agent/subsystem_risk.js +204 -0
- package/dist/esm/agent/telemetry.js +216 -0
- package/dist/esm/agent/test_path.js +20 -0
- package/dist/esm/agent/tests.js +101 -0
- package/dist/esm/agent/traceability.js +180 -0
- package/dist/esm/agent/traceability_capture.js +310 -0
- package/dist/esm/agent/traceability_ingest.js +234 -0
- package/dist/esm/agent/utils.js +138 -0
- package/dist/esm/agent/validators/selector-validator.js +160 -0
- package/dist/esm/anthropic_provider.js +324 -0
- package/dist/esm/api.js +105 -0
- package/dist/esm/base_provider.js +77 -0
- package/dist/esm/cli.js +841 -0
- package/dist/esm/custom_provider.js +272 -0
- package/dist/esm/e2e-test-gen/index.js +50 -0
- package/dist/esm/e2e-test-gen/spec_parser.js +782 -0
- package/dist/esm/e2e-test-gen/types.js +3 -0
- package/dist/esm/index.js +16 -0
- package/dist/esm/logger.js +89 -0
- package/dist/esm/mcp-server.js +465 -0
- package/dist/esm/ollama_provider.js +300 -0
- package/dist/esm/openai_provider.js +242 -0
- package/dist/esm/package.json +3 -0
- package/dist/esm/plan-and-test-constants.js +126 -0
- package/dist/esm/provider_factory.js +336 -0
- package/dist/esm/provider_interface.js +23 -0
- package/dist/esm/provider_utils.js +96 -0
- package/dist/index.d.ts +31 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +41 -0
- package/dist/logger.d.ts +23 -0
- package/dist/logger.d.ts.map +1 -0
- package/dist/logger.js +93 -0
- package/dist/mcp-server.d.ts +35 -0
- package/dist/mcp-server.d.ts.map +1 -0
- package/dist/mcp-server.js +469 -0
- package/dist/ollama_provider.d.ts +65 -0
- package/dist/ollama_provider.d.ts.map +1 -0
- package/dist/ollama_provider.js +308 -0
- package/dist/openai_provider.d.ts +23 -0
- package/dist/openai_provider.d.ts.map +1 -0
- package/dist/openai_provider.js +250 -0
- package/dist/plan-and-test-constants.d.ts +110 -0
- package/dist/plan-and-test-constants.d.ts.map +1 -0
- package/dist/plan-and-test-constants.js +132 -0
- package/dist/provider_factory.d.ts +99 -0
- package/dist/provider_factory.d.ts.map +1 -0
- package/dist/provider_factory.js +341 -0
- package/dist/provider_interface.d.ts +358 -0
- package/dist/provider_interface.d.ts.map +1 -0
- package/dist/provider_interface.js +28 -0
- package/dist/provider_utils.d.ts +39 -0
- package/dist/provider_utils.d.ts.map +1 -0
- package/dist/provider_utils.js +103 -0
- package/package.json +101 -0
- package/schemas/gap.schema.json +18 -0
- package/schemas/impact.schema.json +418 -0
- package/schemas/plan.schema.json +285 -0
- package/schemas/subsystem-risk-map.schema.json +62 -0
- package/schemas/traceability-input.schema.json +122 -0
|
@@ -0,0 +1,308 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
|
3
|
+
// See LICENSE.txt for license information.
|
|
4
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
5
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
6
|
+
};
|
|
7
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
8
|
+
exports.OllamaProvider = void 0;
|
|
9
|
+
exports.checkOllamaSetup = checkOllamaSetup;
|
|
10
|
+
const openai_1 = __importDefault(require("openai"));
|
|
11
|
+
const provider_interface_js_1 = require("./provider_interface.js");
|
|
12
|
+
const provider_utils_js_1 = require("./provider_utils.js");
|
|
13
|
+
const base_provider_js_1 = require("./base_provider.js");
|
|
14
|
+
const logger_js_1 = require("./logger.js");
|
|
15
|
+
/**
 * SECURITY: Normalize an Ollama base URL, defaulting to the local
 * instance and ensuring the OpenAI-compatible `/v1` path is present.
 * Unparseable input falls back to the local default endpoint.
 */
function normalizeOllamaBaseUrl(baseUrl) {
    const candidate = baseUrl || 'http://localhost:11434';
    let parsed;
    try {
        parsed = new URL(candidate);
    }
    catch {
        // Not a valid URL at all: use the safe local default.
        return 'http://localhost:11434/v1';
    }
    const hasExplicitPath = parsed.pathname && parsed.pathname !== '/';
    if (!hasExplicitPath) {
        parsed.pathname = '/v1';
    }
    // Strip a single trailing slash so callers get a stable base URL.
    return parsed.toString().replace(/\/$/, '');
}
|
|
31
|
+
/**
 * SECURITY: Validate an Ollama endpoint URL.
 * Remote plaintext HTTP is permitted but logged as a warning;
 * unparseable input falls back to the local default with a warning
 * string returned to the caller.
 */
function validateOllamaUrl(baseUrl) {
    const normalized = normalizeOllamaBaseUrl(baseUrl);
    try {
        const parsed = new URL(normalized);
        // Only warn about cleartext transport when the target is remote.
        const localHosts = ['localhost', '127.0.0.1', '::1'];
        const remoteOverHttp = !localHosts.includes(parsed.hostname) && parsed.protocol === 'http:';
        if (remoteOverHttp) {
            logger_js_1.logger.warn('Ollama connection over plaintext HTTP to remote server. ' +
                'Prompts and responses will be transmitted unencrypted. Consider using HTTPS proxy or local Ollama.');
        }
        return { valid: true, url: normalized };
    }
    catch {
        return {
            valid: false,
            url: 'http://localhost:11434/v1',
            warning: `Invalid Ollama URL: ${baseUrl}. Using default: http://localhost:11434/v1`,
        };
    }
}
|
|
51
|
+
/**
 * SECURITY: Validate a model identifier to prevent injection issues.
 * Accepts alphanumerics plus dash, colon, underscore and dot
 * (typical format: deepseek-r1:7b, llama4:13b), capped below 256 chars.
 */
function validateModelName(model) {
    if (model.length >= 256) {
        return false;
    }
    return /^[a-z0-9_:.\-]+$/i.test(model);
}
|
|
59
|
+
/**
 * SECURITY: Clamp a request timeout to a sane window.
 * Falsy input silently becomes the 60s default; values outside
 * 1s–10m also fall back to the default, with a logged warning.
 */
function validateTimeout(timeout) {
    const DEFAULT_TIMEOUT_MS = 60000;
    if (!timeout) {
        return DEFAULT_TIMEOUT_MS;
    }
    const inRange = timeout >= 1000 && timeout <= 600000;
    if (!inRange) {
        logger_js_1.logger.warn('Timeout out of valid range (1s-10m). Using 60 second default.');
        return DEFAULT_TIMEOUT_MS;
    }
    return timeout;
}
|
|
71
|
+
/**
 * Ollama Provider - Free, local LLM execution
 *
 * Features:
 * - Zero cost (runs locally)
 * - Full privacy (no data leaves your machine)
 * - OpenAI-compatible API
 * - Supports DeepSeek-R1, Llama 4, and other open models
 *
 * Limitations:
 * - No vision support (most models)
 * - Slower inference than cloud APIs (~2-5 sec vs <1 sec)
 * - Requires local installation and model downloads
 *
 * Recommended models:
 * - deepseek-r1:7b - Fast, good quality, low memory (4GB)
 * - deepseek-r1:14b - Better quality, medium memory (8GB)
 * - llama4:13b - High quality, medium memory (8GB)
 * - deepseek-r1:7b-q4 - Quantized for speed, lower quality
 *
 * Setup:
 * 1. Install Ollama: curl -fsSL https://ollama.com/install.sh | sh
 * 2. Pull model: ollama pull deepseek-r1:7b
 * 3. Start: ollama serve (runs on localhost:11434)
 */
class OllamaProvider extends base_provider_js_1.BaseProvider {
    /**
     * Builds an OpenAI-compatible client pointed at the (validated)
     * Ollama endpoint. Invalid URL/timeout values fall back to safe
     * defaults; an invalid model name throws.
     */
    constructor(config) {
        super();
        this.name = 'ollama';
        this.capabilities = {
            vision: false, // Most Ollama models don't support vision
            streaming: true,
            maxTokens: 8000, // Varies by model
            costPer1MInputTokens: 0, // Free!
            costPer1MOutputTokens: 0, // Free!
            supportsTools: true, // DeepSeek, Llama 4 support function calling
            supportsPromptCaching: false,
            typicalResponseTimeMs: 3000, // ~2-5 seconds on decent hardware
        };
        // SECURITY: Validate and sanitize URL
        const urlValidation = validateOllamaUrl(config.baseUrl);
        if (!urlValidation.valid && urlValidation.warning) {
            // FIX: route through the shared module logger (was console.warn)
            // for consistency with every other warning in this file.
            logger_js_1.logger.warn(urlValidation.warning);
        }
        // SECURITY: Validate timeout
        const timeout = validateTimeout(config.timeout);
        // Ollama uses OpenAI-compatible API
        this.client = new openai_1.default({
            baseURL: urlValidation.url,
            apiKey: 'ollama', // Ollama doesn't require real API key
            timeout,
            maxRetries: 0, // Don't retry to avoid hanging on connection issues
        });
        const model = config.model || 'deepseek-r1:7b';
        // SECURITY: Validate model name format
        if (!validateModelName(model)) {
            throw new Error('Invalid model name format');
        }
        this.model = model;
    }
    /**
     * Generate a text completion for `prompt`.
     * Rejects prompts over 10MB; reported cost is always 0 (local model).
     * Failures are wrapped in LLMProviderError with a sanitized message.
     */
    async generateText(prompt, options) {
        const startTime = Date.now();
        try {
            // SECURITY: Validate prompt length
            if (prompt.length > 10 * 1024 * 1024) {
                throw new Error('Prompt exceeds maximum size (10MB)');
            }
            const messages = [];
            // Add system message if provided
            if (options?.systemPrompt) {
                messages.push({
                    role: 'system',
                    content: options.systemPrompt,
                });
            }
            // Add user prompt
            messages.push({
                role: 'user',
                content: prompt,
            });
            const response = await (0, provider_utils_js_1.withTimeout)(this.client.chat.completions.create({
                model: this.model,
                messages,
                max_tokens: options?.maxTokens,
                temperature: options?.temperature,
                top_p: options?.topP,
                stop: options?.stopSequences,
            }), options?.timeout, 'generateText');
            const responseTime = Date.now() - startTime;
            const text = response.choices[0]?.message?.content || '';
            const usage = {
                inputTokens: response.usage?.prompt_tokens || 0,
                outputTokens: response.usage?.completion_tokens || 0,
                totalTokens: response.usage?.total_tokens || 0,
            };
            // Update stats
            this.updateStats(usage, responseTime, 0); // Cost is always 0 for Ollama
            return {
                text,
                usage,
                cost: 0, // Free!
                metadata: {
                    model: this.model,
                    responseTimeMs: responseTime,
                    finishReason: response.choices[0]?.finish_reason,
                },
            };
        }
        catch (error) {
            this.stats.failedRequests++;
            throw new provider_interface_js_1.LLMProviderError((0, provider_utils_js_1.sanitizeErrorMessage)(error, 'generateText'), this.name, undefined, error);
        }
    }
    /**
     * Ollama does not support vision by default
     * This method throws an error to help users understand the limitation
     */
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    async analyzeImage(images, prompt, options) {
        throw new provider_interface_js_1.UnsupportedCapabilityError(this.name, 'vision');
    }
    /**
     * Stream text generation for real-time feedback.
     * Yields content deltas as they arrive; token usage is not available
     * on the streaming path, so only the request counter is updated.
     */
    async *streamText(prompt, options) {
        try {
            // SECURITY: Validate prompt length
            if (prompt.length > 10 * 1024 * 1024) {
                throw new Error('Prompt exceeds maximum size (10MB)');
            }
            const messages = [];
            if (options?.systemPrompt) {
                messages.push({
                    role: 'system',
                    content: options.systemPrompt,
                });
            }
            messages.push({
                role: 'user',
                content: prompt,
            });
            const stream = await (0, provider_utils_js_1.withTimeout)(this.client.chat.completions.create({
                model: this.model,
                messages,
                max_tokens: options?.maxTokens,
                temperature: options?.temperature,
                top_p: options?.topP,
                stop: options?.stopSequences,
                stream: true,
            }), options?.timeout, 'streamText');
            for await (const chunk of stream) {
                const content = chunk.choices[0]?.delta?.content;
                if (content) {
                    yield content;
                }
            }
            // Note: Streaming doesn't provide detailed usage stats
            // We increment request count but can't track exact tokens
            this.stats.requestCount++;
            this.stats.lastUpdated = new Date();
        }
        catch (error) {
            this.stats.failedRequests++;
            throw new provider_interface_js_1.LLMProviderError((0, provider_utils_js_1.sanitizeErrorMessage)(error, 'streamText'), this.name, undefined, error);
        }
    }
    /**
     * Check if Ollama is running and accessible.
     * Uses a 5-second model-list probe; never throws — returns a
     * { healthy, message } report instead.
     */
    async checkHealth() {
        try {
            // Try a simple request
            await (0, provider_utils_js_1.withTimeout)(this.client.models.list(), 5000, 'health check');
            return {
                healthy: true,
                message: `Ollama is running with model: ${this.model}`,
            };
        }
        catch (error) {
            return {
                healthy: false,
                message: `Ollama not accessible: ${(0, provider_utils_js_1.sanitizeErrorMessage)(error, 'health check')}`,
            };
        }
    }
    /**
     * List available models in Ollama.
     * Returns model ids; wraps failures in LLMProviderError.
     */
    async listModels() {
        try {
            const response = await (0, provider_utils_js_1.withTimeout)(this.client.models.list(), 5000, 'listModels');
            return response.data.map((model) => model.id);
        }
        catch (error) {
            throw new provider_interface_js_1.LLMProviderError((0, provider_utils_js_1.sanitizeErrorMessage)(error, 'listModels'), this.name, undefined, error);
        }
    }
}
|
|
269
|
+
exports.OllamaProvider = OllamaProvider;
/**
 * Helper function to check if Ollama is installed and suggest setup.
 * Returns { installed, running, modelAvailable, setupInstructions }.
 * NOTE(review): any throw from checkHealth/listModels (e.g. Ollama
 * installed but not reachable) lands in the catch and is reported as
 * "not installed" — confirm this coarse classification is intended.
 */
async function checkOllamaSetup() {
    // Empty config: provider falls back to http://localhost:11434/v1.
    const provider = new OllamaProvider({});
    try {
        const health = await provider.checkHealth();
        const models = await provider.listModels();
        return {
            installed: true,
            running: health.healthy,
            modelAvailable: models.length > 0,
            setupInstructions: health.healthy ? 'Ollama is ready to use!' : 'Run: ollama serve',
        };
    }
    catch {
        // Unreachable daemon: return step-by-step install instructions.
        return {
            installed: false,
            running: false,
            modelAvailable: false,
            setupInstructions: `
Ollama is not installed. To set up:

1. Install Ollama:
   curl -fsSL https://ollama.com/install.sh | sh

2. Pull a model (choose one):
   ollama pull deepseek-r1:7b    # Recommended: Fast, 4GB RAM
   ollama pull deepseek-r1:14b   # Better quality, 8GB RAM
   ollama pull llama4:13b        # Alternative, 8GB RAM

3. Start Ollama:
   ollama serve

For more info: https://ollama.com
`.trim(),
        };
    }
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import type { GenerateOptions, ImageInput, LLMResponse, OpenAIConfig, ProviderCapabilities } from './provider_interface.js';
import { BaseProvider } from './base_provider.js';
/**
 * OpenAI-backed LLM provider exposing text, vision and streaming
 * generation over the chat-completions API.
 */
export declare class OpenAIProvider extends BaseProvider {
    /** Provider identifier (the string 'openai'). */
    name: string;
    private client;
    private model;
    /** Static capability/cost flags for this provider instance. */
    capabilities: ProviderCapabilities;
    /** @throws if the config's API key or base URL is invalid. */
    constructor(config: OpenAIConfig);
    /** Generate a text completion for `prompt`. */
    generateText(prompt: string, options?: GenerateOptions): Promise<LLMResponse>;
    /** Analyze images with a text prompt; requires a vision-capable model. */
    analyzeImage(images: ImageInput[], prompt: string, options?: GenerateOptions): Promise<LLMResponse>;
    /** Stream completion text chunks as they arrive. */
    streamText(prompt: string, options?: GenerateOptions): AsyncGenerator<string, void, unknown>;
    private extractUsage;
    private extractStatusCode;
    /** Report whether the configured API/key is currently usable. */
    checkHealth(): Promise<{
        healthy: boolean;
        message: string;
    }>;
}
/** Check that `apiKey` works against the OpenAI API. */
export declare function checkOpenAISetup(apiKey: string): Promise<{
    valid: boolean;
    message: string;
}>;
//# sourceMappingURL=openai_provider.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"openai_provider.d.ts","sourceRoot":"","sources":["../src/openai_provider.ts"],"names":[],"mappings":"AAKA,OAAO,KAAK,EACR,eAAe,EACf,UAAU,EACV,WAAW,EACX,YAAY,EACZ,oBAAoB,EACvB,MAAM,yBAAyB,CAAC;AAGjC,OAAO,EAAC,YAAY,EAAC,MAAM,oBAAoB,CAAC;AAehD,qBAAa,cAAe,SAAQ,YAAY;IAC5C,IAAI,SAAY;IAChB,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,KAAK,CAAS;IAEtB,YAAY,EAAE,oBAAoB,CAAC;gBAEvB,MAAM,EAAE,YAAY;IAyC1B,YAAY,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,WAAW,CAAC;IA2D7E,YAAY,CAAC,MAAM,EAAE,UAAU,EAAE,EAAE,MAAM,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,OAAO,CAAC,WAAW,CAAC;IAgGlG,UAAU,CAAC,MAAM,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,eAAe,GAAG,cAAc,CAAC,MAAM,EAAE,IAAI,EAAE,OAAO,CAAC;IA8CnG,OAAO,CAAC,YAAY;IAYpB,OAAO,CAAC,iBAAiB;IAWnB,WAAW,IAAI,OAAO,CAAC;QAAC,OAAO,EAAE,OAAO,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAC,CAAC;CAuBpE;AAED,wBAAsB,gBAAgB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC;IAC5D,KAAK,EAAE,OAAO,CAAC;IACf,OAAO,EAAE,MAAM,CAAC;CACnB,CAAC,CAsBD"}
|
|
@@ -0,0 +1,250 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
|
3
|
+
// See LICENSE.txt for license information.
|
|
4
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
5
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
6
|
+
};
|
|
7
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
8
|
+
exports.OpenAIProvider = void 0;
|
|
9
|
+
exports.checkOpenAISetup = checkOpenAISetup;
|
|
10
|
+
const openai_1 = __importDefault(require("openai"));
|
|
11
|
+
const provider_interface_js_1 = require("./provider_interface.js");
|
|
12
|
+
const provider_utils_js_1 = require("./provider_utils.js");
|
|
13
|
+
const base_provider_js_1 = require("./base_provider.js");
|
|
14
|
+
const logger_js_1 = require("./logger.js");
|
|
15
|
+
/**
 * Heuristic: infer from the model name whether it likely accepts image
 * input (names containing "vision", "4o" or "omni").
 */
function inferVisionSupport(model) {
    const markers = ['vision', '4o', 'omni'];
    const lower = model.toLowerCase();
    return markers.some((marker) => lower.includes(marker));
}
|
|
19
|
+
class OpenAIProvider extends base_provider_js_1.BaseProvider {
|
|
20
|
+
constructor(config) {
|
|
21
|
+
super();
|
|
22
|
+
this.name = 'openai';
|
|
23
|
+
if (!provider_utils_js_1.API_KEY_PATTERNS.openai.test(config.apiKey)) {
|
|
24
|
+
throw new Error('Invalid API key format. Expected sk-* format.');
|
|
25
|
+
}
|
|
26
|
+
if (config.baseUrl) {
|
|
27
|
+
const validation = (0, provider_utils_js_1.validateAndSanitizeUrl)(config.baseUrl);
|
|
28
|
+
if (!validation.valid) {
|
|
29
|
+
throw new Error(`Invalid base URL: ${validation.warning}`);
|
|
30
|
+
}
|
|
31
|
+
if (validation.warning) {
|
|
32
|
+
logger_js_1.logger.warn(`HTTPS required for remote URLs: ${validation.warning}`);
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
this.client = new openai_1.default({
|
|
36
|
+
apiKey: config.apiKey,
|
|
37
|
+
baseURL: config.baseUrl,
|
|
38
|
+
organization: config.organizationId,
|
|
39
|
+
});
|
|
40
|
+
this.model = config.model || 'gpt-4';
|
|
41
|
+
const maxTokens = config.maxTokens || 128000;
|
|
42
|
+
const costPer1MInputTokens = config.costPer1MInputTokens ?? 0;
|
|
43
|
+
const costPer1MOutputTokens = config.costPer1MOutputTokens ?? 0;
|
|
44
|
+
this.capabilities = {
|
|
45
|
+
vision: inferVisionSupport(this.model),
|
|
46
|
+
streaming: true,
|
|
47
|
+
maxTokens,
|
|
48
|
+
costPer1MInputTokens,
|
|
49
|
+
costPer1MOutputTokens,
|
|
50
|
+
supportsTools: true,
|
|
51
|
+
supportsPromptCaching: false,
|
|
52
|
+
typicalResponseTimeMs: 1200,
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
async generateText(prompt, options) {
|
|
56
|
+
const startTime = Date.now();
|
|
57
|
+
try {
|
|
58
|
+
if (prompt.length > 10 * 1024 * 1024) {
|
|
59
|
+
throw new Error('Prompt exceeds maximum size (10MB)');
|
|
60
|
+
}
|
|
61
|
+
const messages = [];
|
|
62
|
+
if (options?.systemPrompt) {
|
|
63
|
+
messages.push({ role: 'system', content: options.systemPrompt });
|
|
64
|
+
}
|
|
65
|
+
messages.push({ role: 'user', content: prompt });
|
|
66
|
+
const response = await (0, provider_utils_js_1.withTimeout)(this.client.chat.completions.create({
|
|
67
|
+
model: this.model,
|
|
68
|
+
messages,
|
|
69
|
+
max_tokens: options?.maxTokens,
|
|
70
|
+
temperature: options?.temperature,
|
|
71
|
+
top_p: options?.topP,
|
|
72
|
+
stop: options?.stopSequences,
|
|
73
|
+
}), options?.timeout, 'generateText');
|
|
74
|
+
const responseTime = Date.now() - startTime;
|
|
75
|
+
const text = response.choices[0]?.message?.content || '';
|
|
76
|
+
const usage = this.extractUsage(response.usage);
|
|
77
|
+
const cost = this.calculateCost(usage, this.capabilities.costPer1MInputTokens, this.capabilities.costPer1MOutputTokens);
|
|
78
|
+
this.updateStats(usage, responseTime, cost);
|
|
79
|
+
return {
|
|
80
|
+
text,
|
|
81
|
+
usage,
|
|
82
|
+
cost,
|
|
83
|
+
metadata: {
|
|
84
|
+
model: this.model,
|
|
85
|
+
responseTimeMs: responseTime,
|
|
86
|
+
finishReason: response.choices[0]?.finish_reason,
|
|
87
|
+
},
|
|
88
|
+
};
|
|
89
|
+
}
|
|
90
|
+
catch (error) {
|
|
91
|
+
this.stats.failedRequests++;
|
|
92
|
+
throw new provider_interface_js_1.LLMProviderError((0, provider_utils_js_1.sanitizeErrorMessage)(error, 'generateText'), this.name, this.extractStatusCode(error), error);
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
async analyzeImage(images, prompt, options) {
|
|
96
|
+
if (!this.capabilities.vision) {
|
|
97
|
+
throw new provider_interface_js_1.UnsupportedCapabilityError(this.name, 'vision');
|
|
98
|
+
}
|
|
99
|
+
const startTime = Date.now();
|
|
100
|
+
try {
|
|
101
|
+
if (images.length === 0 || images.length > 20) {
|
|
102
|
+
throw new Error('Image count must be between 1 and 20');
|
|
103
|
+
}
|
|
104
|
+
if (prompt.length > 10 * 1024 * 1024) {
|
|
105
|
+
throw new Error('Prompt exceeds maximum size (10MB)');
|
|
106
|
+
}
|
|
107
|
+
const content = [{ type: 'text', text: prompt }];
|
|
108
|
+
for (const image of images) {
|
|
109
|
+
const mediaType = (image.mimeType || image.mediaType || 'image/png');
|
|
110
|
+
if (!['image/png', 'image/jpeg', 'image/webp'].includes(mediaType)) {
|
|
111
|
+
throw new Error(`Unsupported image type: ${mediaType}`);
|
|
112
|
+
}
|
|
113
|
+
const data = image.data || image.base64 || '';
|
|
114
|
+
if (data.length > 20 * 1024 * 1024) {
|
|
115
|
+
throw new Error('Image data exceeds maximum size (20MB)');
|
|
116
|
+
}
|
|
117
|
+
const url = `data:${mediaType};base64,${data}`;
|
|
118
|
+
content.push({ type: 'image_url', image_url: { url } });
|
|
119
|
+
if (image.description) {
|
|
120
|
+
content.push({ type: 'text', text: `[Image: ${image.description}]` });
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
const messages = [];
|
|
124
|
+
if (options?.systemPrompt) {
|
|
125
|
+
messages.push({ role: 'system', content: options.systemPrompt });
|
|
126
|
+
}
|
|
127
|
+
messages.push({ role: 'user', content });
|
|
128
|
+
const response = await (0, provider_utils_js_1.withTimeout)(this.client.chat.completions.create({
|
|
129
|
+
model: this.model,
|
|
130
|
+
messages,
|
|
131
|
+
max_tokens: options?.maxTokens,
|
|
132
|
+
temperature: options?.temperature,
|
|
133
|
+
top_p: options?.topP,
|
|
134
|
+
stop: options?.stopSequences,
|
|
135
|
+
}), options?.timeout, 'analyzeImage');
|
|
136
|
+
const responseTime = Date.now() - startTime;
|
|
137
|
+
const text = response.choices[0]?.message?.content || '';
|
|
138
|
+
const usage = this.extractUsage(response.usage);
|
|
139
|
+
const cost = this.calculateCost(usage, this.capabilities.costPer1MInputTokens, this.capabilities.costPer1MOutputTokens);
|
|
140
|
+
this.updateStats(usage, responseTime, cost);
|
|
141
|
+
return {
|
|
142
|
+
text,
|
|
143
|
+
usage,
|
|
144
|
+
cost,
|
|
145
|
+
metadata: {
|
|
146
|
+
model: this.model,
|
|
147
|
+
responseTimeMs: responseTime,
|
|
148
|
+
finishReason: response.choices[0]?.finish_reason,
|
|
149
|
+
imageCount: images.length,
|
|
150
|
+
},
|
|
151
|
+
};
|
|
152
|
+
}
|
|
153
|
+
catch (error) {
|
|
154
|
+
this.stats.failedRequests++;
|
|
155
|
+
throw new provider_interface_js_1.LLMProviderError((0, provider_utils_js_1.sanitizeErrorMessage)(error, 'analyzeImage'), this.name, this.extractStatusCode(error), error);
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
async *streamText(prompt, options) {
|
|
159
|
+
try {
|
|
160
|
+
if (prompt.length > 10 * 1024 * 1024) {
|
|
161
|
+
throw new Error('Prompt exceeds maximum size (10MB)');
|
|
162
|
+
}
|
|
163
|
+
const messages = [];
|
|
164
|
+
if (options?.systemPrompt) {
|
|
165
|
+
messages.push({ role: 'system', content: options.systemPrompt });
|
|
166
|
+
}
|
|
167
|
+
messages.push({ role: 'user', content: prompt });
|
|
168
|
+
const stream = await (0, provider_utils_js_1.withTimeout)(this.client.chat.completions.create({
|
|
169
|
+
model: this.model,
|
|
170
|
+
messages,
|
|
171
|
+
max_tokens: options?.maxTokens,
|
|
172
|
+
temperature: options?.temperature,
|
|
173
|
+
top_p: options?.topP,
|
|
174
|
+
stop: options?.stopSequences,
|
|
175
|
+
stream: true,
|
|
176
|
+
}), options?.timeout, 'streamText');
|
|
177
|
+
for await (const chunk of stream) {
|
|
178
|
+
const content = chunk.choices[0]?.delta?.content;
|
|
179
|
+
if (content) {
|
|
180
|
+
yield content;
|
|
181
|
+
}
|
|
182
|
+
}
|
|
183
|
+
this.stats.requestCount++;
|
|
184
|
+
this.stats.lastUpdated = new Date();
|
|
185
|
+
}
|
|
186
|
+
catch (error) {
|
|
187
|
+
this.stats.failedRequests++;
|
|
188
|
+
throw new provider_interface_js_1.LLMProviderError((0, provider_utils_js_1.sanitizeErrorMessage)(error, 'streamText'), this.name, this.extractStatusCode(error), error);
|
|
189
|
+
}
|
|
190
|
+
}
|
|
191
|
+
extractUsage(usage) {
|
|
192
|
+
return {
|
|
193
|
+
inputTokens: usage?.prompt_tokens || 0,
|
|
194
|
+
outputTokens: usage?.completion_tokens || 0,
|
|
195
|
+
totalTokens: usage?.total_tokens || 0,
|
|
196
|
+
};
|
|
197
|
+
}
|
|
198
|
+
/**
 * Pull a numeric HTTP status code off an error object when present.
 *
 * @param {unknown} error - Value caught from an API call.
 * @returns {number|undefined} The `status` property if it is a number.
 */
extractStatusCode(error) {
    // Reject null and every non-object (typeof null === 'object', so
    // null is excluded explicitly).
    if (error === null || typeof error !== 'object') {
        return undefined;
    }
    const { status } = error;
    return typeof status === 'number' ? status : undefined;
}
|
|
208
|
+
async checkHealth() {
|
|
209
|
+
try {
|
|
210
|
+
await (0, provider_utils_js_1.withTimeout)(this.client.chat.completions.create({
|
|
211
|
+
model: this.model,
|
|
212
|
+
max_tokens: 5,
|
|
213
|
+
messages: [{ role: 'user', content: 'Hi' }],
|
|
214
|
+
}), 5000, 'health check');
|
|
215
|
+
return {
|
|
216
|
+
healthy: true,
|
|
217
|
+
message: 'OpenAI API is accessible',
|
|
218
|
+
};
|
|
219
|
+
}
|
|
220
|
+
catch (error) {
|
|
221
|
+
return {
|
|
222
|
+
healthy: false,
|
|
223
|
+
message: `OpenAI API error: ${(0, provider_utils_js_1.sanitizeErrorMessage)(error, 'health check')}`,
|
|
224
|
+
};
|
|
225
|
+
}
|
|
226
|
+
}
|
|
227
|
+
}
|
|
228
|
+
// CommonJS export so consumers can `require(...).OpenAIProvider`.
exports.OpenAIProvider = OpenAIProvider;
|
|
229
|
+
/**
 * Validate an OpenAI API key by instantiating a provider and running its
 * health check.
 *
 * @param {string|undefined} apiKey - Key to validate; falsy means "not set".
 * @returns {Promise<{valid: boolean, message: string}>} Never rejects;
 *   all failures are folded into `valid: false` with a sanitized message.
 */
async function checkOpenAISetup(apiKey) {
    if (!apiKey) {
        return { valid: false, message: 'No API key provided' };
    }
    try {
        const health = await new OpenAIProvider({ apiKey }).checkHealth();
        return { valid: health.healthy, message: health.message };
    }
    catch (error) {
        // Construction itself may throw (e.g. malformed config); sanitize it.
        const detail = (0, provider_utils_js_1.sanitizeErrorMessage)(error, 'setup check');
        return { valid: false, message: `Setup check failed: ${detail}` };
    }
}
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
/**
 * Centralized constants for plan-and-test command
 * Eliminates magic strings and hardcoded values
 * Makes configuration easy to modify and maintain
 */
export declare const PLANNING_CONFIG: {
    /** Number of test scenarios generated per priority level. */
    readonly SCENARIO_COUNTS: {
        readonly P0: 3;
        readonly P1: 2;
        readonly P2: 1;
    };
    /** Default option values applied when the caller supplies none. */
    readonly DEFAULTS: {
        readonly MAX_TESTS: 10;
        readonly COVERAGE_THRESHOLD: 50;
        /** Priorities considered by default; P2 is opt-in. */
        readonly PRIORITY_FILTER: readonly ["P0", "P1"];
    };
    /** Inclusive validation bounds for user-supplied option values. */
    readonly LIMITS: {
        readonly MIN_MAX_TESTS: 1;
        readonly MAX_MAX_TESTS: 100;
        readonly MIN_COVERAGE_THRESHOLD: 0;
        readonly MAX_COVERAGE_THRESHOLD: 100;
    };
};
|
|
24
|
+
/**
 * Display messages for plan-and-test command
 *
 * Grouped by CLI phase. Entries are either fixed literal strings or
 * formatter functions that interpolate counts/limits into display text.
 */
export declare const PLAN_AND_TEST_MESSAGES: {
    /** Banner and per-step headers printed as the command progresses. */
    readonly HEADER: {
        readonly MAIN: "🚀 Planning and Generating Tests";
        readonly STEP_1_ANALYSIS: "📊 Step 1: Analyzing Code Changes...";
        readonly STEP_2_PLANNING: "💡 Step 2: Planning Test Generation...";
        // NOTE(review): "âš¡" looks like mojibake for "⚡" (UTF-8 bytes
        // decoded as Latin-1). This literal type must stay byte-identical to
        // the value in the emitted .js, so confirm both files before fixing.
        readonly STEP_3_GENERATION: "âš¡ Step 3: Generating Tests...";
        readonly STEP_4_SUMMARY: "📈 Generation Summary";
        readonly FOUND_FLOW_GROUPS: (count: number) => string;
    };
    /** Messages for the change-analysis step. */
    readonly ANALYSIS: {
        readonly FOUND_FLOWS: (count: number) => string;
        readonly MORE_FLOWS: (count: number) => string;
        readonly FOUND_FLOW_GROUPS: (count: number) => string;
    };
    /** Messages for the planning step. */
    readonly PLANNING: {
        readonly PLAN_CREATED: (count: number) => string;
        readonly SKIPPING_COUNT: (count: number) => string;
    };
    /** Explanations for why a planned test was skipped. */
    readonly SKIP_REASONS: {
        readonly MAX_LIMIT_REACHED: (limit: number) => string;
        readonly ALREADY_COVERED: (count: number) => string;
    };
    /** Explanations for why a flow needs coverage. */
    readonly COVERAGE_REASONS: {
        readonly NO_COVERAGE: (priority: string) => string;
        readonly PARTIAL_COVERAGE: (priority: string, gaps: number) => string;
    };
    /** Messages emitted while generating individual tests. */
    readonly EXECUTION: {
        readonly TEST_COUNT: (index: number, total: number) => string;
        readonly GENERATION_FAILED: (error: string) => string;
        readonly DRY_RUN_MODE: "📋 DRY RUN: Not executing. Run without --dry-run to generate tests.";
    };
    /** Final run summary lines. */
    readonly SUMMARY: {
        readonly SEPARATOR: string;
        readonly TOTAL_GENERATED: (count: number) => string;
        readonly SUCCESSFUL: (successful: number, total: number) => string;
        readonly TOTAL_SCENARIOS: (count: number) => string;
        readonly COMPLETION: "✅ Execution complete!";
    };
    /** Suggested follow-up commands printed after a successful run. */
    readonly NEXT_STEPS: readonly [" • Run tests: npx playwright test --grep @smoke", " • Re-run impact: npx e2e-ai-agents impact --path <app-root> --tests-root <tests-root>", " • Check coverage: npm run test:impact"];
    /** Validation and no-op error messages. */
    readonly ERRORS: {
        readonly INVALID_PRIORITY: (priorities: string[]) => string;
        readonly INVALID_MAX_TESTS: (min: number, max: number) => string;
        readonly INVALID_COVERAGE_THRESHOLD: (min: number, max: number) => string;
        readonly NO_CHANGES_DETECTED: "✓ No significant changes detected";
    };
};
|
|
73
|
+
/**
 * Priority levels in the system
 * Maps semantic names to the P0/P1/P2 codes used throughout planning.
 */
export declare const PRIORITY_LEVELS: {
    readonly CRITICAL: "P0";
    readonly HIGH: "P1";
    readonly MEDIUM: "P2";
};
/**
 * Test strategy types for flow groups
 * Controls how scenarios within a group are ordered/executed.
 */
export declare const TEST_STRATEGIES: {
    readonly SEQUENTIAL: "sequential";
    readonly PARALLEL: "parallel";
    readonly MIXED: "mixed";
};
/**
 * Flow group types
 * Identifiers for known groupings of related user flows.
 */
export declare const FLOW_GROUP_TYPES: {
    readonly MESSAGING_LIFECYCLE: "messaging-lifecycle";
    readonly CHANNEL_MANAGEMENT: "channel-management";
    readonly MESSAGING_INTERACTIONS: "messaging-interactions";
};
|
|
97
|
+
/**
 * Utility function to get scenario count for a priority
 * Provides type-safe access to scenario counts
 *
 * @param priority - Priority code (e.g. "P0", "P1", "P2")
 * @returns Number of scenarios to generate for that priority
 */
export declare function getScenarioCount(priority: string): number;
/**
 * Utility function to validate priority level
 *
 * @param priority - Candidate priority code
 * @returns True when the code is a recognized priority level
 */
export declare function isValidPriority(priority: string): boolean;
/**
 * Utility function to get all valid priority levels
 *
 * @returns The recognized priority codes
 */
export declare function getValidPriorities(): string[];
|
|
110
|
+
//# sourceMappingURL=plan-and-test-constants.d.ts.map
|