@yasserkhanorg/e2e-agents 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +168 -0
- package/README.md +620 -0
- package/dist/agent/analysis.d.ts +62 -0
- package/dist/agent/analysis.d.ts.map +1 -0
- package/dist/agent/analysis.js +292 -0
- package/dist/agent/blast_radius.d.ts +4 -0
- package/dist/agent/blast_radius.d.ts.map +1 -0
- package/dist/agent/blast_radius.js +37 -0
- package/dist/agent/cache_utils.d.ts +38 -0
- package/dist/agent/cache_utils.d.ts.map +1 -0
- package/dist/agent/cache_utils.js +67 -0
- package/dist/agent/config.d.ts +148 -0
- package/dist/agent/config.d.ts.map +1 -0
- package/dist/agent/config.js +640 -0
- package/dist/agent/dependency_graph.d.ts +14 -0
- package/dist/agent/dependency_graph.d.ts.map +1 -0
- package/dist/agent/dependency_graph.js +227 -0
- package/dist/agent/feedback.d.ts +55 -0
- package/dist/agent/feedback.d.ts.map +1 -0
- package/dist/agent/feedback.js +257 -0
- package/dist/agent/flags.d.ts +23 -0
- package/dist/agent/flags.d.ts.map +1 -0
- package/dist/agent/flags.js +171 -0
- package/dist/agent/flow_catalog.d.ts +25 -0
- package/dist/agent/flow_catalog.d.ts.map +1 -0
- package/dist/agent/flow_catalog.js +106 -0
- package/dist/agent/flow_mapping.d.ts +10 -0
- package/dist/agent/flow_mapping.d.ts.map +1 -0
- package/dist/agent/flow_mapping.js +84 -0
- package/dist/agent/framework.d.ts +13 -0
- package/dist/agent/framework.d.ts.map +1 -0
- package/dist/agent/framework.js +149 -0
- package/dist/agent/gap_suggestions.d.ts +14 -0
- package/dist/agent/gap_suggestions.d.ts.map +1 -0
- package/dist/agent/gap_suggestions.js +101 -0
- package/dist/agent/generator.d.ts +10 -0
- package/dist/agent/generator.d.ts.map +1 -0
- package/dist/agent/generator.js +115 -0
- package/dist/agent/git.d.ts +11 -0
- package/dist/agent/git.d.ts.map +1 -0
- package/dist/agent/git.js +90 -0
- package/dist/agent/handoff.d.ts +22 -0
- package/dist/agent/handoff.d.ts.map +1 -0
- package/dist/agent/handoff.js +180 -0
- package/dist/agent/impact-analyzer.d.ts +114 -0
- package/dist/agent/impact-analyzer.d.ts.map +1 -0
- package/dist/agent/impact-analyzer.js +557 -0
- package/dist/agent/index.d.ts +21 -0
- package/dist/agent/index.d.ts.map +1 -0
- package/dist/agent/index.js +38 -0
- package/dist/agent/model-router.d.ts +57 -0
- package/dist/agent/model-router.d.ts.map +1 -0
- package/dist/agent/model-router.js +154 -0
- package/dist/agent/operational_insights.d.ts +41 -0
- package/dist/agent/operational_insights.d.ts.map +1 -0
- package/dist/agent/operational_insights.js +126 -0
- package/dist/agent/pipeline.d.ts +23 -0
- package/dist/agent/pipeline.d.ts.map +1 -0
- package/dist/agent/pipeline.js +609 -0
- package/dist/agent/plan.d.ts +91 -0
- package/dist/agent/plan.d.ts.map +1 -0
- package/dist/agent/plan.js +331 -0
- package/dist/agent/playwright_report.d.ts +8 -0
- package/dist/agent/playwright_report.d.ts.map +1 -0
- package/dist/agent/playwright_report.js +126 -0
- package/dist/agent/report-generator.d.ts +24 -0
- package/dist/agent/report-generator.d.ts.map +1 -0
- package/dist/agent/report-generator.js +250 -0
- package/dist/agent/report.d.ts +81 -0
- package/dist/agent/report.d.ts.map +1 -0
- package/dist/agent/report.js +147 -0
- package/dist/agent/runner.d.ts +7 -0
- package/dist/agent/runner.d.ts.map +1 -0
- package/dist/agent/runner.js +576 -0
- package/dist/agent/selectors.d.ts +10 -0
- package/dist/agent/selectors.d.ts.map +1 -0
- package/dist/agent/selectors.js +75 -0
- package/dist/agent/spec-bridge.d.ts +101 -0
- package/dist/agent/spec-bridge.d.ts.map +1 -0
- package/dist/agent/spec-bridge.js +273 -0
- package/dist/agent/spec-builder.d.ts +102 -0
- package/dist/agent/spec-builder.d.ts.map +1 -0
- package/dist/agent/spec-builder.js +273 -0
- package/dist/agent/subsystem_risk.d.ts +23 -0
- package/dist/agent/subsystem_risk.d.ts.map +1 -0
- package/dist/agent/subsystem_risk.js +207 -0
- package/dist/agent/telemetry.d.ts +84 -0
- package/dist/agent/telemetry.d.ts.map +1 -0
- package/dist/agent/telemetry.js +220 -0
- package/dist/agent/test_path.d.ts +2 -0
- package/dist/agent/test_path.d.ts.map +1 -0
- package/dist/agent/test_path.js +23 -0
- package/dist/agent/tests.d.ts +18 -0
- package/dist/agent/tests.d.ts.map +1 -0
- package/dist/agent/tests.js +106 -0
- package/dist/agent/traceability.d.ts +22 -0
- package/dist/agent/traceability.d.ts.map +1 -0
- package/dist/agent/traceability.js +183 -0
- package/dist/agent/traceability_capture.d.ts +18 -0
- package/dist/agent/traceability_capture.d.ts.map +1 -0
- package/dist/agent/traceability_capture.js +313 -0
- package/dist/agent/traceability_ingest.d.ts +21 -0
- package/dist/agent/traceability_ingest.d.ts.map +1 -0
- package/dist/agent/traceability_ingest.js +237 -0
- package/dist/agent/utils.d.ts +13 -0
- package/dist/agent/utils.d.ts.map +1 -0
- package/dist/agent/utils.js +152 -0
- package/dist/agent/validators/selector-validator.d.ts +74 -0
- package/dist/agent/validators/selector-validator.d.ts.map +1 -0
- package/dist/agent/validators/selector-validator.js +165 -0
- package/dist/anthropic_provider.d.ts +65 -0
- package/dist/anthropic_provider.d.ts.map +1 -0
- package/dist/anthropic_provider.js +332 -0
- package/dist/api.d.ts +48 -0
- package/dist/api.d.ts.map +1 -0
- package/dist/api.js +113 -0
- package/dist/base_provider.d.ts +53 -0
- package/dist/base_provider.d.ts.map +1 -0
- package/dist/base_provider.js +81 -0
- package/dist/cli.d.ts +3 -0
- package/dist/cli.d.ts.map +1 -0
- package/dist/cli.js +843 -0
- package/dist/custom_provider.d.ts +20 -0
- package/dist/custom_provider.d.ts.map +1 -0
- package/dist/custom_provider.js +276 -0
- package/dist/e2e-test-gen/index.d.ts +51 -0
- package/dist/e2e-test-gen/index.d.ts.map +1 -0
- package/dist/e2e-test-gen/index.js +57 -0
- package/dist/e2e-test-gen/spec_parser.d.ts +142 -0
- package/dist/e2e-test-gen/spec_parser.d.ts.map +1 -0
- package/dist/e2e-test-gen/spec_parser.js +786 -0
- package/dist/e2e-test-gen/types.d.ts +185 -0
- package/dist/e2e-test-gen/types.d.ts.map +1 -0
- package/dist/e2e-test-gen/types.js +4 -0
- package/dist/esm/agent/analysis.js +287 -0
- package/dist/esm/agent/blast_radius.js +34 -0
- package/dist/esm/agent/cache_utils.js +63 -0
- package/dist/esm/agent/config.js +637 -0
- package/dist/esm/agent/dependency_graph.js +224 -0
- package/dist/esm/agent/feedback.js +253 -0
- package/dist/esm/agent/flags.js +160 -0
- package/dist/esm/agent/flow_catalog.js +103 -0
- package/dist/esm/agent/flow_mapping.js +81 -0
- package/dist/esm/agent/framework.js +145 -0
- package/dist/esm/agent/gap_suggestions.js +98 -0
- package/dist/esm/agent/generator.js +112 -0
- package/dist/esm/agent/git.js +87 -0
- package/dist/esm/agent/handoff.js +177 -0
- package/dist/esm/agent/impact-analyzer.js +548 -0
- package/dist/esm/agent/index.js +22 -0
- package/dist/esm/agent/model-router.js +150 -0
- package/dist/esm/agent/operational_insights.js +123 -0
- package/dist/esm/agent/pipeline.js +605 -0
- package/dist/esm/agent/plan.js +324 -0
- package/dist/esm/agent/playwright_report.js +123 -0
- package/dist/esm/agent/report-generator.js +247 -0
- package/dist/esm/agent/report.js +144 -0
- package/dist/esm/agent/runner.js +572 -0
- package/dist/esm/agent/selectors.js +71 -0
- package/dist/esm/agent/spec-bridge.js +267 -0
- package/dist/esm/agent/spec-builder.js +267 -0
- package/dist/esm/agent/subsystem_risk.js +204 -0
- package/dist/esm/agent/telemetry.js +216 -0
- package/dist/esm/agent/test_path.js +20 -0
- package/dist/esm/agent/tests.js +101 -0
- package/dist/esm/agent/traceability.js +180 -0
- package/dist/esm/agent/traceability_capture.js +310 -0
- package/dist/esm/agent/traceability_ingest.js +234 -0
- package/dist/esm/agent/utils.js +138 -0
- package/dist/esm/agent/validators/selector-validator.js +160 -0
- package/dist/esm/anthropic_provider.js +324 -0
- package/dist/esm/api.js +105 -0
- package/dist/esm/base_provider.js +77 -0
- package/dist/esm/cli.js +841 -0
- package/dist/esm/custom_provider.js +272 -0
- package/dist/esm/e2e-test-gen/index.js +50 -0
- package/dist/esm/e2e-test-gen/spec_parser.js +782 -0
- package/dist/esm/e2e-test-gen/types.js +3 -0
- package/dist/esm/index.js +16 -0
- package/dist/esm/logger.js +89 -0
- package/dist/esm/mcp-server.js +465 -0
- package/dist/esm/ollama_provider.js +300 -0
- package/dist/esm/openai_provider.js +242 -0
- package/dist/esm/package.json +3 -0
- package/dist/esm/plan-and-test-constants.js +126 -0
- package/dist/esm/provider_factory.js +336 -0
- package/dist/esm/provider_interface.js +23 -0
- package/dist/esm/provider_utils.js +96 -0
- package/dist/index.d.ts +31 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +41 -0
- package/dist/logger.d.ts +23 -0
- package/dist/logger.d.ts.map +1 -0
- package/dist/logger.js +93 -0
- package/dist/mcp-server.d.ts +35 -0
- package/dist/mcp-server.d.ts.map +1 -0
- package/dist/mcp-server.js +469 -0
- package/dist/ollama_provider.d.ts +65 -0
- package/dist/ollama_provider.d.ts.map +1 -0
- package/dist/ollama_provider.js +308 -0
- package/dist/openai_provider.d.ts +23 -0
- package/dist/openai_provider.d.ts.map +1 -0
- package/dist/openai_provider.js +250 -0
- package/dist/plan-and-test-constants.d.ts +110 -0
- package/dist/plan-and-test-constants.d.ts.map +1 -0
- package/dist/plan-and-test-constants.js +132 -0
- package/dist/provider_factory.d.ts +99 -0
- package/dist/provider_factory.d.ts.map +1 -0
- package/dist/provider_factory.js +341 -0
- package/dist/provider_interface.d.ts +358 -0
- package/dist/provider_interface.d.ts.map +1 -0
- package/dist/provider_interface.js +28 -0
- package/dist/provider_utils.d.ts +39 -0
- package/dist/provider_utils.d.ts.map +1 -0
- package/dist/provider_utils.js +103 -0
- package/package.json +101 -0
- package/schemas/gap.schema.json +18 -0
- package/schemas/impact.schema.json +418 -0
- package/schemas/plan.schema.json +285 -0
- package/schemas/subsystem-risk-map.schema.json +62 -0
- package/schemas/traceability-input.schema.json +122 -0
|
@@ -0,0 +1,300 @@
|
|
|
1
|
+
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
|
2
|
+
// See LICENSE.txt for license information.
|
|
3
|
+
import OpenAI from 'openai';
|
|
4
|
+
import { LLMProviderError, UnsupportedCapabilityError } from './provider_interface.js';
|
|
5
|
+
import { sanitizeErrorMessage, withTimeout } from './provider_utils.js';
|
|
6
|
+
import { BaseProvider } from './base_provider.js';
|
|
7
|
+
import { logger } from './logger.js';
|
|
8
|
+
/**
|
|
9
|
+
* SECURITY: Validate Ollama base URL and enforce HTTPS for remote connections
|
|
10
|
+
*/
|
|
11
|
+
/**
 * Normalize an Ollama base URL: default to the local daemon, ensure the
 * OpenAI-compatible `/v1` path is present, and drop a trailing slash.
 * Unparseable input falls back to the default local endpoint.
 */
function normalizeOllamaBaseUrl(baseUrl) {
    const candidate = baseUrl || 'http://localhost:11434';
    let parsed;
    try {
        parsed = new URL(candidate);
    }
    catch {
        // Not a valid URL at all — use the default local endpoint.
        return 'http://localhost:11434/v1';
    }
    const hasExplicitPath = Boolean(parsed.pathname) && parsed.pathname !== '/';
    if (!hasExplicitPath) {
        parsed.pathname = '/v1';
    }
    return parsed.toString().replace(/\/$/, '');
}
|
|
24
|
+
/**
 * Validate an Ollama base URL after normalization.
 * Warns (via the shared logger) when a remote server is reached over
 * plaintext HTTP; never throws — invalid input falls back to the default
 * local endpoint with a warning message in the result.
 *
 * @param {string|undefined} baseUrl - raw URL from configuration
 * @returns {{valid: boolean, url: string, warning?: string}}
 */
function validateOllamaUrl(baseUrl) {
    const url = normalizeOllamaBaseUrl(baseUrl);
    try {
        const parsed = new URL(url);
        // For non-localhost URLs, warn about HTTP risks.
        // FIX: the WHATWG URL parser reports IPv6 hostnames with brackets,
        // so the IPv6 loopback appears as '[::1]', never '::1'. Without the
        // bracketed form, local IPv6 connections were wrongly flagged remote.
        const isLocalhost = parsed.hostname === 'localhost' ||
            parsed.hostname === '127.0.0.1' ||
            parsed.hostname === '::1' ||
            parsed.hostname === '[::1]';
        if (!isLocalhost && parsed.protocol === 'http:') {
            logger.warn('Ollama connection over plaintext HTTP to remote server. ' +
                'Prompts and responses will be transmitted unencrypted. Consider using HTTPS proxy or local Ollama.');
        }
        return { valid: true, url };
    }
    catch {
        // Defensive: normalizeOllamaBaseUrl already returns a parseable URL,
        // but keep the fallback in case that invariant ever changes.
        return {
            valid: false,
            url: 'http://localhost:11434/v1',
            warning: `Invalid Ollama URL: ${baseUrl}. Using default: http://localhost:11434/v1`,
        };
    }
}
|
|
44
|
+
/**
|
|
45
|
+
* SECURITY: Validate model name to prevent injection issues
|
|
46
|
+
*/
|
|
47
|
+
/**
 * SECURITY: Validate a model name to prevent injection issues.
 * Accepts the characters seen in typical Ollama model tags
 * (e.g. deepseek-r1:7b, llama4:13b) and rejects overlong names.
 *
 * @param {string} model - candidate model identifier
 * @returns {boolean} true when the name is safe to use
 */
function validateModelName(model) {
    const MAX_NAME_LENGTH = 256;
    // Alphanumeric plus dash, colon, underscore, and dot (case-insensitive).
    const SAFE_NAME = /^[a-z0-9_:.\-]+$/i;
    return model.length < MAX_NAME_LENGTH && SAFE_NAME.test(model);
}
|
|
52
|
+
/**
|
|
53
|
+
* SECURITY: Validate timeout value
|
|
54
|
+
*/
|
|
55
|
+
/**
 * SECURITY: Validate a request timeout value.
 * Missing/zero values and anything outside 1 second – 10 minutes fall back
 * to the 60-second default (out-of-range values also log a warning).
 *
 * @param {number|undefined} timeout - timeout in milliseconds
 * @returns {number} a timeout safe to hand to the HTTP client
 */
function validateTimeout(timeout) {
    const DEFAULT_TIMEOUT_MS = 60000;
    if (!timeout) {
        return DEFAULT_TIMEOUT_MS;
    }
    const withinRange = timeout >= 1000 && timeout <= 600000;
    if (withinRange) {
        return timeout;
    }
    logger.warn('Timeout out of valid range (1s-10m). Using 60 second default.');
    return DEFAULT_TIMEOUT_MS;
}
|
|
64
|
+
/**
|
|
65
|
+
* Ollama Provider - Free, local LLM execution
|
|
66
|
+
*
|
|
67
|
+
* Features:
|
|
68
|
+
* - Zero cost (runs locally)
|
|
69
|
+
* - Full privacy (no data leaves your machine)
|
|
70
|
+
* - OpenAI-compatible API
|
|
71
|
+
* - Supports DeepSeek-R1, Llama 4, and other open models
|
|
72
|
+
*
|
|
73
|
+
* Limitations:
|
|
74
|
+
* - No vision support (most models)
|
|
75
|
+
* - Slower inference than cloud APIs (~2-5 sec vs <1 sec)
|
|
76
|
+
* - Requires local installation and model downloads
|
|
77
|
+
*
|
|
78
|
+
* Recommended models:
|
|
79
|
+
* - deepseek-r1:7b - Fast, good quality, low memory (4GB)
|
|
80
|
+
* - deepseek-r1:14b - Better quality, medium memory (8GB)
|
|
81
|
+
* - llama4:13b - High quality, medium memory (8GB)
|
|
82
|
+
* - deepseek-r1:7b-q4 - Quantized for speed, lower quality
|
|
83
|
+
*
|
|
84
|
+
* Setup:
|
|
85
|
+
* 1. Install Ollama: curl -fsSL https://ollama.com/install.sh | sh
|
|
86
|
+
* 2. Pull model: ollama pull deepseek-r1:7b
|
|
87
|
+
* 3. Start: ollama serve (runs on localhost:11434)
|
|
88
|
+
*/
|
|
89
|
+
export class OllamaProvider extends BaseProvider {
    /**
     * @param {object} config - provider configuration
     * @param {string} [config.baseUrl] - Ollama endpoint (default http://localhost:11434/v1)
     * @param {string} [config.model] - model tag (default deepseek-r1:7b)
     * @param {number} [config.timeout] - request timeout in ms (clamped to 1s-10m)
     * @throws {Error} when the model name fails validation
     */
    constructor(config) {
        super();
        this.name = 'ollama';
        this.capabilities = {
            vision: false, // Most Ollama models don't support vision
            streaming: true,
            maxTokens: 8000, // Varies by model
            costPer1MInputTokens: 0, // Free!
            costPer1MOutputTokens: 0, // Free!
            supportsTools: true, // DeepSeek, Llama 4 support function calling
            supportsPromptCaching: false,
            typicalResponseTimeMs: 3000, // ~2-5 seconds on decent hardware
        };
        // SECURITY: Validate and sanitize URL
        const urlValidation = validateOllamaUrl(config.baseUrl);
        if (!urlValidation.valid && urlValidation.warning) {
            // FIX: route through the shared logger (was console.warn) so the
            // warning respects the application's logging configuration, as
            // every other warning in this module does.
            logger.warn(urlValidation.warning);
        }
        // SECURITY: Validate timeout
        const timeout = validateTimeout(config.timeout);
        // Ollama uses OpenAI-compatible API
        this.client = new OpenAI({
            baseURL: urlValidation.url,
            apiKey: 'ollama', // Ollama doesn't require real API key
            timeout,
            maxRetries: 0, // Don't retry to avoid hanging on connection issues
        });
        const model = config.model || 'deepseek-r1:7b';
        // SECURITY: Validate model name format
        if (!validateModelName(model)) {
            throw new Error('Invalid model name format');
        }
        this.model = model;
    }
    /**
     * Generate a text completion for the given prompt.
     * @param {string} prompt - user prompt (max 10MB)
     * @param {object} [options] - systemPrompt, maxTokens, temperature, topP, stopSequences, timeout
     * @returns {Promise<{text: string, usage: object, cost: number, metadata: object}>}
     * @throws {LLMProviderError} on any request failure (message is sanitized)
     */
    async generateText(prompt, options) {
        const startTime = Date.now();
        try {
            // SECURITY: Validate prompt length
            if (prompt.length > 10 * 1024 * 1024) {
                throw new Error('Prompt exceeds maximum size (10MB)');
            }
            const messages = [];
            // Add system message if provided
            if (options?.systemPrompt) {
                messages.push({
                    role: 'system',
                    content: options.systemPrompt,
                });
            }
            // Add user prompt
            messages.push({
                role: 'user',
                content: prompt,
            });
            const response = await withTimeout(this.client.chat.completions.create({
                model: this.model,
                messages,
                max_tokens: options?.maxTokens,
                temperature: options?.temperature,
                top_p: options?.topP,
                stop: options?.stopSequences,
            }), options?.timeout, 'generateText');
            const responseTime = Date.now() - startTime;
            const text = response.choices[0]?.message?.content || '';
            const usage = {
                inputTokens: response.usage?.prompt_tokens || 0,
                outputTokens: response.usage?.completion_tokens || 0,
                totalTokens: response.usage?.total_tokens || 0,
            };
            // Update stats
            this.updateStats(usage, responseTime, 0); // Cost is always 0 for Ollama
            return {
                text,
                usage,
                cost: 0, // Free!
                metadata: {
                    model: this.model,
                    responseTimeMs: responseTime,
                    finishReason: response.choices[0]?.finish_reason,
                },
            };
        }
        catch (error) {
            this.stats.failedRequests++;
            throw new LLMProviderError(sanitizeErrorMessage(error, 'generateText'), this.name, undefined, error);
        }
    }
    /**
     * Ollama does not support vision by default.
     * This method throws an error to help users understand the limitation.
     * @throws {UnsupportedCapabilityError} always
     */
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    async analyzeImage(images, prompt, options) {
        throw new UnsupportedCapabilityError(this.name, 'vision');
    }
    /**
     * Stream text generation for real-time feedback.
     * Yields content chunks as they arrive from the model.
     * @param {string} prompt - user prompt (max 10MB)
     * @param {object} [options] - systemPrompt, maxTokens, temperature, topP, stopSequences, timeout
     * @yields {string} incremental content chunks
     * @throws {LLMProviderError} on any request failure (message is sanitized)
     */
    async *streamText(prompt, options) {
        try {
            // SECURITY: Validate prompt length
            if (prompt.length > 10 * 1024 * 1024) {
                throw new Error('Prompt exceeds maximum size (10MB)');
            }
            const messages = [];
            if (options?.systemPrompt) {
                messages.push({
                    role: 'system',
                    content: options.systemPrompt,
                });
            }
            messages.push({
                role: 'user',
                content: prompt,
            });
            const stream = await withTimeout(this.client.chat.completions.create({
                model: this.model,
                messages,
                max_tokens: options?.maxTokens,
                temperature: options?.temperature,
                top_p: options?.topP,
                stop: options?.stopSequences,
                stream: true,
            }), options?.timeout, 'streamText');
            for await (const chunk of stream) {
                const content = chunk.choices[0]?.delta?.content;
                if (content) {
                    yield content;
                }
            }
            // Note: Streaming doesn't provide detailed usage stats
            // We increment request count but can't track exact tokens
            this.stats.requestCount++;
            this.stats.lastUpdated = new Date();
        }
        catch (error) {
            this.stats.failedRequests++;
            throw new LLMProviderError(sanitizeErrorMessage(error, 'streamText'), this.name, undefined, error);
        }
    }
    /**
     * Check if Ollama is running and accessible.
     * Never throws — failures are reported in the returned message.
     * @returns {Promise<{healthy: boolean, message: string}>}
     */
    async checkHealth() {
        try {
            // Try a simple request
            await withTimeout(this.client.models.list(), 5000, 'health check');
            return {
                healthy: true,
                message: `Ollama is running with model: ${this.model}`,
            };
        }
        catch (error) {
            return {
                healthy: false,
                message: `Ollama not accessible: ${sanitizeErrorMessage(error, 'health check')}`,
            };
        }
    }
    /**
     * List available models in Ollama.
     * @returns {Promise<string[]>} model identifiers known to the daemon
     * @throws {LLMProviderError} when the daemon is not reachable
     */
    async listModels() {
        try {
            const response = await withTimeout(this.client.models.list(), 5000, 'listModels');
            return response.data.map((model) => model.id);
        }
        catch (error) {
            throw new LLMProviderError(sanitizeErrorMessage(error, 'listModels'), this.name, undefined, error);
        }
    }
}
|
|
262
|
+
/**
|
|
263
|
+
* Helper function to check if Ollama is installed and suggest setup
|
|
264
|
+
*/
|
|
265
|
+
export async function checkOllamaSetup() {
    // Default config: local daemon at http://localhost:11434/v1 with the
    // default model (see OllamaProvider constructor).
    const provider = new OllamaProvider({});
    try {
        // checkHealth never throws; listModels throws when the daemon is
        // unreachable, which is what routes us into the catch branch below.
        const health = await provider.checkHealth();
        const models = await provider.listModels();
        return {
            installed: true,
            running: health.healthy,
            modelAvailable: models.length > 0,
            setupInstructions: health.healthy ? 'Ollama is ready to use!' : 'Run: ollama serve',
        };
    }
    catch {
        // NOTE(review): an unreachable daemon is reported as "not installed" —
        // this helper cannot distinguish "not installed" from "not running";
        // confirm that conflation is intended.
        return {
            installed: false,
            running: false,
            modelAvailable: false,
            setupInstructions: `
Ollama is not installed. To set up:

1. Install Ollama:
   curl -fsSL https://ollama.com/install.sh | sh

2. Pull a model (choose one):
   ollama pull deepseek-r1:7b    # Recommended: Fast, 4GB RAM
   ollama pull deepseek-r1:14b   # Better quality, 8GB RAM
   ollama pull llama4:13b        # Alternative, 8GB RAM

3. Start Ollama:
   ollama serve

For more info: https://ollama.com
`.trim(),
        };
    }
}
|
|
@@ -0,0 +1,242 @@
|
|
|
1
|
+
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
|
2
|
+
// See LICENSE.txt for license information.
|
|
3
|
+
import OpenAI from 'openai';
|
|
4
|
+
import { LLMProviderError, UnsupportedCapabilityError } from './provider_interface.js';
|
|
5
|
+
import { API_KEY_PATTERNS, sanitizeErrorMessage, withTimeout, validateAndSanitizeUrl } from './provider_utils.js';
|
|
6
|
+
import { BaseProvider } from './base_provider.js';
|
|
7
|
+
import { logger } from './logger.js';
|
|
8
|
+
/**
 * Heuristically decide whether an OpenAI model name supports image input,
 * based on well-known naming markers ('vision', '4o', 'omni').
 *
 * @param {string} model - model identifier, any casing
 * @returns {boolean} true when the name suggests vision capability
 */
function inferVisionSupport(model) {
    const VISION_MARKERS = ['vision', '4o', 'omni'];
    const normalized = model.toLowerCase();
    return VISION_MARKERS.some((marker) => normalized.includes(marker));
}
|
|
12
|
+
/**
 * OpenAI chat-completions provider built on the official OpenAI SDK.
 * Supports text generation, streaming, and (for models whose name suggests
 * it) image analysis. Costs are computed from the configured per-1M-token
 * rates (default 0 when not configured).
 */
export class OpenAIProvider extends BaseProvider {
    /**
     * @param {object} config - apiKey (required, sk-*), baseUrl, organizationId,
     *                          model, maxTokens, costPer1MInputTokens, costPer1MOutputTokens
     * @throws {Error} on malformed API key or invalid base URL
     */
    constructor(config) {
        super();
        this.name = 'openai';
        // SECURITY: reject keys that don't match the expected sk-* shape early.
        if (!API_KEY_PATTERNS.openai.test(config.apiKey)) {
            throw new Error('Invalid API key format. Expected sk-* format.');
        }
        if (config.baseUrl) {
            const validation = validateAndSanitizeUrl(config.baseUrl);
            if (!validation.valid) {
                throw new Error(`Invalid base URL: ${validation.warning}`);
            }
            // Valid but with a caveat (e.g. plaintext HTTP to a remote host).
            if (validation.warning) {
                logger.warn(`HTTPS required for remote URLs: ${validation.warning}`);
            }
        }
        this.client = new OpenAI({
            apiKey: config.apiKey,
            baseURL: config.baseUrl,
            organization: config.organizationId,
        });
        this.model = config.model || 'gpt-4';
        // NOTE(review): 128000 is the fallback context size regardless of model;
        // confirm it matches the default model's actual limit.
        const maxTokens = config.maxTokens || 128000;
        const costPer1MInputTokens = config.costPer1MInputTokens ?? 0;
        const costPer1MOutputTokens = config.costPer1MOutputTokens ?? 0;
        this.capabilities = {
            // Vision support is inferred from the model name (see inferVisionSupport).
            vision: inferVisionSupport(this.model),
            streaming: true,
            maxTokens,
            costPer1MInputTokens,
            costPer1MOutputTokens,
            supportsTools: true,
            supportsPromptCaching: false,
            typicalResponseTimeMs: 1200,
        };
    }
    /**
     * Generate a text completion for the given prompt.
     * @param {string} prompt - user prompt (max 10MB)
     * @param {object} [options] - systemPrompt, maxTokens, temperature, topP, stopSequences, timeout
     * @returns {Promise<{text: string, usage: object, cost: number, metadata: object}>}
     * @throws {LLMProviderError} on any request failure (message is sanitized)
     */
    async generateText(prompt, options) {
        const startTime = Date.now();
        try {
            // SECURITY: bound prompt size before sending.
            if (prompt.length > 10 * 1024 * 1024) {
                throw new Error('Prompt exceeds maximum size (10MB)');
            }
            const messages = [];
            if (options?.systemPrompt) {
                messages.push({ role: 'system', content: options.systemPrompt });
            }
            messages.push({ role: 'user', content: prompt });
            const response = await withTimeout(this.client.chat.completions.create({
                model: this.model,
                messages,
                max_tokens: options?.maxTokens,
                temperature: options?.temperature,
                top_p: options?.topP,
                stop: options?.stopSequences,
            }), options?.timeout, 'generateText');
            const responseTime = Date.now() - startTime;
            const text = response.choices[0]?.message?.content || '';
            const usage = this.extractUsage(response.usage);
            const cost = this.calculateCost(usage, this.capabilities.costPer1MInputTokens, this.capabilities.costPer1MOutputTokens);
            this.updateStats(usage, responseTime, cost);
            return {
                text,
                usage,
                cost,
                metadata: {
                    model: this.model,
                    responseTimeMs: responseTime,
                    finishReason: response.choices[0]?.finish_reason,
                },
            };
        }
        catch (error) {
            this.stats.failedRequests++;
            throw new LLMProviderError(sanitizeErrorMessage(error, 'generateText'), this.name, this.extractStatusCode(error), error);
        }
    }
    /**
     * Analyze one or more images (1-20, png/jpeg/webp, <=20MB each, base64)
     * together with a text prompt.
     * @throws {UnsupportedCapabilityError} when the configured model lacks vision
     * @throws {LLMProviderError} on any request failure (message is sanitized)
     */
    async analyzeImage(images, prompt, options) {
        if (!this.capabilities.vision) {
            throw new UnsupportedCapabilityError(this.name, 'vision');
        }
        const startTime = Date.now();
        try {
            if (images.length === 0 || images.length > 20) {
                throw new Error('Image count must be between 1 and 20');
            }
            if (prompt.length > 10 * 1024 * 1024) {
                throw new Error('Prompt exceeds maximum size (10MB)');
            }
            const content = [{ type: 'text', text: prompt }];
            for (const image of images) {
                // Accept either `mimeType` or `mediaType`; default to PNG.
                const mediaType = (image.mimeType || image.mediaType || 'image/png');
                if (!['image/png', 'image/jpeg', 'image/webp'].includes(mediaType)) {
                    throw new Error(`Unsupported image type: ${mediaType}`);
                }
                // Accept either `data` or `base64` for the payload.
                const data = image.data || image.base64 || '';
                if (data.length > 20 * 1024 * 1024) {
                    throw new Error('Image data exceeds maximum size (20MB)');
                }
                // Images are sent inline as base64 data URLs.
                const url = `data:${mediaType};base64,${data}`;
                content.push({ type: 'image_url', image_url: { url } });
                if (image.description) {
                    content.push({ type: 'text', text: `[Image: ${image.description}]` });
                }
            }
            const messages = [];
            if (options?.systemPrompt) {
                messages.push({ role: 'system', content: options.systemPrompt });
            }
            messages.push({ role: 'user', content });
            const response = await withTimeout(this.client.chat.completions.create({
                model: this.model,
                messages,
                max_tokens: options?.maxTokens,
                temperature: options?.temperature,
                top_p: options?.topP,
                stop: options?.stopSequences,
            }), options?.timeout, 'analyzeImage');
            const responseTime = Date.now() - startTime;
            const text = response.choices[0]?.message?.content || '';
            const usage = this.extractUsage(response.usage);
            const cost = this.calculateCost(usage, this.capabilities.costPer1MInputTokens, this.capabilities.costPer1MOutputTokens);
            this.updateStats(usage, responseTime, cost);
            return {
                text,
                usage,
                cost,
                metadata: {
                    model: this.model,
                    responseTimeMs: responseTime,
                    finishReason: response.choices[0]?.finish_reason,
                    imageCount: images.length,
                },
            };
        }
        catch (error) {
            this.stats.failedRequests++;
            throw new LLMProviderError(sanitizeErrorMessage(error, 'analyzeImage'), this.name, this.extractStatusCode(error), error);
        }
    }
    /**
     * Stream text generation, yielding content chunks as they arrive.
     * Streaming responses carry no usage data, so only the request count
     * is updated — token stats are not tracked for streamed calls.
     * @yields {string} incremental content chunks
     * @throws {LLMProviderError} on any request failure (message is sanitized)
     */
    async *streamText(prompt, options) {
        try {
            if (prompt.length > 10 * 1024 * 1024) {
                throw new Error('Prompt exceeds maximum size (10MB)');
            }
            const messages = [];
            if (options?.systemPrompt) {
                messages.push({ role: 'system', content: options.systemPrompt });
            }
            messages.push({ role: 'user', content: prompt });
            const stream = await withTimeout(this.client.chat.completions.create({
                model: this.model,
                messages,
                max_tokens: options?.maxTokens,
                temperature: options?.temperature,
                top_p: options?.topP,
                stop: options?.stopSequences,
                stream: true,
            }), options?.timeout, 'streamText');
            for await (const chunk of stream) {
                const content = chunk.choices[0]?.delta?.content;
                if (content) {
                    yield content;
                }
            }
            this.stats.requestCount++;
            this.stats.lastUpdated = new Date();
        }
        catch (error) {
            this.stats.failedRequests++;
            throw new LLMProviderError(sanitizeErrorMessage(error, 'streamText'), this.name, this.extractStatusCode(error), error);
        }
    }
    /**
     * Map the OpenAI usage payload to the provider-neutral shape;
     * missing fields default to 0.
     */
    extractUsage(usage) {
        return {
            inputTokens: usage?.prompt_tokens || 0,
            outputTokens: usage?.completion_tokens || 0,
            totalTokens: usage?.total_tokens || 0,
        };
    }
    /**
     * Pull a numeric HTTP status code off an SDK error, if present.
     * @returns {number|undefined}
     */
    extractStatusCode(error) {
        if (error && typeof error === 'object') {
            const err = error;
            const status = err.status;
            if (typeof status === 'number') {
                return status;
            }
        }
        return undefined;
    }
    /**
     * Probe the API with a minimal (5-token) completion request.
     * Never throws — failures are reported in the returned message.
     * NOTE(review): this sends a real billable request; confirm that is acceptable.
     * @returns {Promise<{healthy: boolean, message: string}>}
     */
    async checkHealth() {
        try {
            await withTimeout(this.client.chat.completions.create({
                model: this.model,
                max_tokens: 5,
                messages: [{ role: 'user', content: 'Hi' }],
            }), 5000, 'health check');
            return {
                healthy: true,
                message: 'OpenAI API is accessible',
            };
        }
        catch (error) {
            return {
                healthy: false,
                message: `OpenAI API error: ${sanitizeErrorMessage(error, 'health check')}`,
            };
        }
    }
}
|
|
221
|
+
/**
 * Quick preflight for OpenAI: verifies an API key is present and that the
 * API answers a health-check request.
 *
 * @param {string} [apiKey] - candidate API key
 * @returns {Promise<{valid: boolean, message: string}>} never rejects
 */
export async function checkOpenAISetup(apiKey) {
    if (!apiKey) {
        return { valid: false, message: 'No API key provided' };
    }
    try {
        const provider = new OpenAIProvider({ apiKey });
        const { healthy, message } = await provider.checkHealth();
        return { valid: healthy, message };
    }
    catch (error) {
        // Construction can throw on a malformed key or bad base URL.
        const detail = sanitizeErrorMessage(error, 'setup check');
        return { valid: false, message: `Setup check failed: ${detail}` };
    }
}
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
|
|
2
|
+
// See LICENSE.txt for license information.
|
|
3
|
+
/**
 * Centralized constants for plan-and-test command
 * Eliminates magic strings and hardcoded values
 * Makes configuration easy to modify and maintain
 */
export const PLANNING_CONFIG = {
    // Number of test scenarios per priority level
    // (see getScenarioCount below, which falls back to P1 for unknown levels)
    SCENARIO_COUNTS: {
        P0: 3, // critical: deepest scenario coverage
        P1: 2, // high
        P2: 1, // medium: single scenario
    },
    // Default parameter values
    DEFAULTS: {
        MAX_TESTS: 10, // default cap on generated tests; bounded by LIMITS below
        COVERAGE_THRESHOLD: 50, // bounded by LIMITS MIN/MAX_COVERAGE_THRESHOLD (0-100)
        PRIORITY_FILTER: ['P0', 'P1'], // priorities included by default
    },
    // Limit constraints
    LIMITS: {
        MIN_MAX_TESTS: 1,
        MAX_MAX_TESTS: 100,
        MIN_COVERAGE_THRESHOLD: 0,
        MAX_COVERAGE_THRESHOLD: 100,
    },
};
|
|
29
|
+
/**
 * Display messages for plan-and-test command
 *
 * Grouped by pipeline stage. Entries are either literal strings or small
 * formatter functions that interpolate runtime values into a fixed template.
 */
export const PLAN_AND_TEST_MESSAGES = {
    // Step banners printed at the start of each pipeline phase.
    HEADER: {
        MAIN: '🚀 Planning and Generating Tests',
        STEP_1_ANALYSIS: '📊 Step 1: Analyzing Code Changes...',
        STEP_2_PLANNING: '💡 Step 2: Planning Test Generation...',
        STEP_3_GENERATION: '⚡ Step 3: Generating Tests...',
        STEP_4_SUMMARY: '📈 Generation Summary',
        FOUND_FLOW_GROUPS: (count) => `📦 Found ${count} flow groups (end-to-end journeys):`,
    },
    // Emitted while analyzing which flows a change affects.
    ANALYSIS: {
        FOUND_FLOWS: (count) => `✓ Found ${count} affected flows`,
        MORE_FLOWS: (count) => ` ... and ${count} more`,
        FOUND_FLOW_GROUPS: (count) => `📦 Found ${count} flow groups (end-to-end journeys)`,
    },
    // Emitted when the test plan is assembled.
    PLANNING: {
        PLAN_CREATED: (count) => `✓ Plan created: ${count} flows to test`,
        SKIPPING_COUNT: (count) => `⊘ Skipping ${count} flows:`,
    },
    // Human-readable reasons a flow was dropped from the plan.
    SKIP_REASONS: {
        MAX_LIMIT_REACHED: (limit) => `Max tests limit reached (${limit})`,
        ALREADY_COVERED: (count) => `Already covered (${count} existing tests)`,
    },
    // Reasons a flow was selected, keyed by its coverage state.
    COVERAGE_REASONS: {
        NO_COVERAGE: (priority) => `${priority} - no coverage`,
        PARTIAL_COVERAGE: (priority, gaps) => `${priority} - partial coverage (${gaps} gaps)`,
    },
    // Per-test progress output during generation.
    EXECUTION: {
        TEST_COUNT: (index, total) => `[${index}/${total}]`,
        GENERATION_FAILED: (error) => ` ⚠️ Generation failed: ${error}`,
        DRY_RUN_MODE: '📋 DRY RUN: Not executing. Run without --dry-run to generate tests.',
    },
    // Final summary banner.
    SUMMARY: {
        SEPARATOR: '═'.repeat(50),
        TOTAL_GENERATED: (count) => `Total Tests Generated: ${count}`,
        SUCCESSFUL: (successful, total) => `Successful: ${successful}/${total}`,
        TOTAL_SCENARIOS: (count) => `Total Scenarios: ${count}`,
        COMPLETION: '✅ Execution complete!',
    },
    // Follow-up commands suggested to the user after a run.
    NEXT_STEPS: [
        ' • Run tests: npx playwright test --grep @smoke',
        ' • Re-run impact: npx e2e-ai-agents impact --path <app-root> --tests-root <tests-root>',
        ' • Check coverage: npm run test:impact',
    ],
    // Validation / no-op error messages.
    ERRORS: {
        INVALID_PRIORITY: (priorities) => `Invalid priority levels: ${priorities.join(', ')}`,
        INVALID_MAX_TESTS: (min, max) => `maxTests must be between ${min} and ${max}`,
        INVALID_COVERAGE_THRESHOLD: (min, max) => `coverageThreshold must be between ${min} and ${max}`,
        NO_CHANGES_DETECTED: '✓ No significant changes detected',
    },
};
|
|
82
|
+
/**
 * Priority levels in the system
 *
 * Keys are severity names; values are the short identifiers ('P0'..'P2')
 * used as keys in PLANNING_CONFIG.SCENARIO_COUNTS and in priority filters.
 */
export const PRIORITY_LEVELS = {
    CRITICAL: 'P0',
    HIGH: 'P1',
    MEDIUM: 'P2',
};
|
|
90
|
+
/**
 * Test strategy types for flow groups
 *
 * NOTE(review): the execution semantics of each strategy are implemented by
 * consumers outside this file — confirm behavior at the call sites.
 */
export const TEST_STRATEGIES = {
    SEQUENTIAL: 'sequential',
    PARALLEL: 'parallel',
    MIXED: 'mixed',
};
|
|
98
|
+
/**
 * Flow group types
 *
 * Identifiers for the end-to-end journey groups reported by the analysis
 * step (see PLAN_AND_TEST_MESSAGES.HEADER.FOUND_FLOW_GROUPS).
 */
export const FLOW_GROUP_TYPES = {
    MESSAGING_LIFECYCLE: 'messaging-lifecycle',
    CHANNEL_MANAGEMENT: 'channel-management',
    MESSAGING_INTERACTIONS: 'messaging-interactions',
};
|
|
106
|
+
/**
 * Utility function to get scenario count for a priority
 * Provides type-safe access to scenario counts
 *
 * @param {string} priority - Priority level, case-insensitive (e.g. 'p0' or 'P0')
 * @returns {number} Scenario count for the priority; unknown levels fall back
 *                   to the P1 count
 */
export function getScenarioCount(priority) {
    const normalizedPriority = priority.toUpperCase();
    // `??` (not `||`) so a legitimately configured count of 0 is respected;
    // only genuinely missing/unknown priorities fall back to the P1 default.
    return PLANNING_CONFIG.SCENARIO_COUNTS[normalizedPriority] ?? PLANNING_CONFIG.SCENARIO_COUNTS.P1;
}
|
|
114
|
+
/**
 * Utility function to validate priority level
 *
 * @param {string} priority - Candidate priority, case-insensitive
 * @returns {boolean} True when the value matches a known priority identifier
 */
export function isValidPriority(priority) {
    // Compare case-insensitively against the canonical P0/P1/P2 identifiers.
    const candidate = priority.toUpperCase();
    return Object.values(PRIORITY_LEVELS).some((level) => level === candidate);
}
|
|
121
|
+
/**
 * Utility function to get all valid priority levels
 *
 * @returns {string[]} Canonical priority identifiers in declaration order
 */
export function getValidPriorities() {
    // Enumerate the identifier values off the PRIORITY_LEVELS map.
    return Object.keys(PRIORITY_LEVELS).map((key) => PRIORITY_LEVELS[key]);
}
|