artes 1.6.4 → 1.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +37 -2
- package/cucumber.config.js +19 -0
- package/docs/aiProviders.md +193 -0
- package/executer.js +26 -2
- package/package.json +1 -1
- package/src/helper/controller/aiBugReporter.js +349 -0
- package/src/helper/controller/aiProvider.js +162 -0
- package/src/helper/controller/getEnvInfo.js +8 -0
- package/src/helper/controller/testCoverageCalculator.js +1 -1
- package/src/helper/executers/exporter.js +2 -1
- package/src/helper/executers/helper.js +35 -1
- package/src/helper/executers/projectCreator.js +9 -0
- package/src/hooks/hooks.js +23 -3
package/README.md
CHANGED
|
@@ -87,9 +87,19 @@ Artes is a test runner for Playwright that executes [predefined Cucumber tests](
|
|
|
87
87
|
### ๐งช CLI, CI/CD & Containerization
|
|
88
88
|
|
|
89
89
|
- Powerful CLI for full control from the command line
|
|
90
|
+
- Quality gate support โ set a minimum success rate threshold to automatically fail the pipeline when test results drop below the required percentage
|
|
90
91
|
- Official [Artes Docker image](https://hub.docker.com/r/vahidaghayev/artes) for seamless containerized execution
|
|
91
92
|
- CI/CD-ready โ integrate easily with any pipeline
|
|
92
93
|
|
|
94
|
+
### ๐ค AI-Powered Bug Reports & Test Summaries
|
|
95
|
+
|
|
96
|
+
- Automatically generates professional bug reports for failed test cases
|
|
97
|
+
- Generates concise test summaries for passed scenarios
|
|
98
|
+
- Supports multiple AI providers โ OpenAI, Google Gemini, Anthropic Claude, Mistral, Groq, Cohere, DeepSeek (see [Supported AI Providers](docs/aiProviders.md) for full model list and setup)
|
|
99
|
+
- Works with local AI models (Ollama, LM Studio) โ no API key required
|
|
100
|
+
- Multi-language report generation โ produce reports in any language
|
|
101
|
+
- Configurable report cap to control API usage and costs
|
|
102
|
+
|
|
93
103
|
### ๐ Artes Reporting System
|
|
94
104
|
|
|
95
105
|
- Easy installation with docker compose (For detailed info: [Artes Reporting System](https://github.com/4gayev1/artes-reporting-system))
|
|
@@ -173,6 +183,19 @@ npx artes [options]
|
|
|
173
183
|
| โฑ๏ธ `--timeout` | Set timeout for each test step in seconds (default is 30 seconds) | `artes --timeout 10` |
|
|
174
184
|
| ๐ข `--slowMo` | Slow down text execution for clear view (default: 0 seconds) | `artes --slowMo 1` |
|
|
175
185
|
|
|
186
|
+
### AI Options
|
|
187
|
+
|
|
188
|
+
| Option | Description | Usage Example |
|
|
189
|
+
| --- | --- | --- |
|
|
190
|
+
| ๐ค `--ai` | Enable AI-generated bug reports and test summaries | `artes --ai` |
|
|
191
|
+
| ๐ง `--aiModel` | AI model to use for report generation | `artes --ai --aiModel "gemini 2.5 flash"` |
|
|
192
|
+
| ๐ `--aiKey` | API key for the selected AI provider | `artes --ai --aiKey "your-api-key"` |
|
|
193
|
+
| ๐ `--aiURL` | Local AI endpoint URL (e.g. Ollama, LM Studio). Overrides `--aiModel` and `--aiKey` when set | `artes --ai --aiURL "http://localhost:11434/api/chat"` |
|
|
194
|
+
| ๐ `--aiLanguage` | Language for AI-generated reports (default: `"English"`) | `artes --ai --aiLanguage "Azerbaijani"` |
|
|
195
|
+
| ๐ข `--maxTokens` | Maximum tokens for AI response output (default: `4000`) | `artes --ai --maxTokens 8000` |
|
|
196
|
+
| ๐ `--maxReports` | Maximum number of AI reports to generate per test run (default: `10`) | `artes --ai --maxReports 5` |
|
|
197
|
+
|
|
198
|
+
|
|
176
199
|
\*\* To just run the tests: <br>
|
|
177
200
|
Globally: artes <br>
|
|
178
201
|
Locally: npx artes
|
|
@@ -566,6 +589,20 @@ You can configure Artes by editing the `artes.config.js` file. Below are the def
|
|
|
566
589
|
|
|
567
590
|
---
|
|
568
591
|
|
|
592
|
+
## ๐ค AI Configuration
|
|
593
|
+
|
|
594
|
+
| **Option** | **Default Value** | **Description** |
|
|
595
|
+
| -------------------- | ----------------- | ---------------------------------------------------------------------------------------------------- |
|
|
596
|
+
| `ai.ai` | `false` | Enable AI-generated bug reports and test summaries. |
|
|
597
|
+
| `ai.model` | `"gpt-4o"` | AI model to use for report generation (e.g. `"gemini 2.5 flash"`, `"claude sonnet"`) |
|
|
598
|
+
| `ai.key` | `""` | API key for the selected AI provider. |
|
|
599
|
+
| `ai.url` | `""` | Local AI endpoint URL (e.g. Ollama, LM Studio). Overrides `model` and `key` when set. |
|
|
600
|
+
| `ai.language` | `"English"` | Language for AI-generated reports (e.g. `"Azerbaijani"`, `"German"`). |
|
|
601
|
+
| `ai.maxTokens`       | `4000`            | Maximum tokens for AI response output.                                                                |
|
|
602
|
+
| `ai.maxReports` | `10` | Maximum number of AI reports to generate per test run. |
|
|
603
|
+
|
|
604
|
+
---
|
|
605
|
+
|
|
569
606
|
## ๐ Environment Configuration
|
|
570
607
|
|
|
571
608
|
| **Option** | **Default Value** | **Description** |
|
|
@@ -660,8 +697,6 @@ Artes can generate Allure reports. After running tests with the `-r` flag, the r
|
|
|
660
697
|
|
|
661
698
|
Artes has a built-in integration with the Artes Reporting System. By configuring the options below, you can automatically upload your test reports and keep your pipeline stages clean and organized.
|
|
662
699
|
|
|
663
|
-
*Note: report and zip should be true for report uploading*
|
|
664
|
-
|
|
665
700
|
| **Option** | **Default Value** | **Description** |
|
|
666
701
|
| ---------------- | ----------------------------- | ---------------------------------------------------------------------- |
|
|
667
702
|
| `uploadReport` | `false` | Automatically upload the report to Artes Reporting System after tests. |
|
package/cucumber.config.js
CHANGED
|
@@ -191,6 +191,25 @@ module.exports = {
|
|
|
191
191
|
: false,
|
|
192
192
|
zip: process.env.ZIP == "true" ? true : artesConfig.zip ? true : false,
|
|
193
193
|
},
|
|
194
|
+
ai:{
|
|
195
|
+
ai: process.env.AI ? process.env.AI : artesConfig?.ai?.ai || false,
|
|
196
|
+
url: process.env.AI_URL ? process.env.AI_URL : artesConfig?.ai?.url || "",
|
|
197
|
+
model: process.env.AI_MODEL
|
|
198
|
+
? process.env.AI_MODEL
|
|
199
|
+
: artesConfig?.ai?.model || "gpt-4o",
|
|
200
|
+
key: process.env.AI_KEY
|
|
201
|
+
? process.env.AI_KEY
|
|
202
|
+
: artesConfig?.ai?.key || "",
|
|
203
|
+
language: process.env.AI_LANGUAGE
|
|
204
|
+
? process.env.AI_LANGUAGE
|
|
205
|
+
: artesConfig?.ai?.language || "English",
|
|
206
|
+
maxTokens: process.env.MAX_TOKENS
|
|
207
|
+
? parseInt(process.env.MAX_TOKENS)
|
|
208
|
+
: artesConfig?.ai?.maxTokens || 4000,
|
|
209
|
+
maxReports: process.env.MAX_REPORTS
|
|
210
|
+
? parseInt(process.env.MAX_REPORTS)
|
|
211
|
+
: artesConfig?.ai?.maxReports || 10,
|
|
212
|
+
},
|
|
194
213
|
env: env,
|
|
195
214
|
variables: loadVariables(process.env.VARS, artesConfig.variables),
|
|
196
215
|
baseURL: process.env.BASE_URL
|
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
# ๐ค AI Providers
|
|
2
|
+
|
|
3
|
+
Artes supports multiple AI providers for generating bug reports and test summaries.
|
|
4
|
+
Configure the provider via `--aiModel` flag or the `ai.model` field in artes config file.
|
|
5
|
+
Use the exact model name given below in the CLI and the config file (just copy and paste the model name).
|
|
6
|
+
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
## Supported Providers
|
|
10
|
+
|
|
11
|
+
### ๐ต Google Gemini
|
|
12
|
+
|
|
13
|
+
**Flag keyword:** `gemini`
|
|
14
|
+
**Get API key:** https://aistudio.google.com/app/apikey
|
|
15
|
+
|
|
16
|
+
| Model Name | `--aiModel` value |
|
|
17
|
+
|--------------------|-------------------------|
|
|
18
|
+
| Gemini 2.5 Flash โญ | `"gemini 2.5 flash"` |
|
|
19
|
+
| Gemini 2.5 Flash Lite | `"gemini 2.5 flash lite"` |
|
|
20
|
+
| Gemini 2.5 Pro | `"gemini 2.5 pro"` |
|
|
21
|
+
| Gemini 2.0 Flash | `"gemini 2.0 flash"` |
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
artes --ai --aiModel "gemini 2.5 flash" --aiKey "your-key"
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
### ๐ข OpenAI
|
|
30
|
+
|
|
31
|
+
**Flag keyword:** `openai`, `chatgpt`, `gpt`
|
|
32
|
+
**Get API key:** https://platform.openai.com/api-keys
|
|
33
|
+
|
|
34
|
+
| Model Name | `--aiModel` value |
|
|
35
|
+
|-----------------|---------------------|
|
|
36
|
+
| GPT-4o โญ | `"gpt-4o"` |
|
|
37
|
+
| GPT-4o Mini | `"gpt-4o mini"` |
|
|
38
|
+
| GPT-4 Turbo | `"gpt-4 turbo"` |
|
|
39
|
+
| GPT-4 | `"gpt-4"` |
|
|
40
|
+
| GPT-3.5 Turbo | `"gpt-3.5 turbo"` |
|
|
41
|
+
| o1 Mini | `"o1 mini"` |
|
|
42
|
+
| o1 | `"o1"` |
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
artes --ai --aiModel "gpt-4o" --aiKey "your-key"
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
---
|
|
49
|
+
|
|
50
|
+
### ๐ Anthropic Claude
|
|
51
|
+
|
|
52
|
+
**Flag keyword:** `claude`, `anthropic`
|
|
53
|
+
**Get API key:** https://console.anthropic.com/settings/keys
|
|
54
|
+
|
|
55
|
+
| Model Name | `--aiModel` value |
|
|
56
|
+
|------------------|----------------------|
|
|
57
|
+
| Claude Sonnet โญ | `"claude sonnet"` |
|
|
58
|
+
| Claude Sonnet 4 | `"claude sonnet 4"` |
|
|
59
|
+
| Claude Opus | `"claude opus"` |
|
|
60
|
+
| Claude Opus 4 | `"claude opus 4"` |
|
|
61
|
+
| Claude Haiku | `"claude haiku"` |
|
|
62
|
+
|
|
63
|
+
```bash
|
|
64
|
+
artes --ai --aiModel "claude sonnet" --aiKey "your-key"
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
---
|
|
68
|
+
|
|
69
|
+
### ๐ด Mistral
|
|
70
|
+
|
|
71
|
+
**Flag keyword:** `mistral`
|
|
72
|
+
**Get API key:** https://console.mistral.ai/api-keys
|
|
73
|
+
|
|
74
|
+
| Model Name | `--aiModel` value |
|
|
75
|
+
|-----------------|-----------------------|
|
|
76
|
+
| Mistral Large โญ | `"mistral large"` |
|
|
77
|
+
| Mistral Medium | `"mistral medium"` |
|
|
78
|
+
| Mistral Small | `"mistral small"` |
|
|
79
|
+
| Mistral Nemo | `"mistral nemo"` |
|
|
80
|
+
| Mistral 7B | `"mistral 7b"` |
|
|
81
|
+
|
|
82
|
+
```bash
|
|
83
|
+
artes --ai --aiModel "mistral large" --aiKey "your-key"
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
---
|
|
87
|
+
|
|
88
|
+
### โก Groq
|
|
89
|
+
|
|
90
|
+
**Flag keyword:** `groq`
|
|
91
|
+
**Get API key:** https://console.groq.com/keys
|
|
92
|
+
|
|
93
|
+
| Model Name | `--aiModel` value |
|
|
94
|
+
|-------------------|-------------------------|
|
|
95
|
+
| LLaMA 3 70B โญ | `"groq llama 3 70b"` |
|
|
96
|
+
| LLaMA 3 8B | `"groq llama 3 8b"` |
|
|
97
|
+
| Mixtral 8x7B | `"groq mixtral"` |
|
|
98
|
+
| Gemma 7B | `"groq gemma 7b"` |
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
artes --ai --aiModel "groq llama 3 70b" --aiKey "your-key"
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
---
|
|
105
|
+
|
|
106
|
+
### ๐ท Cohere
|
|
107
|
+
|
|
108
|
+
**Flag keyword:** `cohere`, `command`
|
|
109
|
+
**Get API key:** https://dashboard.cohere.com/api-keys
|
|
110
|
+
|
|
111
|
+
| Model Name | `--aiModel` value |
|
|
112
|
+
|-----------------|----------------------|
|
|
113
|
+
| Command R+ โญ | `"cohere r+"` |
|
|
114
|
+
| Command R | `"cohere r"` |
|
|
115
|
+
|
|
116
|
+
```bash
|
|
117
|
+
artes --ai --aiModel "cohere r+" --aiKey "your-key"
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
---
|
|
121
|
+
|
|
122
|
+
### ๐ DeepSeek
|
|
123
|
+
|
|
124
|
+
**Flag keyword:** `deepseek`
|
|
125
|
+
**Get API key:** https://platform.deepseek.com/api_keys
|
|
126
|
+
|
|
127
|
+
| Model Name | `--aiModel` value |
|
|
128
|
+
|------------------|-------------------------|
|
|
129
|
+
| DeepSeek Chat โญ | `"deepseek chat"` |
|
|
130
|
+
| DeepSeek Coder | `"deepseek coder"` |
|
|
131
|
+
|
|
132
|
+
```bash
|
|
133
|
+
artes --ai --aiModel "deepseek chat" --aiKey "your-key"
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
### ๐ Local AI (Ollama, LM Studio, etc.)
|
|
139
|
+
|
|
140
|
+
Run any local model without an API key using `--aiURL` instead of `--aiModel` and `--aiKey`.
|
|
141
|
+
The endpoint must be OpenAI-compatible (accepts `messages` array in the request body).
|
|
142
|
+
|
|
143
|
+
```bash
|
|
144
|
+
# Ollama
|
|
145
|
+
artes --ai --aiURL "http://localhost:11434/api/chat"
|
|
146
|
+
|
|
147
|
+
# LM Studio
|
|
148
|
+
artes --ai --aiURL "http://localhost:1234/v1/chat/completions"
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
---
|
|
152
|
+
|
|
153
|
+
## Config File Usage
|
|
154
|
+
|
|
155
|
+
Instead of CLI flags, you can set AI options in your `artes.config.js`:
|
|
156
|
+
|
|
157
|
+
```js
|
|
158
|
+
module.exports = {
|
|
159
|
+
ai: {
|
|
160
|
+
ai : true,
|
|
161
|
+
model : "gemini 2.5 flash",
|
|
162
|
+
key : "your-api-key",
|
|
163
|
+
language : "English",
|
|
164
|
+
maxReports: 10,
|
|
165
|
+
|
|
166
|
+
// For local AI โ overrides model and key when set:
|
|
167
|
+
// url: "http://localhost:11434/api/chat",
|
|
168
|
+
},
|
|
169
|
+
};
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
---
|
|
173
|
+
|
|
174
|
+
## Environment Variables
|
|
175
|
+
|
|
176
|
+
All AI options can also be set via environment variables:
|
|
177
|
+
|
|
178
|
+
| Variable | Description |
|
|
179
|
+
|-----------------|------------------------------------------|
|
|
180
|
+
| `AI` | Enable AI reports (`true` / `false`) |
|
|
181
|
+
| `AI_MODEL` | Model string (e.g. `"gemini 2.5 flash"`) |
|
|
182
|
+
| `AI_KEY` | API key for the provider |
|
|
183
|
+
| `AI_URL` | Local AI endpoint URL |
|
|
184
|
+
| `AI_LANGUAGE` | Report language (e.g. `"Azerbaijani"`) |
|
|
185
|
+
| `MAX_TOKENS`    | Max tokens for AI output (e.g. `4000`)   |
| `MAX_REPORTS`   | Max reports per run (e.g. `10`)          |
|
|
186
|
+
|
|
187
|
+
```bash
|
|
188
|
+
AI=true AI_MODEL="gpt-4o" AI_KEY="your-key" npx artes
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
---
|
|
192
|
+
|
|
193
|
+
> โญ = default model for that provider
|
package/executer.js
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
2
|
const {
|
|
3
3
|
showHelp,
|
|
4
|
+
showAIHelp,
|
|
4
5
|
showVersion,
|
|
5
6
|
createProject,
|
|
6
7
|
runTests,
|
|
@@ -20,7 +21,6 @@ const {
|
|
|
20
21
|
const { getEnvInfo } = require("artes/src/helper/controller/getEnvInfo");
|
|
21
22
|
const { uploadReport } = require("./src/helper/controller/reportUploader");
|
|
22
23
|
|
|
23
|
-
|
|
24
24
|
const artesConfigPath = path.resolve(process.cwd(), "artes.config.js");
|
|
25
25
|
|
|
26
26
|
let artesConfig = {};
|
|
@@ -56,6 +56,14 @@ const flags = {
|
|
|
56
56
|
projectName: args.includes("--projectName"),
|
|
57
57
|
projectType: args.includes("--projectType"),
|
|
58
58
|
reportPath: args.includes("--reportPath"),
|
|
59
|
+
aiHelp: args.includes("--aiHelp"),
|
|
60
|
+
ai: args.includes("--ai"),
|
|
61
|
+
aiURL: args.includes("--aiURL"),
|
|
62
|
+
aiModel: args.includes("--aiModel"),
|
|
63
|
+
aiKey: args.includes("--aiKey"),
|
|
64
|
+
aiLanguage: args.includes("--aiLanguage"),
|
|
65
|
+
maxTokens: args.includes("--maxTokens"),
|
|
66
|
+
maxReports: args.includes("--maxReports"),
|
|
59
67
|
features: args.includes("--features"),
|
|
60
68
|
stepDef: args.includes("--stepDef"),
|
|
61
69
|
tags: args.includes("--tags"),
|
|
@@ -87,6 +95,13 @@ const reportPath = getArgValue("--reportPath");
|
|
|
87
95
|
const logo = getArgValue("--logo");
|
|
88
96
|
const brandName = getArgValue("--brandName");
|
|
89
97
|
const reportName = getArgValue("--reportName");
|
|
98
|
+
const ai = args.includes("--ai");
|
|
99
|
+
const aiURL = getArgValue("--aiURL");
|
|
100
|
+
const aiModel = getArgValue("--aiModel");
|
|
101
|
+
const aiKey = getArgValue("--aiKey");
|
|
102
|
+
const aiLanguage = getArgValue("--aiLanguage");
|
|
103
|
+
const maxTokens = getArgValue("--maxTokens")
|
|
104
|
+
const maxReports = getArgValue("--maxReports");
|
|
90
105
|
const featureFiles = getArgValue("--features");
|
|
91
106
|
const features = flags.features && featureFiles;
|
|
92
107
|
const stepDef = getArgValue("--stepDef");
|
|
@@ -145,6 +160,14 @@ flags.singleFileReport
|
|
|
145
160
|
|
|
146
161
|
flags.zip ? (process.env.ZIP = true) : (process.env.ZIP = false);
|
|
147
162
|
|
|
163
|
+
flags.ai ? (process.env.AI = ai) : (process.env.AI = false);
|
|
164
|
+
flags.aiURL ? (process.env.AI_URL = aiURL) : "";
|
|
165
|
+
flags.aiModel ? (process.env.AI_MODEL = aiModel) : "";
|
|
166
|
+
flags.aiKey ? (process.env.AI_KEY = aiKey) : "";
|
|
167
|
+
flags.aiLanguage ? (process.env.AI_LANGUAGE = aiLanguage) : "";
|
|
168
|
+
flags.maxTokens ? (process.env.MAX_TOKENS = maxTokens) : "";
|
|
169
|
+
flags.maxReports ? (process.env.MAX_REPORTS = maxReports) : "";
|
|
170
|
+
|
|
148
171
|
flags.headless &&
|
|
149
172
|
console.log("Running mode:", flags.headless ? "headless" : "headed");
|
|
150
173
|
flags.headless ? (process.env.MODE = JSON.stringify(true)) : false;
|
|
@@ -186,6 +209,7 @@ flags.slowMo ? (process.env.SLOWMO = slowMo) : "";
|
|
|
186
209
|
|
|
187
210
|
async function main() {
|
|
188
211
|
if (flags.help) return showHelp();
|
|
212
|
+
if (flags.aiHelp) return showAIHelp();
|
|
189
213
|
if (flags.version) return showVersion();
|
|
190
214
|
if (flags.create) return createProject(flags.createYes, flags.noDeps);
|
|
191
215
|
|
|
@@ -193,7 +217,7 @@ async function main() {
|
|
|
193
217
|
|
|
194
218
|
const testCoverage = testCoverageCalculator();
|
|
195
219
|
|
|
196
|
-
if (testCoverage
|
|
220
|
+
if (testCoverage.totalTests === 0) {
|
|
197
221
|
console.log("\x1b[33mNo tests were run (0 scenarios). Skipping report generation and upload.\x1b[0m");
|
|
198
222
|
cleanUp();
|
|
199
223
|
process.exit(process.env.EXIT_CODE);
|
package/package.json
CHANGED
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
|
|
2
|
+
|
|
3
|
+
require("allure-cucumberjs");
|
|
4
|
+
const allure = require("allure-js-commons");
|
|
5
|
+
|
|
6
|
+
const { PROVIDERS } = require("./aiProvider");
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
/**
 * Map a user-supplied AI flag string (e.g. "gemini 2.5 flash") to a provider
 * entry from PROVIDERS plus a concrete model id.
 * Falls back to the first provider (Gemini) with a console warning when no
 * provider keyword matches the flag.
 */
function resolveProvider(aiFlag = "gemini 2.5 flash") {
  const normalized = aiFlag.trim().toLowerCase();

  const provider = PROVIDERS.find((entry) =>
    entry.keywords.some((keyword) => normalized.includes(keyword)),
  );

  if (!provider) {
    console.warn(` Unknown AI provider "${aiFlag}" โ falling back to Gemini.`);
    return { provider: PROVIDERS[0], modelId: PROVIDERS[0].models.default };
  }

  // Longest keys first so e.g. "2.5 flash lite" wins over "2.5 flash".
  const matchedKey = Object.keys(provider.models)
    .filter((key) => key !== "default")
    .sort((a, b) => b.length - a.length)
    .find((key) => normalized.includes(key));

  return {
    provider,
    modelId: matchedKey ? provider.models[matchedKey] : provider.models.default,
  };
}
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
/**
 * Send a prompt to a hosted AI provider (resolved from `aiFlag`) and return
 * the trimmed response text.
 * Authentication is attached either as a header or via the provider's URL
 * builder, depending on the provider's `authStyle`.
 * Throws when the HTTP request fails or the provider returns no text.
 */
async function callAI({ prompt, aiFlag, apiKey, maxTokens }) {
  const { provider, modelId } = resolveProvider(aiFlag);

  const headers = { "Content-Type": "application/json" };
  if (provider.authStyle === "header") {
    headers[provider.authKey] = provider.authValue(apiKey);
  }
  if (provider.buildExtraHeaders) {
    Object.assign(headers, provider.buildExtraHeaders());
  }

  const res = await fetch(provider.buildUrl(modelId, apiKey), {
    method: "POST",
    headers,
    body: JSON.stringify(provider.buildBody(prompt, modelId, maxTokens)),
  });

  if (!res.ok) {
    const err = await res.text();
    throw new Error(`${provider.name} API error ${res.status}: ${err}`);
  }

  const payload = await res.json();
  const text = provider.parseResp(payload);
  if (!text) throw new Error(`${provider.name} returned an empty response`);

  return text.trim();
}
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
/**
 * Remove ANSI SGR escape sequences (e.g. "\x1B[31m") from a string so error
 * text can be embedded cleanly in AI prompts.
 */
function stripAnsi(str = "") {
  const ansiPattern = /\x1B\[[0-9;]*m/g;
  return str.replace(ansiPattern, "");
}
|
|
73
|
+
|
|
74
|
+
// Scenario names that are obviously placeholders; bug-report titles should
// not be derived from them.
const DUMMY_NAMES = new Set([
  "alma", "test", "scenario", "example", "untitled",
  "sample", "demo", "placeholder", "temp", "tmp",
]);

/**
 * True when the scenario name looks human-chosen rather than one of the
 * known placeholder names (comparison is trimmed and case-insensitive).
 */
function isMeaningfulName(name = "") {
  const normalized = name.trim().toLowerCase();
  return !DUMMY_NAMES.has(normalized);
}
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
/**
 * Flatten a Cucumber step result into the fields the AI prompt needs:
 * error type/message, expected/received values parsed out of the message
 * text, stack trace, and duration in milliseconds (string with one decimal,
 * or null when no duration is present).
 */
function buildResultContext(result = {}) {
  const rawMessage = stripAnsi(result.message ?? "");

  // Assertion libraries commonly emit "Expected: X" / "Received: Y" lines.
  const expectedMatch = rawMessage.match(/Expected[:\s]+(.+)/);
  const receivedMatch = rawMessage.match(/Received[:\s]+(.+)/);

  let durationMs = null;
  if (result.duration) {
    const { seconds, nanos } = result.duration;
    durationMs = (seconds * 1000 + nanos / 1e6).toFixed(1);
  }

  return {
    errorType   : result.exception?.type ?? "Error",
    errorMessage: stripAnsi(result.exception?.message ?? rawMessage),
    expected    : expectedMatch ? expectedMatch[1].trim() : null,
    received    : receivedMatch ? receivedMatch[1].trim() : null,
    stackTrace  : stripAnsi(result.exception?.stackTrace ?? ""),
    durationMs,
  };
}
|
|
104
|
+
|
|
105
|
+
/**
 * Flatten a Cucumber pickle into the fields the AI prompt needs: scenario
 * name, feature file URI, step texts, the last step (assumed to be the one
 * that failed), and whether the scenario name is meaningful enough to use
 * in a report title.
 */
function buildPickleContext(pickle = {}) {
  const steps = (pickle.steps ?? []).map((step) => step.text ?? "");
  const lastStep = steps.length > 0 ? steps[steps.length - 1] : null;

  return {
    scenarioName     : pickle.name ?? "Unnamed scenario",
    featureFile      : pickle.uri ?? null,
    steps,
    // The last executed step is assumed to be the failing one.
    failedStep       : lastStep,
    useMeaningfulName: isMeaningfulName(pickle.name),
  };
}
|
|
117
|
+
|
|
118
|
+
/**
 * Send a prompt to a local OpenAI-compatible endpoint (Ollama, LM Studio,
 * etc.) and return the trimmed response text. An API key is optional; when
 * supplied it is sent as a Bearer token.
 * Throws when the HTTP request fails or the endpoint returns no text.
 */
async function callLocalAI({ prompt, url, apiKey }) {
  const headers = { "Content-Type": "application/json" };
  if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`;

  const res = await fetch(url, {
    method: "POST",
    headers,
    body: JSON.stringify({ messages: [{ role: "user", content: prompt }] }),
  });

  if (!res.ok) {
    const err = await res.text();
    throw new Error(`Local AI error ${res.status}: ${err}`);
  }

  const data = await res.json();

  // Accept the common response shapes of OpenAI-style and Ollama-style APIs.
  const text =
    data?.choices?.[0]?.message?.content ||
    data?.response ||
    data?.content ||
    data?.text;

  if (!text) throw new Error("Local AI returned an empty response");

  return text.trim();
}
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
/**
 * Build a bug-report prompt for a FAILED scenario and send it to either a
 * local AI endpoint (when `url` is set) or a hosted provider.
 *
 * Fixes:
 *  - `maxTokens` is now destructured from the options; the caller
 *    (attachAiBugReport) already passes it, but it was previously missing
 *    here, so `callAI({ …, maxTokens })` threw a ReferenceError for every
 *    hosted-provider report.
 *  - The cURL command is read with optional chaining so a null/undefined
 *    `response` no longer crashes prompt construction (the JSON section
 *    above it already guarded with `response ?? {}`).
 *
 * @returns {Promise<string>} the generated report text
 */
async function generateFailedBugReport({ resultCtx, pickleCtx, response, language, aiFlag, apiKey, url, maxTokens }) {

  // Placeholder scenario names (e.g. "test") must not drive the title.
  const nameInstruction = pickleCtx.useMeaningfulName
    ? `The test case is named "${pickleCtx.scenarioName}" โ use this as context for the bug report title.`
    : `The test case name "${pickleCtx.scenarioName}" is a placeholder โ do NOT use it; derive the title from the actual failure.`;

  const prompt = `
You are a senior QA engineer writing a professional bug report with formal language and like human written.
Write the entire report in ${language ?? "English"}.
Translate ALL section headings into ${language ?? "English"} โ do not leave any heading in English.

${nameInstruction}

โโโ TEST SCENARIO โโโ
Feature file : ${pickleCtx.featureFile ?? "unknown"}
Scenario : ${pickleCtx.scenarioName}

Steps in this scenario:
${pickleCtx.steps.map((s, i) => ` ${i + 1}. ${s}`).join("\n")}

Failed step : ${pickleCtx.failedStep ?? "unknown"}

โโโ ASSERTION FAILURE โโโ
Error type : ${resultCtx.errorType}
Error message: ${resultCtx.errorMessage}
Expected : ${resultCtx.expected ?? "see error message"}
Received : ${resultCtx.received ?? "see error message"}
Duration : ${resultCtx.durationMs ? resultCtx.durationMs + " ms" : "unknown"}

โโโ API RESPONSE โโโ
${JSON.stringify(response ?? {}, null, 2)}

โโโ FAILED REQUEST cURL โโโ
${response?.["cURL Command"] ?? "unavailable"}

โโโ INSTRUCTIONS โโโ
Write a bug report with these exact sections in this order.
Translate ALL section headings below into ${language ?? "English"} โ do not leave any heading in English.
Do NOT add extra sections. Do NOT use markdown โ plain text only.

BUG REPORT NAME
(One concise sentence describing the defect โ derive from the failure, not the test name if it is a placeholder)

SUMMARY
(2โ4 sentences: what the test was doing, what assertion failed, and why this is a problem)

ACTUAL RESULT
(What the API/system actually did โ status code, response body key points)

EXPECTED RESULT
(What it should have done according to the test assertion)

STEPS TO REPRODUCE
(Numbered list โ use the scenario steps above; make each step actionable and clear)

FAILED REQUEST
(Paste the cURL command exactly as provided above)
`.trim();

  // A local endpoint takes precedence over hosted providers.
  if (url) {
    return callLocalAI({ prompt, url, apiKey });
  }

  return callAI({ prompt, aiFlag, apiKey, maxTokens });
}
|
|
217
|
+
|
|
218
|
+
/**
 * Build a short execution-summary prompt for a PASSED scenario and send it
 * to either a local AI endpoint (when `url` is set) or a hosted provider.
 *
 * Fix: `maxTokens` is now destructured from the options; the caller
 * (attachAiBugReport) already passes it, but it was previously missing here,
 * so `callAI({ …, maxTokens })` threw a ReferenceError for every
 * hosted-provider summary.
 *
 * @returns {Promise<string>} the generated summary text
 */
async function generatePassedSummary({ pickleCtx, response, language, aiFlag, apiKey, url, maxTokens }) {
  const lang = language ?? "English";

  const prompt = [
    `You are a senior QA engineer writing a short test execution summary for your team.`,
    `Write the entire summary in ${lang}.`,
    `Keep it concise. No bullet soup, no corporate filler.`,
    ``,
    `TEST SCENARIO`,
    `Feature file : ${pickleCtx.featureFile ?? "unknown"}`,
    `Scenario : ${pickleCtx.scenarioName}`,
    ``,
    `Steps executed:`,
    ...pickleCtx.steps.map((s, i) => ` ${i + 1}. ${s}`),
    ``,
    `API RESPONSE`,
    JSON.stringify(response ?? {}, null, 2),
    ``,
    `INSTRUCTIONS`,
    `Write a short passed test summary with these exact sections in this order.`,
    `Do NOT add extra sections. Do NOT use markdown โ plain text only.`,
    `Use these exact translated headings โ do not change them, do not revert to English.`,
    ``,
    `Test Name`,
    `One sentence: the name or purpose of this test case. Derive it from the steps if the scenario name is generic.`,
    ``,
    `Test Purpose`,
    `1-2 sentences: what behaviour this test case is verifying and why it matters.`,
    ``,
    `Summary`,
    `2-3 sentences: what happened during execution โ which endpoint was called, what data was used,`,
    `what the API returned, and that all assertions passed. Be specific about status codes and key response fields.`,
  ].join("\n");

  // A local endpoint takes precedence over hosted providers.
  if (url) {
    return callLocalAI({ prompt, url, apiKey });
  }

  return callAI({ prompt, aiFlag, apiKey, maxTokens });
}
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
// Number of AI reports generated in this run; compared against `maxReports`
// to cap API usage/cost. Reset with resetReportCount() between runs.
let _reportCount = 0;

// Pause between consecutive AI calls — presumably to stay under provider
// rate limits (TODO confirm against provider quotas).
const DEFAULT_DELAY_MS = 3000;

/**
 * Generate an AI report for one finished scenario and attach it to the
 * Allure report as a plain-text attachment.
 *
 * Passed scenarios get a "Test Summary" from generatePassedSummary; any
 * other status gets a "Bug Report" from generateFailedBugReport. Skips
 * silently (with a console warning) when no credentials are configured or
 * the per-run report cap has been reached. All errors are caught and
 * logged — report generation must never fail the test run itself.
 *
 * @param {object} result   Cucumber step/scenario result (reads `status`)
 * @param {object} pickle   Cucumber pickle (scenario metadata)
 * @param {object} response captured API response to embed in the prompt
 * @param {string} language output language for the report
 * @param {string} aiModel  provider/model flag string (e.g. "gpt-4o")
 * @param {string} aiKey    hosted-provider API key
 * @param {string} url      local AI endpoint; takes precedence over aiModel/aiKey
 * @param {number} maxReports per-run cap on generated reports
 * @param {number} delayMs  pause before each report after the first
 * @param {number} maxTokens max tokens for the AI response
 */
async function attachAiBugReport({
  result,
  pickle,
  response,
  language = "English",
  aiModel,
  aiKey,
  url,
  maxReports = 10,
  delayMs = DEFAULT_DELAY_MS,
  maxTokens
}) {
  try {
    // No hosted key and no local endpoint — nothing we can call.
    if (!aiKey && !url) {
      console.warn("โ ๏ธ No AI key or local URL provided. Skipping bug report.");
      return;
    }

    // Enforce the per-run cap before doing any work.
    if (_reportCount >= maxReports) {
      console.warn(`โ ๏ธ AI bug report cap reached (${maxReports}). Skipping "${pickle.name}".`);
      return;
    }

    // Throttle every call after the first to avoid hammering the provider.
    if (_reportCount > 0 && delayMs > 0) {
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }

    const pickleCtx = buildPickleContext(pickle);

    let reportText;
    let attachmentLabel;

    if (result?.status === "PASSED") {
      reportText = await generatePassedSummary({
        pickleCtx,
        response,
        language,
        aiFlag: aiModel,
        apiKey: aiKey,
        url,
        maxTokens
      });
      attachmentLabel = "Test Summary";
    } else {
      // Any non-PASSED status (FAILED, etc.) is treated as a failure.
      const resultCtx = buildResultContext(result);
      reportText = await generateFailedBugReport({
        resultCtx,
        pickleCtx,
        response,
        language,
        aiFlag: aiModel,
        apiKey: aiKey,
        url,
        maxTokens
      });
      attachmentLabel = "Bug Report";
    }

    if (!reportText) return;

    // Count only successfully generated reports toward the cap.
    _reportCount++;

    await allure.attachment(attachmentLabel, reportText, "text/plain");

  } catch (err) {
    // Deliberate best-effort: AI failures must not break the test run.
    console.warn("โ ๏ธ Bug report generation failed:", err.message);
  }
}


// Reset the per-run report counter (call between test runs).
function resetReportCount() {
  _reportCount = 0;
}


module.exports = {
  attachAiBugReport,
  buildResultContext,
  buildPickleContext,
  resolveProvider,
  resetReportCount,
};
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
/**
 * Registry of supported hosted AI providers.
 *
 * Each entry describes how to call one provider's HTTP chat API:
 *  - name:      human-readable provider name.
 *  - keywords:  lower-case substrings used to match a user-supplied model
 *               flag (e.g. "gemini 2.5 flash") to this provider.
 *  - models:    friendly alias -> concrete model id, plus a `default` entry.
 *  - authStyle: "header" (credential in an HTTP header via authKey/authValue)
 *               or "queryparam" (credential appended to the URL).
 *  - buildUrl(modelId, apiKey):   request URL for the given model.
 *  - buildBody(prompt, modelId, maxTokens = 4000): JSON request body.
 *  - parseResp(data): extracts the generated text from the parsed JSON
 *    response; always returns a string ("" when no usable text is present).
 */
const PROVIDERS = [
  {
    name: "Gemini",
    keywords: ["gemini"],
    models: {
      "2.5 flash lite": "gemini-2.5-flash-lite",
      "2.5 flash": "gemini-2.5-flash",
      "2.5 pro": "gemini-2.5-pro",
      "2.0 flash": "gemini-2.0-flash",
      default: "gemini-2.5-flash",
    },
    // NOTE(review): the API key is carried in the URL, so it can leak into
    // proxy/server logs; Google also accepts the `x-goog-api-key` header —
    // consider switching once the dispatcher supports it.
    authStyle: "queryparam",
    buildUrl: (modelId, apiKey) =>
      `https://generativelanguage.googleapis.com/v1beta/models/${modelId}:generateContent?key=${apiKey}`,
    buildBody: (prompt, modelId, maxTokens = 4000) => ({
      contents: [{ parts: [{ text: prompt }] }],
      generationConfig: { temperature: 0.6, maxOutputTokens: maxTokens },
    }),
    parseResp: (data) => data?.candidates?.[0]?.content?.parts?.[0]?.text ?? "",
  },

  {
    name: "OpenAI",
    keywords: ["openai", "chatgpt", "gpt"],
    models: {
      "4o mini": "gpt-4o-mini",
      "4o": "gpt-4o",
      "4 turbo": "gpt-4-turbo",
      "4": "gpt-4",
      "3.5 turbo": "gpt-3.5-turbo",
      "o1 mini": "o1-mini",
      o1: "o1-preview",
      default: "gpt-4o",
    },
    authStyle: "header",
    authKey: "Authorization",
    authValue: (apiKey) => `Bearer ${apiKey}`,
    buildUrl: () => "https://api.openai.com/v1/chat/completions",
    // o1 reasoning models reject `max_tokens` (they require
    // `max_completion_tokens`) and only support the default temperature,
    // so the body must be shaped differently for the o1 aliases above.
    buildBody: (prompt, modelId, maxTokens = 4000) => {
      const messages = [{ role: "user", content: prompt }];
      if (typeof modelId === "string" && modelId.startsWith("o1")) {
        return { model: modelId, max_completion_tokens: maxTokens, messages };
      }
      return { model: modelId, max_tokens: maxTokens, temperature: 0.6, messages };
    },
    parseResp: (data) => data?.choices?.[0]?.message?.content ?? "",
  },

  {
    name: "Claude",
    keywords: ["claude", "anthropic"],
    models: {
      "opus 4": "claude-opus-4-5",
      "sonnet 4": "claude-sonnet-4-5",
      haiku: "claude-haiku-4-5-20251001",
      opus: "claude-opus-4-5",
      sonnet: "claude-sonnet-4-5",
      default: "claude-sonnet-4-5",
    },
    authStyle: "header",
    authKey: "x-api-key",
    authValue: (apiKey) => apiKey,
    // The Messages API requires a versioning header on every request.
    buildExtraHeaders: () => ({ "anthropic-version": "2023-06-01" }),
    buildUrl: () => "https://api.anthropic.com/v1/messages",
    buildBody: (prompt, modelId, maxTokens = 4000) => ({
      model: modelId,
      max_tokens: maxTokens,
      messages: [{ role: "user", content: prompt }],
    }),
    // Claude responses are a list of content blocks; keep only text blocks.
    parseResp: (data) =>
      (data?.content ?? []).filter((b) => b.type === "text").map((b) => b.text).join(""),
  },

  {
    name: "Mistral",
    keywords: ["mistral"],
    models: {
      large: "mistral-large-latest",
      medium: "mistral-medium-latest",
      small: "mistral-small-latest",
      nemo: "open-mistral-nemo",
      "7b": "open-mistral-7b",
      default: "mistral-large-latest",
    },
    authStyle: "header",
    authKey: "Authorization",
    authValue: (apiKey) => `Bearer ${apiKey}`,
    buildUrl: () => "https://api.mistral.ai/v1/chat/completions",
    buildBody: (prompt, modelId, maxTokens = 4000) => ({
      model: modelId,
      max_tokens: maxTokens,
      temperature: 0.6,
      messages: [{ role: "user", content: prompt }],
    }),
    parseResp: (data) => data?.choices?.[0]?.message?.content ?? "",
  },

  {
    name: "Groq",
    keywords: ["groq"],
    models: {
      "llama 3 70b": "llama3-70b-8192",
      "llama 3 8b": "llama3-8b-8192",
      mixtral: "mixtral-8x7b-32768",
      "gemma 7b": "gemma-7b-it",
      default: "llama3-70b-8192",
    },
    authStyle: "header",
    authKey: "Authorization",
    authValue: (apiKey) => `Bearer ${apiKey}`,
    buildUrl: () => "https://api.groq.com/openai/v1/chat/completions",
    buildBody: (prompt, modelId, maxTokens = 4000) => ({
      model: modelId,
      max_tokens: maxTokens,
      temperature: 0.6,
      messages: [{ role: "user", content: prompt }],
    }),
    parseResp: (data) => data?.choices?.[0]?.message?.content ?? "",
  },

  {
    name: "Cohere",
    keywords: ["cohere", "command"],
    models: {
      "r+": "command-r-plus",
      r: "command-r",
      default: "command-r-plus",
    },
    authStyle: "header",
    authKey: "Authorization",
    authValue: (apiKey) => `Bearer ${apiKey}`,
    buildUrl: () => "https://api.cohere.com/v2/chat",
    buildBody: (prompt, modelId, maxTokens = 4000) => ({
      model: modelId,
      max_tokens: maxTokens,
      messages: [{ role: "user", content: prompt }],
    }),
    parseResp: (data) => data?.message?.content?.[0]?.text ?? "",
  },

  {
    name: "DeepSeek",
    keywords: ["deepseek"],
    models: {
      coder: "deepseek-coder",
      chat: "deepseek-chat",
      default: "deepseek-chat",
    },
    authStyle: "header",
    authKey: "Authorization",
    authValue: (apiKey) => `Bearer ${apiKey}`,
    buildUrl: () => "https://api.deepseek.com/chat/completions",
    buildBody: (prompt, modelId, maxTokens = 4000) => ({
      model: modelId,
      max_tokens: maxTokens,
      temperature: 0.6,
      messages: [{ role: "user", content: prompt }],
    }),
    parseResp: (data) => data?.choices?.[0]?.message?.content ?? "",
  },
];
|
|
161
|
+
|
|
162
|
+
module.exports = { PROVIDERS };
|
|
@@ -55,6 +55,14 @@ async function getEnvInfo() {
|
|
|
55
55
|
Parallel_Runner: cucumberConfig.default.parallel,
|
|
56
56
|
Timeout: cucumberConfig.default.timeout ?? "N/A",
|
|
57
57
|
|
|
58
|
+
// โโ AI Config โโโโโโโโโโโโโโโโโโโโโโโโโ
|
|
59
|
+
AI_Enabled: cucumberConfig.default.ai ?? false,
|
|
60
|
+
AI_URL: cucumberConfig.default.url ?? "N/A",
|
|
61
|
+
AI_Model: cucumberConfig.default.model ?? "gemini 2.5 flash",
|
|
62
|
+
AI_Language: cucumberConfig.default.language ?? "English",
|
|
63
|
+
AI_Max_Tokens: cucumberConfig.default.maxTokens ?? "4000",
|
|
64
|
+
AI_Max_Reports: cucumberConfig.default.maxReports ?? 10,
|
|
65
|
+
|
|
58
66
|
// โโ Git โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
|
|
59
67
|
Git_Branch: process.env.GIT_BRANCH ?? process.env.BRANCH_NAME ?? "N/A",
|
|
60
68
|
Git_Commit: process.env.GIT_COMMIT ?? process.env.GIT_SHA ?? "N/A",
|
|
@@ -68,7 +68,7 @@ function testCoverageCalculator({ silent = false } = {}) {
|
|
|
68
68
|
});
|
|
69
69
|
|
|
70
70
|
if (!silent && retriedTests.length > 0) {
|
|
71
|
-
console.warn(
|
|
71
|
+
console.warn("\n\x1b[33mRetried test cases:");
|
|
72
72
|
retriedTests.forEach((t) => {
|
|
73
73
|
console.warn(`- "${t.scenario}" ran ${t.count} times`);
|
|
74
74
|
});
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
const { showHelp } = require("./helper");
|
|
1
|
+
const { showHelp, showAIHelp } = require("./helper");
|
|
2
2
|
const { createProject } = require("./projectCreator");
|
|
3
3
|
const { generateReport } = require("./reportGenerator");
|
|
4
4
|
const { runTests } = require("./testRunner");
|
|
@@ -10,6 +10,7 @@ module.exports = {
|
|
|
10
10
|
generateReport,
|
|
11
11
|
runTests,
|
|
12
12
|
showHelp,
|
|
13
|
+
showAIHelp,
|
|
13
14
|
showVersion,
|
|
14
15
|
cleanUp,
|
|
15
16
|
};
|
|
@@ -129,6 +129,40 @@ function showHelp() {
|
|
|
129
129
|
`);
|
|
130
130
|
}
|
|
131
131
|
|
|
132
|
+
|
|
133
|
+
/**
 * Prints CLI usage information for the Artes AI bug-reporter options.
 * Output-only helper: writes the help text to stdout and returns nothing.
 */
function showAIHelp() {
  const helpText = `
  ๐ Artes AI Bug reporter

  Usage:
    npx artes [options]

  Options:

  ๐ค --ai           Enable AI-generated bug reports and test summaries
                    Usage: artes --ai

  ๐ง --aiModel      AI model to use for report generation
                    Supported: "gpt-4o", "gemini 2.5 flash", "claude sonnet", "mistral large"
                    Usage: artes --ai --aiModel "gemini 2.5 flash"

  ๐ --aiKey        API key for the selected AI provider
                    Usage: artes --ai --aiKey "your-api-key"

  ๐ --aiURL        Local AI endpoint URL (e.g., Ollama, LM Studio). Overrides --aiModel and --aiKey when set
                    Usage: artes --ai --aiURL "http://localhost:11434/api/chat"

  ๐ --aiLanguage   Language for AI-generated reports (default: "English")
                    Usage: artes --ai --aiLanguage "Azerbaijani"

  ๐ข --maxTokens    Maximum tokens for AI-generated reports (default: 4000)
                    Usage: artes --ai --maxTokens 8000

  ๐ --maxReports   Maximum number of AI reports to generate per test run (default: 10)
                    Usage: artes --ai --maxReports 5
  `;
  console.log(helpText);
}
|
|
165
|
+
|
|
132
166
|
module.exports = {
|
|
133
|
-
showHelp,
|
|
167
|
+
showHelp, showAIHelp
|
|
134
168
|
};
|
|
@@ -52,6 +52,15 @@ function createProject(createYes, noDeps) {
|
|
|
52
52
|
// paths: [], // string[] - Paths to feature files
|
|
53
53
|
// steps: "", // string - Step definitions files
|
|
54
54
|
// pomPath: "", // string - Path to POM files
|
|
55
|
+
// ai: {
|
|
56
|
+
// ai: false, // boolean - Enable AI-generated bug reports and test summaries
|
|
57
|
+
// url: "", // string - Local AI endpoint URL (e.g., Ollama, LM Studio). Overrides model/key when set
|
|
58
|
+
// model: "gemini 2.5 flash", // string - AI model to use (e.g., "gpt-4o", "gemini 2.5 flash", "claude sonnet", "mistral large")
|
|
59
|
+
// key: "", // string - API key for the selected AI provider
|
|
60
|
+
// language: "English", // string - Language for generated reports (e.g., "English", "Azerbaijani"),
|
|
61
|
+
// maxTokens: 4000, // number - Maximum tokens for AI-generated reports (default: 4000)
|
|
62
|
+
// maxReports: 10, // number - Maximum number of AI reports to generate per test run
|
|
63
|
+
// },
|
|
55
64
|
// timeout : 0, // number - Test timeout in seconds
|
|
56
65
|
// slowMo: 0, // number - Slow down test execution (Default: 0 seconds)
|
|
57
66
|
// parallel: 0, // number - Number of parallel workers
|
package/src/hooks/hooks.js
CHANGED
|
@@ -21,6 +21,8 @@ const allure = require("allure-js-commons");
|
|
|
21
21
|
const ffprobe = require("ffprobe-static");
|
|
22
22
|
const ffmpegPath = require("ffmpeg-static");
|
|
23
23
|
const { execSync } = require("child_process");
|
|
24
|
+
const { attachAiBugReport } = require("artes/src/helper/controller/aiBugReporter");
|
|
25
|
+
|
|
24
26
|
|
|
25
27
|
const HTTP_METHODS = ["GET", "HEAD", "POST", "PUT", "PATCH", "DELETE"];
|
|
26
28
|
|
|
@@ -157,11 +159,31 @@ AfterStep(async function ({ pickleStep }) {
|
|
|
157
159
|
});
|
|
158
160
|
|
|
159
161
|
After(async function ({ result, pickle }) {
|
|
162
|
+
const shouldReport =
|
|
163
|
+
cucumberConfig.default.successReport || result?.status !== Status.PASSED;
|
|
164
|
+
|
|
165
|
+
await attachResponse(allure.attachment);
|
|
166
|
+
|
|
167
|
+
if (shouldReport && cucumberConfig.ai.ai) {
|
|
168
|
+
await attachAiBugReport({
|
|
169
|
+
result,
|
|
170
|
+
pickle,
|
|
171
|
+
response: context.response,
|
|
172
|
+
language: cucumberConfig.ai.language,
|
|
173
|
+
url: cucumberConfig.ai.url,
|
|
174
|
+
aiModel: cucumberConfig.ai.model,
|
|
175
|
+
aiKey: cucumberConfig.ai.key,
|
|
176
|
+
maxReports: cucumberConfig.ai.maxReports,
|
|
177
|
+
maxTokens: cucumberConfig.ai.maxTokens
|
|
178
|
+
});
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
|
|
160
182
|
if (typeof projectHooks.After === "function") {
|
|
161
183
|
await projectHooks.After();
|
|
162
184
|
}
|
|
163
185
|
|
|
164
|
-
|
|
186
|
+
|
|
165
187
|
context.response = await {};
|
|
166
188
|
|
|
167
189
|
Object.keys(context.vars).length > 0 &&
|
|
@@ -171,8 +193,6 @@ After(async function ({ result, pickle }) {
|
|
|
171
193
|
"application/json",
|
|
172
194
|
);
|
|
173
195
|
|
|
174
|
-
const shouldReport =
|
|
175
|
-
cucumberConfig.default.successReport || result?.status !== Status.PASSED;
|
|
176
196
|
|
|
177
197
|
if (shouldReport & (context.page.url() !== "about:blank")) {
|
|
178
198
|
const screenshotBuffer = await context.page.screenshot({ type: "png" });
|