artes 1.6.3 โ†’ 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -4,7 +4,36 @@
4
4
 
5
5
  <h1 align="center">Artes</h1>
6
6
 
7
- ## ๐Ÿš€ Summary
7
+ <p >
8
+ <a href="https://www.npmjs.com/package/artes">
9
+ <img src="https://img.shields.io/npm/dm/artes?label=npm&logo=npm&color=blue" alt="npm version">
10
+ </a>
11
+
12
+ <a href="https://github.com/4gayev1/Artes">
13
+ <img src="https://img.shields.io/badge/GitHub-Artes-181717?logo=github&logoColor=white" alt="GitHub Artes">
14
+ </a>
15
+
16
+ <a href="https://hub.docker.com/r/vahidaghayev/artes">
17
+ <img src="https://img.shields.io/badge/Docker-Artes-blue?logo=docker&logoColor=white" alt="Docker Artes">
18
+ </a>
19
+
20
+ <a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript">
21
+ <img src="https://img.shields.io/badge/JavaScript-F7DF1E?logo=javascript&logoColor=black" alt="JavaScript">
22
+ </a>
23
+
24
+ <a href="https://playwright.dev/">
25
+ <img src="https://img.shields.io/badge/Playwright-2EAD33?logo=playwright&logoColor=white" alt="Playwright">
26
+ </a>
27
+
28
+ <a href="https://cucumber.io/">
29
+ <img src="https://img.shields.io/badge/Cucumber-23D96C?logo=cucumber&logoColor=white" alt="Cucumber">
30
+ </a>
31
+
32
+ <a href="https://www.apache.org/licenses/LICENSE-2.0">
33
+ <img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="Apache 2.0 License">
34
+ </a>
35
+
36
+ </p>
8
37
 
9
38
  Artes is a test runner for Playwright that executes [predefined Cucumber tests](./docs/stepDefinitions.md) and can generate Allure reports for test results. It simplifies setting up Playwright with Cucumber in your automation workflow. With Artes, you can easily run tests without writing step definitions, generate reports, and customize your testing environment.
10
39
 
@@ -20,7 +49,7 @@ Artes is a test runner for Playwright that executes [predefined Cucumber tests](
20
49
  - Well-structured, easy-to-follow documentation for a smooth learning curve
21
50
  - Designed for long-term maintainability and scalability
22
51
 
23
- ### ๐Ÿงฉ Powerful & Developer-Friendly Architecture
52
+ ### ๐Ÿงฉ Powerful & QA-Friendly Architecture
24
53
 
25
54
  - Intuitive API for writing custom step definitions
26
55
  - Rich set of ready-to-use step definitions to speed up test creation
@@ -58,9 +87,19 @@ Artes is a test runner for Playwright that executes [predefined Cucumber tests](
58
87
  ### ๐Ÿงช CLI, CI/CD & Containerization
59
88
 
60
89
  - Powerful CLI for full control from the command line
90
+ - Quality gate support โ€” set a minimum success rate threshold to automatically fail the pipeline when test results drop below the required percentage
61
91
  - Official [Artes Docker image](https://hub.docker.com/r/vahidaghayev/artes) for seamless containerized execution
62
92
  - CI/CD-ready โ€” integrate easily with any pipeline
63
93
 
94
+ ### ๐Ÿค– AI-Powered Bug Reports & Test Summaries
95
+
96
+ - Automatically generates professional bug reports for failed test cases
97
+ - Generates concise test summaries for passed scenarios
98
+ - Supports multiple AI providers โ€” OpenAI, Google Gemini, Anthropic Claude, Mistral, Groq, Cohere, DeepSeek (see [Supported AI Providers](docs/aiProviders.md) for full model list and setup)
99
+ - Works with local AI models (Ollama, LM Studio) โ€” no API key required
100
+ - Multi-language report generation โ€” produce reports in any language
101
+ - Configurable report cap to control API usage and costs
102
+
64
103
  ### ๐Ÿ“Š Artes Reporting System
65
104
 
66
105
  - Easy installation with docker compose (For detailed info: [Artes Reporting System](https://github.com/4gayev1/artes-reporting-system))
@@ -115,12 +154,12 @@ npx artes [options]
115
154
  | `-rwt, --reportWithTrace` | Add trace to the report | `artes -rwt` or `artes --reportWithTrace` |
116
155
  | `--singleFileReport` | Generate single file allure report | `artes -r --singleFileReport` |
117
156
  | `--zip` | Zip the report folder after generation | `artes -r --zip` |
118
- | `--uploadReport` | Upload the generated report to Artes Reporting System | `artes --uploadReport --reporterURL "https://example.com"` |
119
- | `--reporterURL` | URL of the Artes Reporting System to upload the report | `artes --uploadReport --reporterURL "https://example.com"` |
120
- | `--projectName` | Name of the project in the Artes Reporting System (default: `"Artes Report"`) | `artes --uploadReport --reporterURL "https://example.com" --projectName "My Project"` |
121
- | `--projectType` | Type of the project for reporting purposes (default: `"Artes"`) | `artes --uploadReport --reporterURL "https://example.com" --projectType "API"` |
122
- | `--reportPath` | Path to the report zip file to upload (default: `./report.zip`) | `artes --uploadReport --reporterURL "https://example.com" --reportPath "./my_report.zip"` |
123
- | ๐Ÿ–ผ๏ธ `--logo` | Set a custom logo in the report sidebar. Accepts an absolute path, a relative path, or a direct image URL | `artes --logo /abs/path/logo.png`<br>`artes --logo logo.png`<br>`artes --logo 'https://example.com/logo.png'` |
157
+ | `--uploadReport` | Upload the generated report to Artes Reporting System | `artes -r --zip --uploadReport --reporterURL "https://example.com"` |
158
+ | `--reporterURL` | URL of the Artes Reporting System to upload the report | `artes -r --zip --uploadReport --reporterURL "https://example.com"` |
159
+ | `--projectName` | Name of the project in the Artes Reporting System (default: `"Artes Report"`) | `artes -r --zip --uploadReport --reporterURL "https://example.com" --projectName "My Project"` |
160
+ | `--projectType` | Type of the project for reporting purposes (default: `"Artes"`) | `artes -r --zip --uploadReport --reporterURL "https://example.com" --projectType "API"` |
161
+ | `--reportPath` | Path to the report zip file to upload (default: `./report.zip`) | `artes -r --zip --uploadReport --reporterURL "https://example.com" --reportPath "./my_report.zip"` |
162
+ | ๐Ÿ–ผ๏ธ `--logo` | Set a custom logo in the report sidebar. Accepts an absolute path, a relative path, or a direct image URL | `artes -r --logo /abs/path/logo.png`<br>`artes --logo logo.png`<br>`artes --logo 'https://example.com/logo.png'` |
124
163
  | ๐Ÿข `--brandName` | Set the brand name displayed next to the logo in the report sidebar | `artes --brandName 'My Company'` |
125
164
  | ๐Ÿ“„ `--reportName` | Report name displayed on the summary widget and in the Artes Reporting System | `artes --reportName 'Alma UI'`
126
165
  | ๐Ÿ“ `--features` | Specify one or more feature files' relative paths to run (comma-separated) | `artes --features "tests/features/Alma,tests/features/Banan.feature"` |
@@ -144,6 +183,18 @@ npx artes [options]
144
183
  | โฑ๏ธ `--timeout` | Set timeout for each test step in seconds (default is 30 seconds) | `artes --timeout 10` |
145
184
  | ๐Ÿข `--slowMo` | Slow down text execution for clear view (default: 0 seconds) | `artes --slowMo 1` |
146
185
 
186
+ ### AI Options
187
+
188
+ | Option | Description | Usage Example |
189
+ | --- | --- | --- |
190
+ | ๐Ÿค– `--ai` | Enable AI-generated bug reports and test summaries | `artes --ai` |
191
+ | ๐Ÿง  `--aiModel` | AI model to use for report generation | `artes --ai --aiModel "gemini 2.5 flash"` |
192
+ | ๐Ÿ”‘ `--aiKey` | API key for the selected AI provider | `artes --ai --aiKey "your-api-key"` |
193
+ | ๐Ÿ”— `--aiURL` | Local AI endpoint URL (e.g. Ollama, LM Studio). Overrides `--aiModel` and `--aiKey` when set | `artes --ai --aiURL "http://localhost:11434/api/chat"` |
194
+ | ๐ŸŒ `--aiLanguage` | Language for AI-generated reports (default: `"English"`) | `artes --ai --aiLanguage "Azerbaijani"` |
195
+ | ๐Ÿ“‹ `--maxReports` | Maximum number of AI reports to generate per test run (default: `10`) | `artes --ai --maxReports 5` |
196
+
197
+
147
198
  \*\* To just run the tests: <br>
148
199
  Globally: artes <br>
149
200
  Locally: npx artes
@@ -176,8 +227,8 @@ After running the `-c` flag to create a new project, the structure will look lik
176
227
  (Your feature files here)
177
228
  /POMs // Optional
178
229
  (POM JSON file here)
179
- /steps // For custom steps
180
- (Your step definition JS files here)
230
+ /steps // For custom steps and hooks
231
+ (Your step definition and hook JS files here)
181
232
  artes.config.js
182
233
  /report
183
234
  (Generated Allure report HTML here)
@@ -537,6 +588,19 @@ You can configure Artes by editing the `artes.config.js` file. Below are the def
537
588
 
538
589
  ---
539
590
 
591
+ ## ๐Ÿค– AI Configuration
592
+
593
+ | **Option** | **Default Value** | **Description** |
594
+ | -------------------- | ----------------- | ---------------------------------------------------------------------------------------------------- |
595
+ | `ai.ai` | `false` | Enable AI-generated bug reports and test summaries. |
596
+ | `ai.model` | `"gpt-4o"` | AI model to use for report generation (e.g. `"gemini 2.5 flash"`, `"claude sonnet"`) |
597
+ | `ai.key` | `""` | API key for the selected AI provider. |
598
+ | `ai.url` | `""` | Local AI endpoint URL (e.g. Ollama, LM Studio). Overrides `model` and `key` when set. |
599
+ | `ai.language` | `"English"` | Language for AI-generated reports (e.g. `"Azerbaijani"`, `"German"`). |
600
+ | `ai.maxReports` | `10` | Maximum number of AI reports to generate per test run. |
601
+
602
+ ---
603
+
540
604
  ## ๐ŸŒ Environment Configuration
541
605
 
542
606
  | **Option** | **Default Value** | **Description** |
@@ -191,6 +191,22 @@ module.exports = {
191
191
  : false,
192
192
  zip: process.env.ZIP == "true" ? true : artesConfig.zip ? true : false,
193
193
  },
194
+ ai:{
195
+ ai: process.env.AI ? process.env.AI : artesConfig?.ai?.ai || false,
196
+ url: process.env.AI_URL ? process.env.AI_URL : artesConfig?.ai?.url || "",
197
+ model: process.env.AI_MODEL
198
+ ? process.env.AI_MODEL
199
+ : artesConfig?.ai?.model || "gpt-4o",
200
+ key: process.env.AI_KEY
201
+ ? process.env.AI_KEY
202
+ : artesConfig?.ai?.key || "",
203
+ language: process.env.AI_LANGUAGE
204
+ ? process.env.AI_LANGUAGE
205
+ : artesConfig?.ai?.language || "English",
206
+ maxReports: process.env.MAX_REPORTS
207
+ ? parseInt(process.env.MAX_REPORTS)
208
+ : artesConfig?.ai?.maxReports || 10,
209
+ },
194
210
  env: env,
195
211
  variables: loadVariables(process.env.VARS, artesConfig.variables),
196
212
  baseURL: process.env.BASE_URL
@@ -0,0 +1,193 @@
1
+ # ๐Ÿค– AI Providers
2
+
3
+ Artes supports multiple AI providers for generating bug reports and test summaries.
4
+ Configure the provider via the `--aiModel` flag or the `ai.model` field in the artes config file.
5
+ Use the exact model name given below in the CLI and in the config file (just copy and paste the model name).
6
+
7
+ ---
8
+
9
+ ## Supported Providers
10
+
11
+ ### ๐Ÿ”ต Google Gemini
12
+
13
+ **Flag keyword:** `gemini`
14
+ **Get API key:** https://aistudio.google.com/app/apikey
15
+
16
+ | Model Name | `--aiModel` value |
17
+ |--------------------|-------------------------|
18
+ | Gemini 2.5 Flash โญ | `"gemini 2.5 flash"` |
19
+ | Gemini 2.5 Flash Lite | `"gemini 2.5 flash lite"` |
20
+ | Gemini 2.5 Pro | `"gemini 2.5 pro"` |
21
+ | Gemini 2.0 Flash | `"gemini 2.0 flash"` |
22
+
23
+ ```bash
24
+ artes --ai --aiModel "gemini 2.5 flash" --aiKey "your-key"
25
+ ```
26
+
27
+ ---
28
+
29
+ ### ๐ŸŸข OpenAI
30
+
31
+ **Flag keyword:** `openai`, `chatgpt`, `gpt`
32
+ **Get API key:** https://platform.openai.com/api-keys
33
+
34
+ | Model Name | `--aiModel` value |
35
+ |-----------------|---------------------|
36
+ | GPT-4o โญ | `"gpt-4o"` |
37
+ | GPT-4o Mini | `"gpt-4o mini"` |
38
+ | GPT-4 Turbo | `"gpt-4 turbo"` |
39
+ | GPT-4 | `"gpt-4"` |
40
+ | GPT-3.5 Turbo | `"gpt-3.5 turbo"` |
41
+ | o1 Mini | `"o1 mini"` |
42
+ | o1 | `"o1"` |
43
+
44
+ ```bash
45
+ artes --ai --aiModel "gpt-4o" --aiKey "your-key"
46
+ ```
47
+
48
+ ---
49
+
50
+ ### ๐ŸŸ  Anthropic Claude
51
+
52
+ **Flag keyword:** `claude`, `anthropic`
53
+ **Get API key:** https://console.anthropic.com/settings/keys
54
+
55
+ | Model Name | `--aiModel` value |
56
+ |------------------|----------------------|
57
+ | Claude Sonnet โญ | `"claude sonnet"` |
58
+ | Claude Sonnet 4 | `"claude sonnet 4"` |
59
+ | Claude Opus | `"claude opus"` |
60
+ | Claude Opus 4 | `"claude opus 4"` |
61
+ | Claude Haiku | `"claude haiku"` |
62
+
63
+ ```bash
64
+ artes --ai --aiModel "claude sonnet" --aiKey "your-key"
65
+ ```
66
+
67
+ ---
68
+
69
+ ### ๐Ÿ”ด Mistral
70
+
71
+ **Flag keyword:** `mistral`
72
+ **Get API key:** https://console.mistral.ai/api-keys
73
+
74
+ | Model Name | `--aiModel` value |
75
+ |-----------------|-----------------------|
76
+ | Mistral Large โญ | `"mistral large"` |
77
+ | Mistral Medium | `"mistral medium"` |
78
+ | Mistral Small | `"mistral small"` |
79
+ | Mistral Nemo | `"mistral nemo"` |
80
+ | Mistral 7B | `"mistral 7b"` |
81
+
82
+ ```bash
83
+ artes --ai --aiModel "mistral large" --aiKey "your-key"
84
+ ```
85
+
86
+ ---
87
+
88
+ ### โšก Groq
89
+
90
+ **Flag keyword:** `groq`
91
+ **Get API key:** https://console.groq.com/keys
92
+
93
+ | Model Name | `--aiModel` value |
94
+ |-------------------|-------------------------|
95
+ | LLaMA 3 70B โญ | `"groq llama 3 70b"` |
96
+ | LLaMA 3 8B | `"groq llama 3 8b"` |
97
+ | Mixtral 8x7B | `"groq mixtral"` |
98
+ | Gemma 7B | `"groq gemma 7b"` |
99
+
100
+ ```bash
101
+ artes --ai --aiModel "groq llama 3 70b" --aiKey "your-key"
102
+ ```
103
+
104
+ ---
105
+
106
+ ### ๐Ÿ”ท Cohere
107
+
108
+ **Flag keyword:** `cohere`, `command`
109
+ **Get API key:** https://dashboard.cohere.com/api-keys
110
+
111
+ | Model Name | `--aiModel` value |
112
+ |-----------------|----------------------|
113
+ | Command R+ โญ | `"cohere r+"` |
114
+ | Command R | `"cohere r"` |
115
+
116
+ ```bash
117
+ artes --ai --aiModel "cohere r+" --aiKey "your-key"
118
+ ```
119
+
120
+ ---
121
+
122
+ ### ๐Ÿ‹ DeepSeek
123
+
124
+ **Flag keyword:** `deepseek`
125
+ **Get API key:** https://platform.deepseek.com/api_keys
126
+
127
+ | Model Name | `--aiModel` value |
128
+ |------------------|-------------------------|
129
+ | DeepSeek Chat โญ | `"deepseek chat"` |
130
+ | DeepSeek Coder | `"deepseek coder"` |
131
+
132
+ ```bash
133
+ artes --ai --aiModel "deepseek chat" --aiKey "your-key"
134
+ ```
135
+
136
+ ---
137
+
138
+ ### ๐Ÿ  Local AI (Ollama, LM Studio, etc.)
139
+
140
+ Run any local model without an API key using `--aiURL` instead of `--aiModel` and `--aiKey`.
141
+ The endpoint must be OpenAI-compatible (accepts `messages` array in the request body).
142
+
143
+ ```bash
144
+ # Ollama
145
+ artes --ai --aiURL "http://localhost:11434/api/chat"
146
+
147
+ # LM Studio
148
+ artes --ai --aiURL "http://localhost:1234/v1/chat/completions"
149
+ ```
150
+
151
+ ---
152
+
153
+ ## Config File Usage
154
+
155
+ Instead of CLI flags, you can set AI options in your `artes.config.js`:
156
+
157
+ ```js
158
+ module.exports = {
159
+ ai: {
160
+ ai : true,
161
+ model : "gemini 2.5 flash",
162
+ key : "your-api-key",
163
+ language : "English",
164
+ maxReports: 10,
165
+
166
+ // For local AI โ€” overrides model and key when set:
167
+ // url: "http://localhost:11434/api/chat",
168
+ },
169
+ };
170
+ ```
171
+
172
+ ---
173
+
174
+ ## Environment Variables
175
+
176
+ All AI options can also be set via environment variables:
177
+
178
+ | Variable | Description |
179
+ |-----------------|------------------------------------------|
180
+ | `AI` | Enable AI reports (`true` / `false`) |
181
+ | `AI_MODEL` | Model string (e.g. `"gemini 2.5 flash"`) |
182
+ | `AI_KEY` | API key for the provider |
183
+ | `AI_URL` | Local AI endpoint URL |
184
+ | `AI_LANGUAGE` | Report language (e.g. `"Azerbaijani"`) |
185
+ | `MAX_REPORTS` | Max reports per run (e.g. `10`) |
186
+
187
+ ```bash
188
+ AI=true AI_MODEL="gpt-4o" AI_KEY="your-key" npx artes
189
+ ```
190
+
191
+ ---
192
+
193
+ > โญ = default model for that provider
package/executer.js CHANGED
@@ -1,6 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  const {
3
3
  showHelp,
4
+ showAIHelp,
4
5
  showVersion,
5
6
  createProject,
6
7
  runTests,
@@ -20,7 +21,6 @@ const {
20
21
  const { getEnvInfo } = require("artes/src/helper/controller/getEnvInfo");
21
22
  const { uploadReport } = require("./src/helper/controller/reportUploader");
22
23
 
23
-
24
24
  const artesConfigPath = path.resolve(process.cwd(), "artes.config.js");
25
25
 
26
26
  let artesConfig = {};
@@ -56,6 +56,13 @@ const flags = {
56
56
  projectName: args.includes("--projectName"),
57
57
  projectType: args.includes("--projectType"),
58
58
  reportPath: args.includes("--reportPath"),
59
+ aiHelp: args.includes("--aiHelp"),
60
+ ai: args.includes("--ai"),
61
+ aiURL: args.includes("--aiURL"),
62
+ aiModel: args.includes("--aiModel"),
63
+ aiKey: args.includes("--aiKey"),
64
+ aiLanguage: args.includes("--aiLanguage"),
65
+ maxReports: args.includes("--maxReports"),
59
66
  features: args.includes("--features"),
60
67
  stepDef: args.includes("--stepDef"),
61
68
  tags: args.includes("--tags"),
@@ -87,6 +94,12 @@ const reportPath = getArgValue("--reportPath");
87
94
  const logo = getArgValue("--logo");
88
95
  const brandName = getArgValue("--brandName");
89
96
  const reportName = getArgValue("--reportName");
97
+ const ai = args.includes("--ai");
98
+ const aiURL = getArgValue("--aiURL");
99
+ const aiModel = getArgValue("--aiModel");
100
+ const aiKey = getArgValue("--aiKey");
101
+ const aiLanguage = getArgValue("--aiLanguage");
102
+ const maxReports = getArgValue("--maxReports");
90
103
  const featureFiles = getArgValue("--features");
91
104
  const features = flags.features && featureFiles;
92
105
  const stepDef = getArgValue("--stepDef");
@@ -145,6 +158,13 @@ flags.singleFileReport
145
158
 
146
159
  flags.zip ? (process.env.ZIP = true) : (process.env.ZIP = false);
147
160
 
161
+ flags.ai ? (process.env.AI = ai) : (process.env.AI = false);
162
+ flags.aiURL ? (process.env.AI_URL = aiURL) : "";
163
+ flags.aiModel ? (process.env.AI_MODEL = aiModel) : "";
164
+ flags.aiKey ? (process.env.AI_KEY = aiKey) : "";
165
+ flags.aiLanguage ? (process.env.AI_LANGUAGE = aiLanguage) : "";
166
+ flags.maxReports ? (process.env.MAX_REPORTS = maxReports) : "";
167
+
148
168
  flags.headless &&
149
169
  console.log("Running mode:", flags.headless ? "headless" : "headed");
150
170
  flags.headless ? (process.env.MODE = JSON.stringify(true)) : false;
@@ -186,6 +206,7 @@ flags.slowMo ? (process.env.SLOWMO = slowMo) : "";
186
206
 
187
207
  async function main() {
188
208
  if (flags.help) return showHelp();
209
+ if (flags.aiHelp) return showAIHelp();
189
210
  if (flags.version) return showVersion();
190
211
  if (flags.create) return createProject(flags.createYes, flags.noDeps);
191
212
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "artes",
3
- "version": "1.6.3",
3
+ "version": "1.7.0",
4
4
  "description": "The simplest way to automate UI and API tests using Cucumber-style steps.",
5
5
  "main": "index.js",
6
6
  "scripts": {
@@ -0,0 +1,349 @@
1
+
2
+
3
+ require("allure-cucumberjs");
4
+ const allure = require("allure-js-commons");
5
+
6
+ const { PROVIDERS } = require("./aiProvider");
7
+
8
+
9
+ function resolveProvider(aiFlag = "gemini 2.5 flash") {
10
+ const flag = aiFlag.trim().toLowerCase();
11
+
12
+ const provider = PROVIDERS.find((p) =>
13
+ p.keywords.some((kw) => flag.includes(kw))
14
+ );
15
+
16
+ if (!provider) {
17
+ console.warn(` Unknown AI provider "${aiFlag}" โ€” falling back to Gemini.`);
18
+ return { provider: PROVIDERS[0], modelId: PROVIDERS[0].models.default };
19
+ }
20
+
21
+
22
+ const modelKey = Object.keys(provider.models)
23
+ .filter((k) => k !== "default")
24
+ .sort((a, b) => b.length - a.length)
25
+ .find((k) => flag.includes(k));
26
+
27
+ const modelId = modelKey ? provider.models[modelKey] : provider.models.default;
28
+
29
+ return { provider, modelId };
30
+ }
31
+
32
+
33
+ async function callAI({ prompt, aiFlag, apiKey }) {
34
+ const { provider, modelId } = resolveProvider(aiFlag);
35
+
36
+ console.log(`Using ${provider.name} โ€” model: ${modelId}`);
37
+
38
+ const url = provider.buildUrl(modelId, apiKey);
39
+ const body = provider.buildBody(prompt, modelId);
40
+
41
+ const headers = { "Content-Type": "application/json" };
42
+
43
+ if (provider.authStyle === "header") {
44
+ headers[provider.authKey] = provider.authValue(apiKey);
45
+ }
46
+
47
+ if (provider.buildExtraHeaders) {
48
+ Object.assign(headers, provider.buildExtraHeaders());
49
+ }
50
+
51
+ const res = await fetch(url, {
52
+ method : "POST",
53
+ headers,
54
+ body : JSON.stringify(body),
55
+ });
56
+
57
+ if (!res.ok) {
58
+ const err = await res.text();
59
+ throw new Error(`${provider.name} API error ${res.status}: ${err}`);
60
+ }
61
+
62
+ const data = await res.json();
63
+ const text = provider.parseResp(data);
64
+
65
+ if (!text) throw new Error(`${provider.name} returned an empty response`);
66
+
67
+ return text.trim();
68
+ }
69
+
70
+
71
+ function stripAnsi(str = "") {
72
+
73
+ return str.replace(/\x1B\[[0-9;]*m/g, "");
74
+ }
75
+
76
+ const DUMMY_NAMES = new Set([
77
+ "alma", "test", "scenario", "example", "untitled",
78
+ "sample", "demo", "placeholder", "temp", "tmp",
79
+ ]);
80
+
81
+ function isMeaningfulName(name = "") {
82
+ return !DUMMY_NAMES.has(name.trim().toLowerCase());
83
+ }
84
+
85
+
86
+ function buildResultContext(result = {}) {
87
+ const raw = stripAnsi(result.message ?? "");
88
+ const exType = result.exception?.type ?? "Error";
89
+ const exMsg = stripAnsi(result.exception?.message ?? raw);
90
+ const stack = stripAnsi(result.exception?.stackTrace ?? "");
91
+
92
+ const expectedMatch = raw.match(/Expected[:\s]+(.+)/);
93
+ const receivedMatch = raw.match(/Received[:\s]+(.+)/);
94
+
95
+ return {
96
+ errorType : exType,
97
+ errorMessage: exMsg,
98
+ expected : expectedMatch ? expectedMatch[1].trim() : null,
99
+ received : receivedMatch ? receivedMatch[1].trim() : null,
100
+ stackTrace : stack,
101
+ durationMs : result.duration
102
+ ? (result.duration.seconds * 1000 + result.duration.nanos / 1e6).toFixed(1)
103
+ : null,
104
+ };
105
+ }
106
+
107
+ function buildPickleContext(pickle = {}) {
108
+ const steps = (pickle.steps ?? []).map((s) => s.text ?? "");
109
+ const failedStep = steps.at(-1) ?? null;
110
+
111
+ return {
112
+ scenarioName : pickle.name ?? "Unnamed scenario",
113
+ featureFile : pickle.uri ?? null,
114
+ steps,
115
+ failedStep,
116
+ useMeaningfulName: isMeaningfulName(pickle.name),
117
+ };
118
+ }
119
+
120
+ async function callLocalAI({ prompt, url, apiKey }) {
121
+ console.log(`Using AI โ€” endpoint: ${url}`);
122
+
123
+ const headers = { "Content-Type": "application/json" };
124
+ if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`;
125
+
126
+ const res = await fetch(url, {
127
+ method: "POST",
128
+ headers,
129
+ body: JSON.stringify({
130
+ messages: [{ role: "user", content: prompt }],
131
+ }),
132
+ });
133
+
134
+ if (!res.ok) {
135
+ const err = await res.text();
136
+ throw new Error(`Local AI error ${res.status}: ${err}`);
137
+ }
138
+
139
+ const data = await res.json();
140
+
141
+
142
+ const text =
143
+ data?.choices?.[0]?.message?.content ||
144
+ data?.response ||
145
+ data?.content ||
146
+ data?.text;
147
+
148
+ if (!text) throw new Error("Local AI returned an empty response");
149
+
150
+ return text.trim();
151
+ }
152
+
153
+
154
+ async function generateFailedBugReport({ resultCtx, pickleCtx, response, language, aiFlag, apiKey, url }) {
155
+
156
+ const nameInstruction = pickleCtx.useMeaningfulName
157
+ ? `The test case is named "${pickleCtx.scenarioName}" โ€” use this as context for the bug report title.`
158
+ : `The test case name "${pickleCtx.scenarioName}" is a placeholder โ€” do NOT use it; derive the title from the actual failure.`;
159
+
160
+ const prompt = `
161
+ You are a senior QA engineer writing a professional bug report with formal language and like human written.
162
+ Write the entire report in ${language ?? "English"}.
163
+ Translate ALL section headings into ${language ?? "English"} โ€” do not leave any heading in English.
164
+
165
+ ${nameInstruction}
166
+
167
+ โ”โ”โ” TEST SCENARIO โ”โ”โ”
168
+ Feature file : ${pickleCtx.featureFile ?? "unknown"}
169
+ Scenario : ${pickleCtx.scenarioName}
170
+
171
+ Steps in this scenario:
172
+ ${pickleCtx.steps.map((s, i) => ` ${i + 1}. ${s}`).join("\n")}
173
+
174
+ Failed step : ${pickleCtx.failedStep ?? "unknown"}
175
+
176
+ โ”โ”โ” ASSERTION FAILURE โ”โ”โ”
177
+ Error type : ${resultCtx.errorType}
178
+ Error message: ${resultCtx.errorMessage}
179
+ Expected : ${resultCtx.expected ?? "see error message"}
180
+ Received : ${resultCtx.received ?? "see error message"}
181
+ Duration : ${resultCtx.durationMs ? resultCtx.durationMs + " ms" : "unknown"}
182
+
183
+ โ”โ”โ” API RESPONSE โ”โ”โ”
184
+ ${JSON.stringify(response ?? {}, null, 2)}
185
+
186
+ โ”โ”โ” FAILED REQUEST cURL โ”โ”โ”
187
+ ${response["cURL Command"]}
188
+
189
+ โ”โ”โ” INSTRUCTIONS โ”โ”โ”
190
+ Write a bug report with these exact sections in this order.
191
+ Translate ALL section headings below into ${language ?? "English"} โ€” do not leave any heading in English.
192
+ Do NOT add extra sections. Do NOT use markdown โ€” plain text only.
193
+
194
+ BUG REPORT NAME
195
+ (One concise sentence describing the defect โ€” derive from the failure, not the test name if it is a placeholder)
196
+
197
+ SUMMARY
198
+ (2โ€“4 sentences: what the test was doing, what assertion failed, and why this is a problem)
199
+
200
+ ACTUAL RESULT
201
+ (What the API/system actually did โ€” status code, response body key points)
202
+
203
+ EXPECTED RESULT
204
+ (What it should have done according to the test assertion)
205
+
206
+ STEPS TO REPRODUCE
207
+ (Numbered list โ€” use the scenario steps above; make each step actionable and clear)
208
+
209
+ FAILED REQUEST
210
+ (Paste the cURL command exactly as provided above)
211
+ `.trim();
212
+
213
+
214
+ if (url) {
215
+ return callLocalAI({ prompt, url, apiKey });
216
+ }
217
+
218
+ return callAI({ prompt, aiFlag, apiKey });
219
+ }
220
+
221
+ async function generatePassedSummary({ pickleCtx, response, language, aiFlag, apiKey, url }) {
222
+ const lang = language ?? "English";
223
+
224
+ const prompt = [
225
+ `You are a senior QA engineer writing a short test execution summary for your team.`,
226
+ `Write the entire summary in ${lang}.`,
227
+ `Keep it concise. No bullet soup, no corporate filler.`,
228
+ ``,
229
+ `TEST SCENARIO`,
230
+ `Feature file : ${pickleCtx.featureFile ?? "unknown"}`,
231
+ `Scenario : ${pickleCtx.scenarioName}`,
232
+ ``,
233
+ `Steps executed:`,
234
+ ...pickleCtx.steps.map((s, i) => ` ${i + 1}. ${s}`),
235
+ ``,
236
+ `API RESPONSE`,
237
+ JSON.stringify(response ?? {}, null, 2),
238
+ ``,
239
+ `INSTRUCTIONS`,
240
+ `Write a short passed test summary with these exact sections in this order.`,
241
+ `Do NOT add extra sections. Do NOT use markdown โ€” plain text only.`,
242
+ `Use these exact translated headings โ€” do not change them, do not revert to English.`,
243
+ ``,
244
+ `Test Name`,
245
+ `One sentence: the name or purpose of this test case. Derive it from the steps if the scenario name is generic.`,
246
+ ``,
247
+ `Test Purpose`,
248
+ `1-2 sentences: what behaviour this test case is verifying and why it matters.`,
249
+ ``,
250
+ `Summary`,
251
+ `2-3 sentences: what happened during execution โ€” which endpoint was called, what data was used,`,
252
+ `what the API returned, and that all assertions passed. Be specific about status codes and key response fields.`,
253
+ ].join("\n");
254
+
255
+ if (url) {
256
+ return callLocalAI({ prompt, url, apiKey });
257
+ }
258
+
259
+ return callAI({ prompt, aiFlag, apiKey });
260
+ }
261
+
262
+
263
+
264
+
265
+ let _reportCount = 0;
266
+
267
+
268
+
269
+ const DEFAULT_DELAY_MS = 1000;
270
+
271
+ async function attachAiBugReport({
272
+ result,
273
+ pickle,
274
+ response,
275
+ language = "English",
276
+ aiModel,
277
+ aiKey,
278
+ url,
279
+ maxReports = 10,
280
+ delayMs = DEFAULT_DELAY_MS,
281
+ }) {
282
+ try {
283
+ if (!aiKey && !url) {
284
+ console.warn("โš ๏ธ No AI key or local URL provided. Skipping bug report.");
285
+ return;
286
+ }
287
+
288
+ if (_reportCount >= maxReports) {
289
+ console.warn(`โš ๏ธ AI bug report cap reached (${maxReports}). Skipping "${pickle.name}".`);
290
+ return;
291
+ }
292
+
293
+ if (_reportCount > 0 && delayMs > 0) {
294
+ await new Promise((resolve) => setTimeout(resolve, delayMs));
295
+ }
296
+
297
+ const pickleCtx = buildPickleContext(pickle);
298
+
299
+ let reportText;
300
+ let attachmentLabel;
301
+
302
+ if (result?.status === "PASSED") {
303
+ reportText = await generatePassedSummary({
304
+ pickleCtx,
305
+ response,
306
+ language,
307
+ aiFlag: aiModel,
308
+ apiKey: aiKey,
309
+ url,
310
+ });
311
+ attachmentLabel = "Test Summary";
312
+ } else {
313
+ const resultCtx = buildResultContext(result);
314
+ reportText = await generateFailedBugReport({
315
+ resultCtx,
316
+ pickleCtx,
317
+ response,
318
+ language,
319
+ aiFlag: aiModel,
320
+ apiKey: aiKey,
321
+ url,
322
+ });
323
+ attachmentLabel = "Bug Report";
324
+ }
325
+
326
+ if (!reportText) return;
327
+
328
+ _reportCount++;
329
+
330
+ await allure.attachment(attachmentLabel, reportText, "text/plain");
331
+
332
+ } catch (err) {
333
+ console.warn("โš ๏ธ Bug report generation failed:", err.message);
334
+ }
335
+ }
336
+
337
+
338
+ function resetReportCount() {
339
+ _reportCount = 0;
340
+ }
341
+
342
+
343
+ module.exports = {
344
+ attachAiBugReport,
345
+ buildResultContext,
346
+ buildPickleContext,
347
+ resolveProvider,
348
+ resetReportCount,
349
+ };
@@ -0,0 +1,169 @@
1
+
2
// Registry of supported AI chat providers.
//
// Entry shape:
//   name              - human-readable provider name
//   keywords          - lowercase substrings matched against the model flag
//   models            - alias -> concrete model id map ("default" = fallback)
//   authStyle         - "header" or "queryparam"
//   authKey/authValue - header name and value builder (header auth only)
//   buildExtraHeaders - optional additional request headers
//   buildUrl(modelId, apiKey)  - request URL
//   buildBody(prompt, modelId) - JSON request payload
//   parseResp(data)            - extracts the reply text from a parsed response

// Request payload shared by the OpenAI-compatible chat-completions APIs.
const standardChatBody = (prompt, modelId) => ({
  model: modelId,
  max_tokens: 4000,
  temperature: 0.6,
  messages: [{ role: "user", content: prompt }],
});

// Reply extraction shared by the OpenAI-compatible chat-completions APIs.
const standardChatReply = (data) => data?.choices?.[0]?.message?.content ?? "";

// Factory for providers authenticating with "Authorization: Bearer <key>".
// Any field in `rest` (e.g. buildBody, parseResp) overrides the defaults.
const bearerProvider = ({ endpoint, ...rest }) => ({
  authStyle: "header",
  authKey: "Authorization",
  authValue: (apiKey) => `Bearer ${apiKey}`,
  buildUrl: () => endpoint,
  buildBody: standardChatBody,
  parseResp: standardChatReply,
  ...rest,
});

const PROVIDERS = [
  // Google Gemini authenticates via a ?key= query parameter, not a header.
  {
    name: "Gemini",
    keywords: ["gemini"],
    models: {
      "2.5 flash lite": "gemini-2.5-flash-lite",
      "2.5 flash": "gemini-2.5-flash",
      "2.5 pro": "gemini-2.5-pro",
      "2.0 flash": "gemini-2.0-flash",
      default: "gemini-2.5-flash",
    },
    authStyle: "queryparam",
    buildUrl: (modelId, apiKey) =>
      `https://generativelanguage.googleapis.com/v1beta/models/${modelId}:generateContent?key=${apiKey}`,
    buildBody: (prompt) => ({
      contents: [{ parts: [{ text: prompt }] }],
      generationConfig: { temperature: 0.6, maxOutputTokens: 4000 },
    }),
    parseResp: (data) => data?.candidates?.[0]?.content?.parts?.[0]?.text ?? "",
  },

  bearerProvider({
    name: "OpenAI",
    keywords: ["openai", "chatgpt", "gpt"],
    models: {
      "4o mini": "gpt-4o-mini",
      "4o": "gpt-4o",
      "4 turbo": "gpt-4-turbo",
      "4": "gpt-4",
      "3.5 turbo": "gpt-3.5-turbo",
      "o1 mini": "o1-mini",
      o1: "o1-preview",
      default: "gpt-4o",
    },
    endpoint: "https://api.openai.com/v1/chat/completions",
  }),

  // Anthropic uses an x-api-key header plus a required version header, and
  // replies as a list of typed content blocks.
  {
    name: "Claude",
    keywords: ["claude", "anthropic"],
    models: {
      "opus 4": "claude-opus-4-5",
      "sonnet 4": "claude-sonnet-4-5",
      haiku: "claude-haiku-4-5-20251001",
      opus: "claude-opus-4-5",
      sonnet: "claude-sonnet-4-5",
      default: "claude-sonnet-4-5",
    },
    authStyle: "header",
    authKey: "x-api-key",
    authValue: (apiKey) => apiKey,
    buildExtraHeaders: () => ({ "anthropic-version": "2023-06-01" }),
    buildUrl: () => "https://api.anthropic.com/v1/messages",
    buildBody: (prompt, modelId) => ({
      model: modelId,
      max_tokens: 4000,
      messages: [{ role: "user", content: prompt }],
    }),
    // Join only the text blocks; tool-use and other block types are ignored.
    parseResp: (data) =>
      (data?.content ?? [])
        .filter((block) => block.type === "text")
        .map((block) => block.text)
        .join(""),
  },

  bearerProvider({
    name: "Mistral",
    keywords: ["mistral"],
    models: {
      large: "mistral-large-latest",
      medium: "mistral-medium-latest",
      small: "mistral-small-latest",
      nemo: "open-mistral-nemo",
      "7b": "open-mistral-7b",
      default: "mistral-large-latest",
    },
    endpoint: "https://api.mistral.ai/v1/chat/completions",
  }),

  bearerProvider({
    name: "Groq",
    keywords: ["groq"],
    models: {
      "llama 3 70b": "llama3-70b-8192",
      "llama 3 8b": "llama3-8b-8192",
      mixtral: "mixtral-8x7b-32768",
      "gemma 7b": "gemma-7b-it",
      default: "llama3-70b-8192",
    },
    endpoint: "https://api.groq.com/openai/v1/chat/completions",
  }),

  // Cohere's v2 chat API takes no sampling options here and nests the reply
  // under message.content[].
  bearerProvider({
    name: "Cohere",
    keywords: ["cohere", "command"],
    models: {
      "r+": "command-r-plus",
      r: "command-r",
      default: "command-r-plus",
    },
    endpoint: "https://api.cohere.com/v2/chat",
    buildBody: (prompt, modelId) => ({
      model: modelId,
      messages: [{ role: "user", content: prompt }],
    }),
    parseResp: (data) => data?.message?.content?.[0]?.text ?? "",
  }),

  bearerProvider({
    name: "DeepSeek",
    keywords: ["deepseek"],
    models: {
      coder: "deepseek-coder",
      chat: "deepseek-chat",
      default: "deepseek-chat",
    },
    endpoint: "https://api.deepseek.com/chat/completions",
  }),
];
168
// Ordered provider registry consumed by the model/keyword resolver.
module.exports = { PROVIDERS };
@@ -60,6 +60,7 @@ function findDuplicateTestNames() {
60
60
  console.log("");
61
61
  });
62
62
  console.log("\x1b[0m");
63
+ console.log("");
63
64
  }
64
65
 
65
66
  return duplicates;
@@ -77,6 +77,7 @@ function logPomWarnings() {
77
77
  });
78
78
 
79
79
  console.log("\x1b[0m");
80
+ console.log("");
80
81
  }
81
82
 
82
83
  module.exports = { pomCollector, logPomWarnings };
@@ -73,6 +73,7 @@ function testCoverageCalculator({ silent = false } = {}) {
73
73
  console.warn(`- "${t.scenario}" ran ${t.count} times`);
74
74
  });
75
75
  console.log("\x1b[0m");
76
+ console.log("");
76
77
  }
77
78
 
78
79
  return {
@@ -1,4 +1,4 @@
1
- const { showHelp } = require("./helper");
1
+ const { showHelp, showAIHelp } = require("./helper");
2
2
  const { createProject } = require("./projectCreator");
3
3
  const { generateReport } = require("./reportGenerator");
4
4
  const { runTests } = require("./testRunner");
@@ -10,6 +10,7 @@ module.exports = {
10
10
  generateReport,
11
11
  runTests,
12
12
  showHelp,
13
+ showAIHelp,
13
14
  showVersion,
14
15
  cleanUp,
15
16
  };
@@ -129,6 +129,37 @@ function showHelp() {
129
129
  `);
130
130
  }
131
131
 
132
+
133
/**
 * Prints CLI usage help for the AI bug-reporter flags (--ai, --aiModel,
 * --aiKey, --aiURL, --aiLanguage, --maxReports) to stdout.
 */
function showAIHelp() {
  console.log(`
 ๐Ÿš€ Artes AI Bug reporter

 Usage:
 npx artes [options]

 Options:

 ๐Ÿค– --ai Enable AI-generated bug reports and test summaries
 Usage: artes --ai

 ๐Ÿง  --aiModel AI model to use for report generation
 Supported: "gpt-4o", "gemini 2.5 flash", "claude sonnet", "mistral large"
 Usage: artes --ai --aiModel "gemini 2.5 flash"

 ๐Ÿ”‘ --aiKey API key for the selected AI provider
 Usage: artes --ai --aiKey "your-api-key"

 ๐Ÿ”— --aiURL Local AI endpoint URL (e.g., Ollama, LM Studio). Overrides --aiModel and --aiKey when set
 Usage: artes --ai --aiURL "http://localhost:11434/api/chat"

 ๐ŸŒ --aiLanguage Language for AI-generated reports (default: "English")
 Usage: artes --ai --aiLanguage "Azerbaijani"

 ๐Ÿ“‹ --maxReports Maximum number of AI reports to generate per test run (default: 10)
 Usage: artes --ai --maxReports 5
 `);
}
162
+
132
163
  module.exports = {
133
- showHelp,
164
+ showHelp, showAIHelp
134
165
  };
@@ -52,6 +52,14 @@ function createProject(createYes, noDeps) {
52
52
  // paths: [], // string[] - Paths to feature files
53
53
  // steps: "", // string - Step definitions files
54
54
  // pomPath: "", // string - Path to POM files
55
+ // ai: {
56
+ // ai: false, // boolean - Enable AI-generated bug reports and test summaries
57
+ // url: "", // string - Local AI endpoint URL (e.g., Ollama, LM Studio). Overrides model/key when set
58
+ // model: "gpt-4o", // string - AI model to use (e.g., "gpt-4o", "gemini 2.5 flash", "claude sonnet", "mistral large")
59
+ // key: "", // string - API key for the selected AI provider
60
+ // language: "English", // string - Language for generated reports (e.g., "English", "Azerbaijani")
61
+ // maxReports: 10, // number - Maximum number of AI reports to generate per test run
62
+ // },
55
63
  // timeout : 0, // number - Test timeout in seconds
56
64
  // slowMo: 0, // number - Slow down test execution (Default: 0 seconds)
57
65
  // parallel: 0, // number - Number of parallel workers
@@ -21,6 +21,8 @@ const allure = require("allure-js-commons");
21
21
  const ffprobe = require("ffprobe-static");
22
22
  const ffmpegPath = require("ffmpeg-static");
23
23
  const { execSync } = require("child_process");
24
+ const { attachAiBugReport } = require("artes/src/helper/controller/aiBugReporter");
25
+
24
26
 
25
27
  const HTTP_METHODS = ["GET", "HEAD", "POST", "PUT", "PATCH", "DELETE"];
26
28
 
@@ -161,7 +163,26 @@ After(async function ({ result, pickle }) {
161
163
  await projectHooks.After();
162
164
  }
163
165
 
166
+
167
+ const shouldReport =
168
+ cucumberConfig.default.successReport || result?.status !== Status.PASSED;
169
+
164
170
  await attachResponse(allure.attachment);
171
+
172
+ if (shouldReport && cucumberConfig.ai.ai) {
173
+ await attachAiBugReport({
174
+ result,
175
+ pickle,
176
+ response: context.response,
177
+ language: cucumberConfig.ai.language,
178
+ url: cucumberConfig.ai.url,
179
+ aiModel: cucumberConfig.ai.model,
180
+ aiKey: cucumberConfig.ai.key,
181
+ maxReports: cucumberConfig.ai.maxReports
182
+ });
183
+ }
184
+
185
+
165
186
  context.response = await {};
166
187
 
167
188
  Object.keys(context.vars).length > 0 &&
@@ -171,8 +192,6 @@ After(async function ({ result, pickle }) {
171
192
  "application/json",
172
193
  );
173
194
 
174
- const shouldReport =
175
- cucumberConfig.default.successReport || result?.status !== Status.PASSED;
176
195
 
177
196
  if (shouldReport & (context.page.url() !== "about:blank")) {
178
197
  const screenshotBuffer = await context.page.screenshot({ type: "png" });