artes 1.7.0 → 1.7.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -192,6 +192,7 @@ npx artes [options]
192
192
  | 🔑 `--aiKey` | API key for the selected AI provider | `artes --ai --aiKey "your-api-key"` |
193
193
  | 🔗 `--aiURL` | Local AI endpoint URL (e.g. Ollama, LM Studio). Overrides `--aiModel` and `--aiKey` when set | `artes --ai --aiURL "http://localhost:11434/api/chat"` |
194
194
  | 🌍 `--aiLanguage` | Language for AI-generated reports (default: `"English"`) | `artes --ai --aiLanguage "Azerbaijani"` |
195
+ | 🔢 `--maxTokens` | Maximum tokens for AI response output (default: `4000`) | `artes --ai --maxTokens 8000` |
195
196
  | 📋 `--maxReports` | Maximum number of AI reports to generate per test run (default: `10`) | `artes --ai --maxReports 5` |
196
197
 
197
198
 
@@ -597,6 +598,7 @@ You can configure Artes by editing the `artes.config.js` file. Below are the def
597
598
  | `ai.key` | `""` | API key for the selected AI provider. |
598
599
  | `ai.url` | `""` | Local AI endpoint URL (e.g. Ollama, LM Studio). Overrides `model` and `key` when set. |
599
600
  | `ai.language` | `"English"` | Language for AI-generated reports (e.g. `"Azerbaijani"`, `"German"`). |
601
+ | `ai.maxTokens` | `4000` | Maximum tokens for AI response output. |
600
602
  | `ai.maxReports` | `10` | Maximum number of AI reports to generate per test run. |
601
603
 
602
604
  ---
@@ -203,6 +203,9 @@ key: process.env.AI_KEY
203
203
  language: process.env.AI_LANGUAGE
204
204
  ? process.env.AI_LANGUAGE
205
205
  : artesConfig?.ai?.language || "English",
206
+ maxTokens: process.env.MAX_TOKENS
207
+ ? parseInt(process.env.MAX_TOKENS)
208
+ : artesConfig?.ai?.maxTokens || 4000,
206
209
  maxReports: process.env.MAX_REPORTS
207
210
  ? parseInt(process.env.MAX_REPORTS)
208
211
  : artesConfig?.ai?.maxReports || 10,
package/executer.js CHANGED
@@ -62,6 +62,7 @@ const flags = {
62
62
  aiModel: args.includes("--aiModel"),
63
63
  aiKey: args.includes("--aiKey"),
64
64
  aiLanguage: args.includes("--aiLanguage"),
65
+ maxTokens: args.includes("--maxTokens"),
65
66
  maxReports: args.includes("--maxReports"),
66
67
  features: args.includes("--features"),
67
68
  stepDef: args.includes("--stepDef"),
@@ -99,6 +100,7 @@ const aiURL = getArgValue("--aiURL");
99
100
  const aiModel = getArgValue("--aiModel");
100
101
  const aiKey = getArgValue("--aiKey");
101
102
  const aiLanguage = getArgValue("--aiLanguage");
103
+ const maxTokens = getArgValue("--maxTokens");
102
104
  const maxReports = getArgValue("--maxReports");
103
105
  const featureFiles = getArgValue("--features");
104
106
  const features = flags.features && featureFiles;
@@ -163,6 +165,7 @@ flags.aiURL ? (process.env.AI_URL = aiURL) : "";
163
165
  flags.aiModel ? (process.env.AI_MODEL = aiModel) : "";
164
166
  flags.aiKey ? (process.env.AI_KEY = aiKey) : "";
165
167
  flags.aiLanguage ? (process.env.AI_LANGUAGE = aiLanguage) : "";
168
+ flags.maxTokens ? (process.env.MAX_TOKENS = maxTokens) : "";
166
169
  flags.maxReports ? (process.env.MAX_REPORTS = maxReports) : "";
167
170
 
168
171
  flags.headless &&
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "artes",
3
- "version": "1.7.0",
3
+ "version": "1.7.2",
4
4
  "description": "The simplest way to automate UI and API tests using Cucumber-style steps.",
5
5
  "main": "index.js",
6
6
  "scripts": {
@@ -30,13 +30,11 @@ function resolveProvider(aiFlag = "gemini 2.5 flash") {
30
30
  }
31
31
 
32
32
 
33
- async function callAI({ prompt, aiFlag, apiKey }) {
33
+ async function callAI({ prompt, aiFlag, apiKey, maxTokens }) {
34
34
  const { provider, modelId } = resolveProvider(aiFlag);
35
-
36
- console.log(`Using ${provider.name} — model: ${modelId}`);
37
-
35
+
38
36
  const url = provider.buildUrl(modelId, apiKey);
39
- const body = provider.buildBody(prompt, modelId);
37
+ const body = provider.buildBody(prompt, modelId, maxTokens);
40
38
 
41
39
  const headers = { "Content-Type": "application/json" };
42
40
 
@@ -118,7 +116,6 @@ function buildPickleContext(pickle = {}) {
118
116
  }
119
117
 
120
118
  async function callLocalAI({ prompt, url, apiKey }) {
121
- console.log(`Using AI — endpoint: ${url}`);
122
119
 
123
120
  const headers = { "Content-Type": "application/json" };
124
121
  if (apiKey) headers["Authorization"] = `Bearer ${apiKey}`;
@@ -151,7 +148,7 @@ async function callLocalAI({ prompt, url, apiKey }) {
151
148
  }
152
149
 
153
150
 
154
- async function generateFailedBugReport({ resultCtx, pickleCtx, response, language, aiFlag, apiKey, url }) {
151
+ async function generateFailedBugReport({ resultCtx, pickleCtx, response, language, aiFlag, apiKey, url, maxTokens }) {
155
152
 
156
153
  const nameInstruction = pickleCtx.useMeaningfulName
157
154
  ? `The test case is named "${pickleCtx.scenarioName}" — use this as context for the bug report title.`
@@ -215,10 +212,10 @@ if (url) {
215
212
  return callLocalAI({ prompt, url, apiKey });
216
213
  }
217
214
 
218
- return callAI({ prompt, aiFlag, apiKey });
215
+ return callAI({ prompt, aiFlag, apiKey, maxTokens });
219
216
  }
220
217
 
221
- async function generatePassedSummary({ pickleCtx, response, language, aiFlag, apiKey, url }) {
218
+ async function generatePassedSummary({ pickleCtx, response, language, aiFlag, apiKey, url, maxTokens }) {
222
219
  const lang = language ?? "English";
223
220
 
224
221
  const prompt = [
@@ -256,7 +253,7 @@ async function generatePassedSummary({ pickleCtx, response, language, aiFlag, ap
256
253
  return callLocalAI({ prompt, url, apiKey });
257
254
  }
258
255
 
259
- return callAI({ prompt, aiFlag, apiKey });
256
+ return callAI({ prompt, aiFlag, apiKey, maxTokens });
260
257
  }
261
258
 
262
259
 
@@ -266,7 +263,7 @@ let _reportCount = 0;
266
263
 
267
264
 
268
265
 
269
- const DEFAULT_DELAY_MS = 1000;
266
+ const DEFAULT_DELAY_MS = 3000;
270
267
 
271
268
  async function attachAiBugReport({
272
269
  result,
@@ -278,6 +275,7 @@ async function attachAiBugReport({
278
275
  url,
279
276
  maxReports = 10,
280
277
  delayMs = DEFAULT_DELAY_MS,
278
+ maxTokens
281
279
  }) {
282
280
  try {
283
281
  if (!aiKey && !url) {
@@ -307,6 +305,7 @@ async function attachAiBugReport({
307
305
  aiFlag: aiModel,
308
306
  apiKey: aiKey,
309
307
  url,
308
+ maxTokens
310
309
  });
311
310
  attachmentLabel = "Test Summary";
312
311
  } else {
@@ -318,7 +317,8 @@ async function attachAiBugReport({
318
317
  language,
319
318
  aiFlag: aiModel,
320
319
  apiKey: aiKey,
321
- url,
320
+ url,
321
+ maxTokens
322
322
  });
323
323
  attachmentLabel = "Bug Report";
324
324
  }
@@ -1,27 +1,24 @@
1
-
2
1
  const PROVIDERS = [
3
-
4
2
  {
5
3
  name : "Gemini",
6
4
  keywords: ["gemini"],
7
5
  models : {
8
- "2.5 flash lite" : "gemini-2.5-flash-lite",
9
- "2.5 flash" : "gemini-2.5-flash",
10
- "2.5 pro" : "gemini-2.5-pro",
11
- "2.0 flash" : "gemini-2.0-flash",
12
- default : "gemini-2.5-flash",
6
+ "2.5 flash lite" : "gemini-2.5-flash-lite",
7
+ "2.5 flash" : "gemini-2.5-flash",
8
+ "2.5 pro" : "gemini-2.5-pro",
9
+ "2.0 flash" : "gemini-2.0-flash",
10
+ default : "gemini-2.5-flash",
13
11
  },
14
12
  authStyle : "queryparam",
15
13
  buildUrl : (modelId, apiKey) =>
16
14
  `https://generativelanguage.googleapis.com/v1beta/models/${modelId}:generateContent?key=${apiKey}`,
17
- buildBody : (prompt) => ({
15
+ buildBody : (prompt, modelId, maxTokens = 4000) => ({
18
16
  contents : [{ parts: [{ text: prompt }] }],
19
- generationConfig: { temperature: 0.6, maxOutputTokens: 4000 },
17
+ generationConfig: { temperature: 0.6, maxOutputTokens: maxTokens },
20
18
  }),
21
19
  parseResp : (data) => data?.candidates?.[0]?.content?.parts?.[0]?.text ?? "",
22
20
  },
23
21
 
24
-
25
22
  {
26
23
  name : "OpenAI",
27
24
  keywords: ["openai", "chatgpt", "gpt"],
@@ -39,16 +36,15 @@ const PROVIDERS = [
39
36
  authKey : "Authorization",
40
37
  authValue : (apiKey) => `Bearer ${apiKey}`,
41
38
  buildUrl : () => "https://api.openai.com/v1/chat/completions",
42
- buildBody : (prompt, modelId) => ({
39
+ buildBody : (prompt, modelId, maxTokens = 4000) => ({
43
40
  model : modelId,
44
- max_tokens : 4000,
41
+ max_tokens : maxTokens,
45
42
  temperature: 0.6,
46
43
  messages : [{ role: "user", content: prompt }],
47
44
  }),
48
45
  parseResp : (data) => data?.choices?.[0]?.message?.content ?? "",
49
46
  },
50
47
 
51
-
52
48
  {
53
49
  name : "Claude",
54
50
  keywords: ["claude", "anthropic"],
@@ -65,16 +61,15 @@ const PROVIDERS = [
65
61
  authValue : (apiKey) => apiKey,
66
62
  buildExtraHeaders: () => ({ "anthropic-version": "2023-06-01" }),
67
63
  buildUrl : () => "https://api.anthropic.com/v1/messages",
68
- buildBody : (prompt, modelId) => ({
64
+ buildBody : (prompt, modelId, maxTokens = 4000) => ({
69
65
  model : modelId,
70
- max_tokens: 4000,
66
+ max_tokens: maxTokens,
71
67
  messages : [{ role: "user", content: prompt }],
72
68
  }),
73
69
  parseResp : (data) =>
74
70
  (data?.content ?? []).filter((b) => b.type === "text").map((b) => b.text).join(""),
75
71
  },
76
72
 
77
-
78
73
  {
79
74
  name : "Mistral",
80
75
  keywords: ["mistral"],
@@ -90,16 +85,15 @@ const PROVIDERS = [
90
85
  authKey : "Authorization",
91
86
  authValue : (apiKey) => `Bearer ${apiKey}`,
92
87
  buildUrl : () => "https://api.mistral.ai/v1/chat/completions",
93
- buildBody : (prompt, modelId) => ({
88
+ buildBody : (prompt, modelId, maxTokens = 4000) => ({
94
89
  model : modelId,
95
- max_tokens : 4000,
90
+ max_tokens : maxTokens,
96
91
  temperature: 0.6,
97
92
  messages : [{ role: "user", content: prompt }],
98
93
  }),
99
94
  parseResp : (data) => data?.choices?.[0]?.message?.content ?? "",
100
95
  },
101
96
 
102
-
103
97
  {
104
98
  name : "Groq",
105
99
  keywords: ["groq"],
@@ -114,16 +108,15 @@ const PROVIDERS = [
114
108
  authKey : "Authorization",
115
109
  authValue : (apiKey) => `Bearer ${apiKey}`,
116
110
  buildUrl : () => "https://api.groq.com/openai/v1/chat/completions",
117
- buildBody : (prompt, modelId) => ({
111
+ buildBody : (prompt, modelId, maxTokens = 4000) => ({
118
112
  model : modelId,
119
- max_tokens : 4000,
113
+ max_tokens : maxTokens,
120
114
  temperature: 0.6,
121
115
  messages : [{ role: "user", content: prompt }],
122
116
  }),
123
117
  parseResp : (data) => data?.choices?.[0]?.message?.content ?? "",
124
118
  },
125
119
 
126
-
127
120
  {
128
121
  name : "Cohere",
129
122
  keywords: ["cohere", "command"],
@@ -136,14 +129,14 @@ const PROVIDERS = [
136
129
  authKey : "Authorization",
137
130
  authValue : (apiKey) => `Bearer ${apiKey}`,
138
131
  buildUrl : () => "https://api.cohere.com/v2/chat",
139
- buildBody : (prompt, modelId) => ({
140
- model : modelId,
141
- messages: [{ role: "user", content: prompt }],
132
+ buildBody : (prompt, modelId, maxTokens = 4000) => ({
133
+ model : modelId,
134
+ max_tokens : maxTokens,
135
+ messages : [{ role: "user", content: prompt }],
142
136
  }),
143
137
  parseResp : (data) => data?.message?.content?.[0]?.text ?? "",
144
138
  },
145
139
 
146
-
147
140
  {
148
141
  name : "DeepSeek",
149
142
  keywords: ["deepseek"],
@@ -156,9 +149,9 @@ const PROVIDERS = [
156
149
  authKey : "Authorization",
157
150
  authValue : (apiKey) => `Bearer ${apiKey}`,
158
151
  buildUrl : () => "https://api.deepseek.com/chat/completions",
159
- buildBody : (prompt, modelId) => ({
152
+ buildBody : (prompt, modelId, maxTokens = 4000) => ({
160
153
  model : modelId,
161
- max_tokens : 4000,
154
+ max_tokens : maxTokens,
162
155
  temperature: 0.6,
163
156
  messages : [{ role: "user", content: prompt }],
164
157
  }),
@@ -55,6 +55,14 @@ async function getEnvInfo() {
55
55
  Parallel_Runner: cucumberConfig.default.parallel,
56
56
  Timeout: cucumberConfig.default.timeout ?? "N/A",
57
57
 
58
+ // ── AI Config ─────────────────────────
59
+ AI_Enabled: cucumberConfig.ai.ai ?? false,
60
+ AI_URL: cucumberConfig.ai.url || "N/A",
61
+ AI_Model: cucumberConfig.ai.model || "N/A",
62
+ AI_Language: cucumberConfig.ai.language || "English",
63
+ AI_Max_Tokens: cucumberConfig.ai.maxTokens ?? 4000,
64
+ AI_Max_Reports: cucumberConfig.ai.maxReports ?? 10,
65
+
58
66
  // ── Git ─────────────────────────────────
59
67
  Git_Branch: process.env.GIT_BRANCH ?? process.env.BRANCH_NAME ?? "N/A",
60
68
  Git_Commit: process.env.GIT_COMMIT ?? process.env.GIT_SHA ?? "N/A",
@@ -154,6 +154,9 @@ function showAIHelp() {
154
154
 
155
155
  🌍 --aiLanguage Language for AI-generated reports (default: "English")
156
156
  Usage: artes --ai --aiLanguage "Azerbaijani"
157
+
158
+ 🔢 --maxTokens Maximum tokens for AI-generated reports (default: 4000)
159
+ Usage: artes --ai --maxTokens 8000
157
160
 
158
161
  📋 --maxReports Maximum number of AI reports to generate per test run (default: 10)
159
162
  Usage: artes --ai --maxReports 5
@@ -55,9 +55,10 @@ function createProject(createYes, noDeps) {
55
55
  // ai: {
56
56
  // ai: false, // boolean - Enable AI-generated bug reports and test summaries
57
57
  // url: "", // string - Local AI endpoint URL (e.g., Ollama, LM Studio). Overrides model/key when set
58
- // model: "gpt-4o", // string - AI model to use (e.g., "gpt-4o", "gemini 2.5 flash", "claude sonnet", "mistral large")
58
+ // model: "gemini 2.5 flash", // string - AI model to use (e.g., "gpt-4o", "gemini 2.5 flash", "claude sonnet", "mistral large")
59
59
  // key: "", // string - API key for the selected AI provider
60
- // language: "English", // string - Language for generated reports (e.g., "English", "Azerbaijani")
60
+ // language: "English", // string - Language for generated reports (e.g., "English", "Azerbaijani")
61
+ // maxTokens: 4000, // number - Maximum tokens for AI-generated reports (default: 4000)
61
62
  // maxReports: 10, // number - Maximum number of AI reports to generate per test run
62
63
  // },
63
64
  // timeout : 0, // number - Test timeout in seconds
@@ -159,27 +159,28 @@ AfterStep(async function ({ pickleStep }) {
159
159
  });
160
160
 
161
161
  After(async function ({ result, pickle }) {
162
- if (typeof projectHooks.After === "function") {
163
- await projectHooks.After();
164
- }
165
-
166
-
167
162
  const shouldReport =
168
- cucumberConfig.default.successReport || result?.status !== Status.PASSED;
169
-
170
- await attachResponse(allure.attachment);
163
+ cucumberConfig.default.successReport || result?.status !== Status.PASSED;
164
+
165
+ await attachResponse(allure.attachment);
171
166
 
172
167
  if (shouldReport && cucumberConfig.ai.ai) {
173
- await attachAiBugReport({
174
- result,
175
- pickle,
176
- response: context.response,
177
- language: cucumberConfig.ai.language,
178
- url: cucumberConfig.ai.url,
179
- aiModel: cucumberConfig.ai.model,
180
- aiKey: cucumberConfig.ai.key,
181
- maxReports: cucumberConfig.ai.maxReports
182
- });
168
+ await attachAiBugReport({
169
+ result,
170
+ pickle,
171
+ response: context.response,
172
+ language: cucumberConfig.ai.language,
173
+ url: cucumberConfig.ai.url,
174
+ aiModel: cucumberConfig.ai.model,
175
+ aiKey: cucumberConfig.ai.key,
176
+ maxReports: cucumberConfig.ai.maxReports,
177
+ maxTokens: cucumberConfig.ai.maxTokens
178
+ });
179
+ }
180
+
181
+
182
+ if (typeof projectHooks.After === "function") {
183
+ await projectHooks.After();
183
184
  }
184
185
 
185
186