@testcollab/cli 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/release.yml +53 -0
- package/DEVELOPMENT.md +225 -0
- package/LICENSE +21 -0
- package/README.md +378 -0
- package/docs/frameworks.md +485 -0
- package/docs/specgen.md +77 -0
- package/package.json +54 -0
- package/samples/reports/junit.xml +12 -0
- package/samples/reports/mochawesome.json +110 -0
- package/scripts/bump-version.js +145 -0
- package/src/ai/discovery.js +123 -0
- package/src/commands/createTestPlan.js +259 -0
- package/src/commands/featuresync.js +753 -0
- package/src/commands/report.js +1109 -0
- package/src/commands/specgen.js +430 -0
- package/src/index.js +74 -0
|
@@ -0,0 +1,485 @@
|
|
|
1
|
+
# Framework Setup Guide
|
|
2
|
+
|
|
3
|
+
How to generate test result files compatible with `tc report` for each supported framework.
|
|
4
|
+
|
|
5
|
+
`tc report` accepts two formats:
|
|
6
|
+
- **Mochawesome JSON** (`--format mochawesome`)
|
|
7
|
+
- **JUnit XML** (`--format junit`)
|
|
8
|
+
|
|
9
|
+
Your test names must include a TestCollab case ID (e.g., `[TC-123]`, `TC-123`, `id-123`, or `testcase-123`) so results can be matched to test cases. See the [README](../README.md#mapping-test-cases) for all supported patterns.
|
|
10
|
+
|
|
11
|
+
### Supported frameworks
|
|
12
|
+
|
|
13
|
+
[Cypress](#cypress) | [Playwright](#playwright) | [Jest](#jest) | [Pytest](#pytest) | [TestNG](#testng) | [JUnit 4/5](#junit-45) | [Robot Framework](#robot-framework) | [PHPUnit](#phpunit) | [Cucumber.js](#cucumberjs) | [Cucumber JVM](#cucumber-jvm) | [WebDriverIO](#webdriverio) | [TestCafe](#testcafe) | [Newman (Postman)](#newman-postman) | [Behave (Python)](#behave-python) | [Go (`go test`)](#go-go-test) | [Kaspresso / Kotlin](#kaspresso--kotlin)
|
|
14
|
+
|
|
15
|
+
### JUnit XML example
|
|
16
|
+
|
|
17
|
+
```xml
|
|
18
|
+
<?xml version="1.0" encoding="UTF-8"?>
|
|
19
|
+
<testsuites name="My Test Suite">
|
|
20
|
+
<testsuite name="Authentication" tests="3" failures="1" skipped="1">
|
|
21
|
+
<testcase classname="Authentication.Login" name="[TC-123] should login with valid credentials" time="0.12" />
|
|
22
|
+
<testcase classname="Authentication.Login" name="[TC-124] should reject invalid password" time="0.43">
|
|
23
|
+
<failure message="Expected 401 but got 200">AssertionError: expected status 401 but got 200</failure>
|
|
24
|
+
</testcase>
|
|
25
|
+
<testcase classname="Authentication.Login" name="[TC-125] should support SSO login" time="0.07">
|
|
26
|
+
<skipped />
|
|
27
|
+
</testcase>
|
|
28
|
+
</testsuite>
|
|
29
|
+
</testsuites>
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### Mochawesome JSON example
|
|
33
|
+
|
|
34
|
+
```json
|
|
35
|
+
{
|
|
36
|
+
"results": [
|
|
37
|
+
{
|
|
38
|
+
"title": "Authentication",
|
|
39
|
+
"tests": [
|
|
40
|
+
{
|
|
41
|
+
"title": "[TC-123] should login with valid credentials",
|
|
42
|
+
"fullTitle": "Authentication [TC-123] should login with valid credentials",
|
|
43
|
+
"state": "passed",
|
|
44
|
+
"pass": true,
|
|
45
|
+
"fail": false
|
|
46
|
+
},
|
|
47
|
+
{
|
|
48
|
+
"title": "[TC-124] should reject invalid password",
|
|
49
|
+
"state": "failed",
|
|
50
|
+
"pass": false,
|
|
51
|
+
"fail": true,
|
|
52
|
+
"err": { "message": "Expected 401 but got 200" }
|
|
53
|
+
},
|
|
54
|
+
{
|
|
55
|
+
"title": "[TC-125] should support SSO login",
|
|
56
|
+
"state": "pending",
|
|
57
|
+
"pass": false,
|
|
58
|
+
"fail": false,
|
|
59
|
+
"pending": true
|
|
60
|
+
}
|
|
61
|
+
]
|
|
62
|
+
}
|
|
63
|
+
]
|
|
64
|
+
}
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
The key requirement is that each test name contains a TestCollab case ID (e.g., `[TC-123]`). The CLI extracts this ID to match results to the correct test case in your test plan.
|
|
68
|
+
|
|
69
|
+
All examples below assume you've set the `TESTCOLLAB_TOKEN` environment variable (or pass `--api-key` to each command). See [Authentication](../README.md#authentication).
|
|
70
|
+
|
|
71
|
+
---
|
|
72
|
+
|
|
73
|
+
## Cypress
|
|
74
|
+
|
|
75
|
+
Cypress has native Mochawesome support.
|
|
76
|
+
|
|
77
|
+
**Install:**
|
|
78
|
+
|
|
79
|
+
```bash
|
|
80
|
+
npm install --save-dev mochawesome mochawesome-merge mochawesome-report-generator
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
**Configure** (`cypress.config.js`):
|
|
84
|
+
|
|
85
|
+
```js
|
|
86
|
+
module.exports = {
|
|
87
|
+
reporter: 'mochawesome',
|
|
88
|
+
reporterOptions: {
|
|
89
|
+
reportDir: 'mochawesome-report',
|
|
90
|
+
overwrite: false,
|
|
91
|
+
html: false,
|
|
92
|
+
json: true
|
|
93
|
+
}
|
|
94
|
+
};
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
**Run and upload:**
|
|
98
|
+
|
|
99
|
+
```bash
|
|
100
|
+
npx cypress run
|
|
101
|
+
tc report --project 123 --test-plan-id 456 \
|
|
102
|
+
--format mochawesome --result-file ./mochawesome-report/mochawesome.json
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
---
|
|
106
|
+
|
|
107
|
+
## Playwright
|
|
108
|
+
|
|
109
|
+
Playwright has a built-in JUnit reporter.
|
|
110
|
+
|
|
111
|
+
**Run:**
|
|
112
|
+
|
|
113
|
+
```bash
|
|
114
|
+
npx playwright test --reporter=junit
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
This writes to stdout by default. To write to a file, set the `PLAYWRIGHT_JUNIT_OUTPUT_NAME` env var:
|
|
118
|
+
|
|
119
|
+
```bash
|
|
120
|
+
PLAYWRIGHT_JUNIT_OUTPUT_NAME=results.xml npx playwright test --reporter=junit
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
Or configure in `playwright.config.ts`:
|
|
124
|
+
|
|
125
|
+
```ts
|
|
126
|
+
export default {
|
|
127
|
+
reporter: [['junit', { outputFile: 'results.xml' }]]
|
|
128
|
+
};
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
**Upload:**
|
|
132
|
+
|
|
133
|
+
```bash
|
|
134
|
+
tc report --project 123 --test-plan-id 456 \
|
|
135
|
+
--format junit --result-file ./results.xml
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
---
|
|
139
|
+
|
|
140
|
+
## Jest
|
|
141
|
+
|
|
142
|
+
Use the `jest-junit` package to generate JUnit XML.
|
|
143
|
+
|
|
144
|
+
**Install:**
|
|
145
|
+
|
|
146
|
+
```bash
|
|
147
|
+
npm install --save-dev jest-junit
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
**Run:**
|
|
151
|
+
|
|
152
|
+
```bash
|
|
153
|
+
JEST_JUNIT_OUTPUT_DIR=./reports npx jest --reporters=default --reporters=jest-junit
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
Or configure in `package.json`:
|
|
157
|
+
|
|
158
|
+
```json
|
|
159
|
+
{
|
|
160
|
+
"jest": {
|
|
161
|
+
"reporters": [
|
|
162
|
+
"default",
|
|
163
|
+
["jest-junit", { "outputDirectory": "./reports", "outputName": "results.xml" }]
|
|
164
|
+
]
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
```
|
|
168
|
+
|
|
169
|
+
**Upload:**
|
|
170
|
+
|
|
171
|
+
```bash
|
|
172
|
+
tc report --project 123 --test-plan-id 456 \
|
|
173
|
+
--format junit --result-file ./reports/results.xml
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
---
|
|
177
|
+
|
|
178
|
+
## Pytest
|
|
179
|
+
|
|
180
|
+
Pytest has built-in JUnit XML output.
|
|
181
|
+
|
|
182
|
+
**Run:**
|
|
183
|
+
|
|
184
|
+
```bash
|
|
185
|
+
pytest --junitxml=results.xml
|
|
186
|
+
```
|
|
187
|
+
|
|
188
|
+
**Upload:**
|
|
189
|
+
|
|
190
|
+
```bash
|
|
191
|
+
tc report --project 123 --test-plan-id 456 \
|
|
192
|
+
--format junit --result-file ./results.xml
|
|
193
|
+
```
|
|
194
|
+
|
|
195
|
+
---
|
|
196
|
+
|
|
197
|
+
## TestNG
|
|
198
|
+
|
|
199
|
+
TestNG generates JUnit-compatible XML by default.
|
|
200
|
+
|
|
201
|
+
**Run:**
|
|
202
|
+
|
|
203
|
+
The default output is at `test-output/junitreports/`. You can also configure the output in your `testng.xml` or build tool.
|
|
204
|
+
|
|
205
|
+
**Maven example:**
|
|
206
|
+
|
|
207
|
+
```bash
|
|
208
|
+
mvn test
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
**Upload:**
|
|
212
|
+
|
|
213
|
+
```bash
|
|
214
|
+
tc report --project 123 --test-plan-id 456 \
|
|
215
|
+
--format junit --result-file ./test-output/junitreports/TEST-TestSuite.xml
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
---
|
|
219
|
+
|
|
220
|
+
## JUnit 4/5
|
|
221
|
+
|
|
222
|
+
JUnit is the native source of the XML format — no extra setup needed.
|
|
223
|
+
|
|
224
|
+
**Maven:**
|
|
225
|
+
|
|
226
|
+
```bash
|
|
227
|
+
mvn test
|
|
228
|
+
# Results at target/surefire-reports/
|
|
229
|
+
```
|
|
230
|
+
|
|
231
|
+
**Gradle:**
|
|
232
|
+
|
|
233
|
+
```bash
|
|
234
|
+
gradle test
|
|
235
|
+
# Results at build/test-results/test/
|
|
236
|
+
```
|
|
237
|
+
|
|
238
|
+
**Upload:**
|
|
239
|
+
|
|
240
|
+
```bash
|
|
241
|
+
tc report --project 123 --test-plan-id 456 \
|
|
242
|
+
--format junit --result-file ./target/surefire-reports/TEST-com.example.MyTest.xml
|
|
243
|
+
```
|
|
244
|
+
|
|
245
|
+
---
|
|
246
|
+
|
|
247
|
+
## Robot Framework
|
|
248
|
+
|
|
249
|
+
Use the `--xunit` flag to generate JUnit-compatible XML.
|
|
250
|
+
|
|
251
|
+
**Run:**
|
|
252
|
+
|
|
253
|
+
```bash
|
|
254
|
+
robot --xunit results.xml tests/
|
|
255
|
+
```
|
|
256
|
+
|
|
257
|
+
**Upload:**
|
|
258
|
+
|
|
259
|
+
```bash
|
|
260
|
+
tc report --project 123 --test-plan-id 456 \
|
|
261
|
+
--format junit --result-file ./results.xml
|
|
262
|
+
```
|
|
263
|
+
|
|
264
|
+
---
|
|
265
|
+
|
|
266
|
+
## PHPUnit
|
|
267
|
+
|
|
268
|
+
PHPUnit has built-in JUnit XML logging.
|
|
269
|
+
|
|
270
|
+
**Run:**
|
|
271
|
+
|
|
272
|
+
```bash
|
|
273
|
+
phpunit --log-junit results.xml
|
|
274
|
+
```
|
|
275
|
+
|
|
276
|
+
**Upload:**
|
|
277
|
+
|
|
278
|
+
```bash
|
|
279
|
+
tc report --project 123 --test-plan-id 456 \
|
|
280
|
+
--format junit --result-file ./results.xml
|
|
281
|
+
```
|
|
282
|
+
|
|
283
|
+
---
|
|
284
|
+
|
|
285
|
+
## Cucumber.js
|
|
286
|
+
|
|
287
|
+
Use a JUnit formatter plugin.
|
|
288
|
+
|
|
289
|
+
**Install:**
|
|
290
|
+
|
|
291
|
+
```bash
|
|
292
|
+
npm install --save-dev cucumber-junit
|
|
293
|
+
```
|
|
294
|
+
|
|
295
|
+
**Run:**
|
|
296
|
+
|
|
297
|
+
```bash
|
|
298
|
+
npx cucumber-js --format json:./reports/cucumber.json
|
|
299
|
+
npx cucumber-junit < ./reports/cucumber.json > ./reports/results.xml
|
|
300
|
+
```
|
|
301
|
+
|
|
302
|
+
Or use `@cucumber/junit-xml-formatter` directly:
|
|
303
|
+
|
|
304
|
+
```bash
|
|
305
|
+
npm install --save-dev @cucumber/junit-xml-formatter
|
|
306
|
+
npx cucumber-js --format @cucumber/junit-xml-formatter:./reports/results.xml
|
|
307
|
+
```
|
|
308
|
+
|
|
309
|
+
**Upload:**
|
|
310
|
+
|
|
311
|
+
```bash
|
|
312
|
+
tc report --project 123 --test-plan-id 456 \
|
|
313
|
+
--format junit --result-file ./reports/results.xml
|
|
314
|
+
```
|
|
315
|
+
|
|
316
|
+
---
|
|
317
|
+
|
|
318
|
+
## Cucumber JVM
|
|
319
|
+
|
|
320
|
+
Cucumber JVM has a built-in JUnit XML plugin.
|
|
321
|
+
|
|
322
|
+
**Configure** (in `@CucumberOptions` or `cucumber.properties`):
|
|
323
|
+
|
|
324
|
+
```java
|
|
325
|
+
@CucumberOptions(plugin = {"junit:target/cucumber-reports/results.xml"})
|
|
326
|
+
```
|
|
327
|
+
|
|
328
|
+
Or in `cucumber.properties`:
|
|
329
|
+
|
|
330
|
+
```
|
|
331
|
+
cucumber.plugin=junit:target/cucumber-reports/results.xml
|
|
332
|
+
```
|
|
333
|
+
|
|
334
|
+
**Upload:**
|
|
335
|
+
|
|
336
|
+
```bash
|
|
337
|
+
tc report --project 123 --test-plan-id 456 \
|
|
338
|
+
--format junit --result-file ./target/cucumber-reports/results.xml
|
|
339
|
+
```
|
|
340
|
+
|
|
341
|
+
---
|
|
342
|
+
|
|
343
|
+
## WebDriverIO
|
|
344
|
+
|
|
345
|
+
Use the `@wdio/junit-reporter` package.
|
|
346
|
+
|
|
347
|
+
**Install:**
|
|
348
|
+
|
|
349
|
+
```bash
|
|
350
|
+
npm install --save-dev @wdio/junit-reporter
|
|
351
|
+
```
|
|
352
|
+
|
|
353
|
+
**Configure** (`wdio.conf.js`):
|
|
354
|
+
|
|
355
|
+
```js
|
|
356
|
+
exports.config = {
|
|
357
|
+
reporters: [
|
|
358
|
+
['junit', {
|
|
359
|
+
outputDir: './reports',
|
|
360
|
+
outputFileFormat: () => 'results.xml'
|
|
361
|
+
}]
|
|
362
|
+
]
|
|
363
|
+
};
|
|
364
|
+
```
|
|
365
|
+
|
|
366
|
+
**Upload:**
|
|
367
|
+
|
|
368
|
+
```bash
|
|
369
|
+
tc report --project 123 --test-plan-id 456 \
|
|
370
|
+
--format junit --result-file ./reports/results.xml
|
|
371
|
+
```
|
|
372
|
+
|
|
373
|
+
---
|
|
374
|
+
|
|
375
|
+
## TestCafe
|
|
376
|
+
|
|
377
|
+
Use the `testcafe-reporter-junit` package.
|
|
378
|
+
|
|
379
|
+
**Install:**
|
|
380
|
+
|
|
381
|
+
```bash
|
|
382
|
+
npm install --save-dev testcafe-reporter-junit
|
|
383
|
+
```
|
|
384
|
+
|
|
385
|
+
**Run:**
|
|
386
|
+
|
|
387
|
+
```bash
|
|
388
|
+
npx testcafe chrome tests/ --reporter junit:results.xml
|
|
389
|
+
```
|
|
390
|
+
|
|
391
|
+
**Upload:**
|
|
392
|
+
|
|
393
|
+
```bash
|
|
394
|
+
tc report --project 123 --test-plan-id 456 \
|
|
395
|
+
--format junit --result-file ./results.xml
|
|
396
|
+
```
|
|
397
|
+
|
|
398
|
+
---
|
|
399
|
+
|
|
400
|
+
## Newman (Postman)
|
|
401
|
+
|
|
402
|
+
Use the `newman-reporter-junit` package.
|
|
403
|
+
|
|
404
|
+
**Install:**
|
|
405
|
+
|
|
406
|
+
```bash
|
|
407
|
+
npm install --save-dev newman-reporter-junit
|
|
408
|
+
```
|
|
409
|
+
|
|
410
|
+
**Run:**
|
|
411
|
+
|
|
412
|
+
```bash
|
|
413
|
+
npx newman run collection.json --reporters cli,junit --reporter-junit-export results.xml
|
|
414
|
+
```
|
|
415
|
+
|
|
416
|
+
**Upload:**
|
|
417
|
+
|
|
418
|
+
```bash
|
|
419
|
+
tc report --project 123 --test-plan-id 456 \
|
|
420
|
+
--format junit --result-file ./results.xml
|
|
421
|
+
```
|
|
422
|
+
|
|
423
|
+
---
|
|
424
|
+
|
|
425
|
+
## Behave (Python)
|
|
426
|
+
|
|
427
|
+
Behave has built-in JUnit output.
|
|
428
|
+
|
|
429
|
+
**Run:**
|
|
430
|
+
|
|
431
|
+
```bash
|
|
432
|
+
behave --junit --junit-directory ./reports
|
|
433
|
+
```
|
|
434
|
+
|
|
435
|
+
**Upload:**
|
|
436
|
+
|
|
437
|
+
```bash
|
|
438
|
+
tc report --project 123 --test-plan-id 456 \
|
|
439
|
+
--format junit --result-file ./reports/TESTS-features.xml
|
|
440
|
+
```
|
|
441
|
+
|
|
442
|
+
---
|
|
443
|
+
|
|
444
|
+
## Go (`go test`)
|
|
445
|
+
|
|
446
|
+
Use `go-junit-report` to convert Go test output to JUnit XML.
|
|
447
|
+
|
|
448
|
+
**Install:**
|
|
449
|
+
|
|
450
|
+
```bash
|
|
451
|
+
go install github.com/jstemmer/go-junit-report/v2@latest
|
|
452
|
+
```
|
|
453
|
+
|
|
454
|
+
**Run:**
|
|
455
|
+
|
|
456
|
+
```bash
|
|
457
|
+
go test ./... -v 2>&1 | go-junit-report > results.xml
|
|
458
|
+
```
|
|
459
|
+
|
|
460
|
+
**Upload:**
|
|
461
|
+
|
|
462
|
+
```bash
|
|
463
|
+
tc report --project 123 --test-plan-id 456 \
|
|
464
|
+
--format junit --result-file ./results.xml
|
|
465
|
+
```
|
|
466
|
+
|
|
467
|
+
---
|
|
468
|
+
|
|
469
|
+
## Kaspresso / Kotlin
|
|
470
|
+
|
|
471
|
+
Kaspresso and other Kotlin test frameworks are built on the JUnit runner, so they produce JUnit XML natively.
|
|
472
|
+
|
|
473
|
+
**Run (Gradle):**
|
|
474
|
+
|
|
475
|
+
```bash
|
|
476
|
+
./gradlew connectedAndroidTest
|
|
477
|
+
# Results at app/build/outputs/androidTest-results/
|
|
478
|
+
```
|
|
479
|
+
|
|
480
|
+
**Upload:**
|
|
481
|
+
|
|
482
|
+
```bash
|
|
483
|
+
tc report --project 123 --test-plan-id 456 \
|
|
484
|
+
--format junit --result-file ./app/build/outputs/androidTest-results/TEST-results.xml
|
|
485
|
+
```
|
package/docs/specgen.md
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
## TODO
|
|
2
|
+
- [x] Wire `specgen` command with structured-output feature generation.
|
|
3
|
+
- [x] Add AI-assisted discovery (cached to `.testcollab/specgen.json`).
|
|
4
|
+
- [x] Support Gemini/Claude provider selection and document key requirements.
|
|
5
|
+
- [x] Update `package-lock.json` after installing new deps (Gemini SDK).
|
|
6
|
+
- [ ] Smoke-test both providers with real keys; add tests for provider selection/discovery parsing/error handling.
|
|
7
|
+
- [ ] Expand CLI help with model examples and first-run vs cached behavior.
|
|
8
|
+
|
|
9
|
+
`specgen` crawls your source code and generates `.feature` files with ready-to-run scenarios.
|
|
10
|
+
|
|
11
|
+
## Terminology
|
|
12
|
+
|
|
13
|
+
- **Target family:** A domain bucket (e.g., QA Copilot, Billing, BDD) that groups related code paths and sets defaults like parsing strategy, priority, and output folder.
|
|
14
|
+
|
|
15
|
+
- **Target:** A concrete unit inside a family (e.g., `ChatPanel.tsx`, `qacconversationthread` API handler, `testplan` service). Each target has entry files, supporting context, and its own `.feature` output path.
|
|
16
|
+
|
|
17
|
+
### Attributes
|
|
18
|
+
- **Target family:** `name`, `kind` (backend/ui/cli/job), `paths` (globs), `priority`, `owner` (optional), `notes`/exclusions, `output_root` for the family.
|
|
19
|
+
- **Target:** `id`, `family`, `type` (api_controller/model/ui_component/job/cli_command), `entry` files, `context` (supporting code/docs/tests), `routes/events` or `states/flows`, `priority`, `confidence_flags` (e.g., low-context), `output_path` for the `.feature` file.
|
|
20
|
+
|
|
21
|
+
### JSON shapes
|
|
22
|
+
Target family
|
|
23
|
+
```json
|
|
24
|
+
{
|
|
25
|
+
"name": "qa_copilot",
|
|
26
|
+
"kind": "backend",
|
|
27
|
+
"paths": ["api/qacopilot/**", "api/qacconversation*/**"],
|
|
28
|
+
"priority": "high",
|
|
29
|
+
"owner": "team-copilot",
|
|
30
|
+
"notes": "skip legacy/v1 controllers",
|
|
31
|
+
"output_root": "features/qa_copilot"
|
|
32
|
+
}
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
Target
|
|
36
|
+
```json
|
|
37
|
+
{
|
|
38
|
+
"id": "qacconversationthread",
|
|
39
|
+
"family": "qa_copilot",
|
|
40
|
+
"type": "api_controller",
|
|
41
|
+
"entry": ["api/qacconversationthread/controllers/qacconversationthread.js"],
|
|
42
|
+
"context": ["api/qacconversationthread/services/**", "tests/qacconversationthread/**"],
|
|
43
|
+
"routes": ["GET /qacconversationthreads", "POST /qacconversationthreads"],
|
|
44
|
+
"priority": "core",
|
|
45
|
+
"confidence_flags": ["low-context"],
|
|
46
|
+
"output_path": "features/qa_copilot/qacconversationthread.feature"
|
|
47
|
+
}
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
## State/cache
|
|
51
|
+
- Discovered target families/targets and their output paths are cached in `.testcollab/specgen.json` at repo root. Commit it for reproducible runs, or add it to `.gitignore` if you prefer ephemeral caching.
|
|
52
|
+
|
|
53
|
+
## How to run it
|
|
54
|
+
Requires an AI key:
|
|
55
|
+
- Claude models (default): set `ANTHROPIC_API_KEY`.
|
|
56
|
+
- Gemini 3 models: set `GOOGLE_GENAI_API_KEY` (or `GEMINI_API_KEY`).
|
|
57
|
+
|
|
58
|
+
1) From your project root, run `tc specgen --src ./src --out ./features` (or `npx tc specgen ...` if installed locally). If you omit `--out`, it defaults to `./features`.
|
|
59
|
+
2) Point `--src` at the codebase you want crawled; point `--out` at where you want the generated `.feature` files to land.
|
|
60
|
+
3) Review the generated Gherkin, adjust wording/step names to match your domain language, then commit.
|
|
61
|
+
|
|
62
|
+
### Dev workspace example
|
|
63
|
+
From a workspace root with `tc-cli` installed locally:
|
|
64
|
+
```bash
|
|
65
|
+
cd tc-cli
|
|
66
|
+
ANTHROPIC_API_KEY=... node ./src/index.js specgen --src ../qac_widget/src --out ../qac_widget/features --cache ../qac_widget/.testcollab/specgen.json --model claude-opus-4-5-20251101
|
|
67
|
+
```
|
|
68
|
+
Swap `--model` to a Gemini 3 model (e.g., `gemini-3-pro-preview`) and set `GOOGLE_GENAI_API_KEY` instead if you prefer Gemini.
|
|
69
|
+
|
|
70
|
+
## Quality checks
|
|
71
|
+
|
|
72
|
+
- **Small codebase (20–30 project files):** Quick skim of every generated `.feature` file, remove obvious duplicates, tighten scenario names, and ensure steps mirror the real user flows.
|
|
73
|
+
|
|
74
|
+
- **Medium & large (30+ source files):** Generate per module/folder to control noise, spot-check high-traffic paths first, dedupe cross-module scenarios, and keep a short backlog of follow-up edits for any low-confidence sections marked by the generator.
|
|
75
|
+
|
|
76
|
+
"@anthropic-ai/sdk": "^0.71.0",
|
|
77
|
+
"@google/generative-ai": "^0.11.0",
|
package/package.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@testcollab/cli",
|
|
3
|
+
"version": "1.3.0",
|
|
4
|
+
"description": "Command-line interface for TestCollab operations",
|
|
5
|
+
"main": "src/index.js",
|
|
6
|
+
"bin": {
|
|
7
|
+
"tc": "./src/index.js"
|
|
8
|
+
},
|
|
9
|
+
"type": "module",
|
|
10
|
+
"scripts": {
|
|
11
|
+
"test": "NODE_OPTIONS=\"--experimental-vm-modules\" jest"
|
|
12
|
+
},
|
|
13
|
+
"jest": {
|
|
14
|
+
"testEnvironment": "node"
|
|
15
|
+
},
|
|
16
|
+
"devDependencies": {
|
|
17
|
+
"jest": "^29.7.0"
|
|
18
|
+
},
|
|
19
|
+
"keywords": [
|
|
20
|
+
"testcollab",
|
|
21
|
+
"testing",
|
|
22
|
+
"bdd",
|
|
23
|
+
"gherkin",
|
|
24
|
+
"cli"
|
|
25
|
+
],
|
|
26
|
+
"author": "TestCollab",
|
|
27
|
+
"license": "MIT",
|
|
28
|
+
"dependencies": {
|
|
29
|
+
"@anthropic-ai/sdk": "^0.71.2",
|
|
30
|
+
"@cucumber/gherkin": "^33.1.0",
|
|
31
|
+
"@cucumber/messages": "^28.1.0",
|
|
32
|
+
"@google/generative-ai": "^0.24.1",
|
|
33
|
+
"commander": "^11.1.0",
|
|
34
|
+
"simple-git": "^3.28.0",
|
|
35
|
+
"testcollab-cypress-plugin": "^1.0.3",
|
|
36
|
+
"testcollab-sdk": "^2.1.1-SNAPSHOT.202509242236"
|
|
37
|
+
},
|
|
38
|
+
"engines": {
|
|
39
|
+
"node": ">=18.0.0"
|
|
40
|
+
},
|
|
41
|
+
"repository": {
|
|
42
|
+
"type": "git",
|
|
43
|
+
"url": "https://github.com/TCSoftInc/testcollab-cli.git"
|
|
44
|
+
},
|
|
45
|
+
"bugs": {
|
|
46
|
+
"url": "https://github.com/TCSoftInc/testcollab-cli/issues"
|
|
47
|
+
},
|
|
48
|
+
"homepage": "https://github.com/TCSoftInc/testcollab-cli#readme",
|
|
49
|
+
"publishConfig": {
|
|
50
|
+
"registry": "https://registry.npmjs.org/",
|
|
51
|
+
"access": "public",
|
|
52
|
+
"provenance": true
|
|
53
|
+
}
|
|
54
|
+
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
<?xml version="1.0" encoding="UTF-8"?>
|
|
2
|
+
<testsuites name="TestCollab CLI Sample JUnit">
|
|
3
|
+
<testsuite name="Authentication" tests="3" failures="1" skipped="1" time="0.62">
|
|
4
|
+
<testcase classname="Authentication.Login" name="[TC-1913] should login with valid credentials" time="0.12" />
|
|
5
|
+
<testcase classname="Authentication.Login" name="[TC-1914] should reject invalid password" time="0.43">
|
|
6
|
+
<failure message="Expected invalid password error">AssertionError: expected status 401 but got 200</failure>
|
|
7
|
+
</testcase>
|
|
8
|
+
<testcase classname="Authentication.Login" name="[TC-1915] should support SSO login" time="0.07">
|
|
9
|
+
<skipped />
|
|
10
|
+
</testcase>
|
|
11
|
+
</testsuite>
|
|
12
|
+
</testsuites>
|