depfix-ai 0.1.2 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +146 -0
- package/bin/run.js +0 -0
- package/dist/commands/audit.d.ts +12 -0
- package/dist/commands/audit.js +66 -0
- package/dist/commands/env/generate.d.ts +16 -0
- package/dist/commands/env/generate.d.ts.map +1 -1
- package/dist/commands/env/generate.js +45 -0
- package/dist/commands/fix.d.ts +14 -0
- package/dist/commands/fix.js +33 -0
- package/dist/commands/onboard.d.ts +12 -0
- package/dist/commands/onboard.js +25 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +6 -0
- package/dist/lib/audit/npmAudit.d.ts +13 -0
- package/dist/lib/audit/npmAudit.js +22 -0
- package/dist/lib/audit/summarize.d.ts +30 -0
- package/dist/lib/audit/summarize.js +79 -0
- package/dist/lib/config/store.d.ts +6 -0
- package/dist/lib/config/store.js +9 -0
- package/dist/lib/env/ai.d.ts +20 -0
- package/dist/lib/env/ai.d.ts.map +1 -0
- package/dist/lib/env/ai.js +139 -0
- package/dist/lib/env/render.d.ts +4 -0
- package/dist/lib/env/render.d.ts.map +1 -1
- package/dist/lib/env/render.js +50 -0
- package/dist/lib/env/scan.d.ts +14 -0
- package/dist/lib/env/scan.d.ts.map +1 -1
- package/dist/lib/env/scan.js +83 -0
- package/dist/lib/env/write.d.ts +11 -0
- package/dist/lib/env/write.d.ts.map +1 -1
- package/dist/lib/env/write.js +108 -0
- package/dist/lib/git/commit.d.ts +2 -0
- package/dist/lib/git/commit.js +5 -0
- package/dist/lib/git/diff.d.ts +6 -0
- package/dist/lib/git/diff.js +12 -0
- package/dist/lib/git/stash.d.ts +2 -0
- package/dist/lib/git/stash.js +4 -0
- package/dist/lib/git/status.d.ts +2 -0
- package/dist/lib/git/status.js +5 -0
- package/dist/lib/pm/detect.d.ts +3 -0
- package/dist/lib/pm/detect.js +19 -0
- package/dist/lib/pm/run.d.ts +3 -0
- package/dist/lib/pm/run.js +10 -0
- package/dist/lib/pm/types.d.ts +11 -0
- package/dist/lib/pm/types.js +1 -0
- package/dist/lib/safety/backup.d.ts +7 -0
- package/dist/lib/safety/backup.js +16 -0
- package/dist/lib/safety/dryRun.d.ts +3 -0
- package/dist/lib/safety/dryRun.js +7 -0
- package/dist/lib/ui/log.d.ts +3 -0
- package/dist/lib/ui/log.js +7 -0
- package/dist/lib/ui/spinner.d.ts +2 -0
- package/dist/lib/ui/spinner.js +4 -0
- package/package.json +8 -2
package/README.md
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
# depfix-ai
|
|
2
|
+
|
|
3
|
+
[](https://www.npmjs.com/package/depfix-ai)
|
|
4
|
+
[](https://opensource.org/licenses/MIT)
|
|
5
|
+
|
|
6
|
+
> CLI for dependency audit, env file generation, and contributor onboarding. Fix your deps, generate `.env.example`, and get projects ready in one command.
|
|
7
|
+
|
|
8
|
+
**Requires Node.js ≥ 18.**
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## Install
|
|
13
|
+
|
|
14
|
+
**Global (npm or pnpm):**
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
npm install -g depfix-ai
|
|
18
|
+
# or
|
|
19
|
+
pnpm add -g depfix-ai
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
**Run without installing (npx / pnpm dlx):**
|
|
23
|
+
|
|
24
|
+
```bash
|
|
25
|
+
npx depfix-ai --help
|
|
26
|
+
# or
|
|
27
|
+
pnpm dlx depfix-ai --help
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
---
|
|
31
|
+
|
|
32
|
+
## Quick start
|
|
33
|
+
|
|
34
|
+
After installing globally or with `npx` / `pnpm dlx`:
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
depfix-ai audit # Security audit + human summary
|
|
38
|
+
depfix-ai env generate # Scan source → .env.example
|
|
39
|
+
depfix-ai onboard # Install deps + env + tests
|
|
40
|
+
depfix-ai fix # Preview fixes (dry-run); use --apply to apply
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
One-off (no install):
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
npx depfix-ai audit
|
|
47
|
+
pnpm dlx depfix-ai env generate
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
---
|
|
51
|
+
|
|
52
|
+
## Commands
|
|
53
|
+
|
|
54
|
+
### `depfix-ai audit`
|
|
55
|
+
|
|
56
|
+
Run a security audit and get a human-readable summary (npm only for now).
|
|
57
|
+
|
|
58
|
+
| Flag | Description |
|
|
59
|
+
|------|-------------|
|
|
60
|
+
| `--json` | Print raw npm audit JSON |
|
|
61
|
+
| `--severity <level>` | `low` \| `moderate` \| `high` \| `critical` (default: low) |
|
|
62
|
+
| `--fail` | Exit 1 if vulnerabilities ≥ severity |
|
|
63
|
+
|
|
64
|
+
```bash
|
|
65
|
+
depfix-ai audit
|
|
66
|
+
depfix-ai audit --severity high --fail
|
|
67
|
+
depfix-ai audit --json
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
---
|
|
71
|
+
|
|
72
|
+
### `depfix-ai env generate`
|
|
73
|
+
|
|
74
|
+
Scan source for `process.env.*` and `import.meta.env.*`; generate grouped `.env.example` (and optionally a blank `.env`).
|
|
75
|
+
|
|
76
|
+
| Flag | Description |
|
|
77
|
+
|------|-------------|
|
|
78
|
+
| `--out <path>` | Output file (default: .env.example) |
|
|
79
|
+
| `--create` | Create .env with blank values if missing |
|
|
80
|
+
| `--force` | Overwrite .env when used with --create |
|
|
81
|
+
| `--check` | Verify .env.example has all vars; exit 1 if not |
|
|
82
|
+
|
|
83
|
+
```bash
|
|
84
|
+
depfix-ai env generate
|
|
85
|
+
depfix-ai env generate --create
|
|
86
|
+
depfix-ai env generate --check
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
---
|
|
90
|
+
|
|
91
|
+
### `depfix-ai fix`
|
|
92
|
+
|
|
93
|
+
Preview dependency fixes (dry-run by default). Use `--apply` to write changes.
|
|
94
|
+
|
|
95
|
+
| Flag | Description |
|
|
96
|
+
|------|-------------|
|
|
97
|
+
| `--apply` | Apply changes |
|
|
98
|
+
| `--force` | Pass --force to npm audit fix |
|
|
99
|
+
| `--stash` | Auto-stash if git dirty |
|
|
100
|
+
| `--commit` | Auto-commit with chore(deps): audit fix |
|
|
101
|
+
| `--dry-run` | Preview only (default) |
|
|
102
|
+
|
|
103
|
+
```bash
|
|
104
|
+
depfix-ai fix
|
|
105
|
+
depfix-ai fix --apply
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
---
|
|
109
|
+
|
|
110
|
+
### `depfix-ai onboard`
|
|
111
|
+
|
|
112
|
+
One-command setup: backup (git stash), install deps, env generate, run tests.
|
|
113
|
+
|
|
114
|
+
| Flag | Description |
|
|
115
|
+
|------|-------------|
|
|
116
|
+
| `--skip-install` | Skip npm install |
|
|
117
|
+
| `--skip-env` | Skip env generate |
|
|
118
|
+
| `--skip-test` | Skip test script |
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
depfix-ai onboard
|
|
122
|
+
depfix-ai onboard --skip-test
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
---
|
|
126
|
+
|
|
127
|
+
## Development
|
|
128
|
+
|
|
129
|
+
```bash
|
|
130
|
+
git clone https://github.com/hesxo/depfix-ai.git
|
|
131
|
+
cd depfix-ai
|
|
132
|
+
npm ci
|
|
133
|
+
# or: pnpm install
|
|
134
|
+
npm run build
|
|
135
|
+
npm test
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
**Scripts:** `build` · `test` · `lint` · `version:patch`
|
|
139
|
+
|
|
140
|
+
---
|
|
141
|
+
|
|
142
|
+
## License
|
|
143
|
+
|
|
144
|
+
MIT
|
|
145
|
+
|
|
146
|
+
|
package/bin/run.js
CHANGED
|
File without changes
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import { Command } from "@oclif/core";
|
|
2
|
+
export default class Audit extends Command {
|
|
3
|
+
static readonly id = "audit";
|
|
4
|
+
static readonly description = "Audit dependencies and summarize vulnerabilities (npm only for now).";
|
|
5
|
+
static readonly flags: {
|
|
6
|
+
json: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
7
|
+
severity: import("@oclif/core/interfaces").OptionFlag<string, import("@oclif/core/interfaces").CustomOptions>;
|
|
8
|
+
fail: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
9
|
+
};
|
|
10
|
+
run(): Promise<void>;
|
|
11
|
+
}
|
|
12
|
+
//# sourceMappingURL=audit.d.ts.map
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
import { Command, Flags } from "@oclif/core";
import { runNpmAuditJson } from "../lib/audit/npmAudit.js";
import { printAuditSummary, summarizeNpmAudit, } from "../lib/audit/summarize.js";
import { logError, logInfo } from "../lib/ui/log.js";

// Known severity levels, used to validate the --severity flag value.
const KNOWN_SEVERITIES = ["low", "moderate", "high", "critical"];

/**
 * Clamp a raw flag value to one of the known severity levels.
 * Anything missing or unrecognised falls back to "low".
 */
function normalizeSeverity(s) {
  return s && KNOWN_SEVERITIES.includes(s) ? s : "low";
}

/**
 * `depfix-ai audit` — run `npm audit --json`, print a human-readable
 * summary, and optionally fail the process when vulnerabilities at or
 * above the chosen severity exist.
 */
export default class Audit extends Command {
  static id = "audit";
  static description = "Audit dependencies and summarize vulnerabilities (npm only for now).";
  static flags = {
    json: Flags.boolean({
      description: "Print raw npm audit JSON to stdout",
    }),
    severity: Flags.string({
      description: "Minimum severity to include",
      options: ["low", "moderate", "high", "critical"],
      default: "low",
    }),
    fail: Flags.boolean({
      description: "Exit with code 1 if vulnerabilities at or above --severity exist",
    }),
  };

  async run() {
    const { flags } = await this.parse(Audit);
    const severity = normalizeSeverity(flags.severity);
    const { pm, rawJson, exitCode } = await runNpmAuditJson();

    // Only npm is supported for now; bail out politely for other managers.
    if (pm !== "npm") {
      logInfo(`Audit is only implemented for npm right now. Detected ${pm}.`);
      return;
    }

    if (!rawJson) {
      logError("npm audit did not return JSON output.");
      if (typeof exitCode === "number") this.exit(exitCode);
      return;
    }

    let parsed;
    try {
      parsed = JSON.parse(rawJson);
    } catch {
      // Unparseable output: surface the raw text when requested and mirror
      // npm's own exit code so CI still observes the failure.
      logError("Failed to parse npm audit JSON output.");
      if (flags.json) console.log(rawJson);
      if (typeof exitCode === "number") this.exit(exitCode);
      return;
    }

    const summary = summarizeNpmAudit(parsed, { minSeverity: severity });
    printAuditSummary(summary);
    if (flags.json) console.log(rawJson);

    if (flags.fail) {
      // Counts are already filtered to >= severity by summarizeNpmAudit,
      // but each bucket is still guarded explicitly against the threshold.
      const { counts } = summary;
      const hasIssues =
        counts.critical > 0 ||
        (severity !== "critical" && counts.high > 0) ||
        (["low", "moderate"].includes(severity) && counts.moderate > 0) ||
        (severity === "low" && counts.low > 0);
      if (hasIssues) this.exit(1);
    }
  }
}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import { Command } from "@oclif/core";
|
|
2
|
+
export default class EnvGenerate extends Command {
|
|
3
|
+
static readonly id = "env:generate";
|
|
4
|
+
static readonly description = "Generate .env.example from detected environment variable usage in source.";
|
|
5
|
+
static readonly flags: {
|
|
6
|
+
out: import("@oclif/core/interfaces").OptionFlag<string, import("@oclif/core/interfaces").CustomOptions>;
|
|
7
|
+
create: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
8
|
+
force: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
9
|
+
check: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
10
|
+
ai: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
11
|
+
model: import("@oclif/core/interfaces").OptionFlag<string, import("@oclif/core/interfaces").CustomOptions>;
|
|
12
|
+
"api-key": import("@oclif/core/interfaces").OptionFlag<string | undefined, import("@oclif/core/interfaces").CustomOptions>;
|
|
13
|
+
};
|
|
14
|
+
run(): Promise<void>;
|
|
15
|
+
}
|
|
16
|
+
//# sourceMappingURL=generate.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"generate.d.ts","sourceRoot":"","sources":["../../../src/commands/env/generate.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAS,MAAM,aAAa,CAAC;AAG7C,MAAM,CAAC,OAAO,OAAO,WAAY,SAAQ,OAAO;IAC9C,MAAM,CAAC,QAAQ,CAAC,EAAE,kBAAkB;IACpC,MAAM,CAAC,QAAQ,CAAC,WAAW,+EACmD;IAE9E,MAAM,CAAC,QAAQ,CAAC,KAAK
|
|
1
|
+
{"version":3,"file":"generate.d.ts","sourceRoot":"","sources":["../../../src/commands/env/generate.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAS,MAAM,aAAa,CAAC;AAG7C,MAAM,CAAC,OAAO,OAAO,WAAY,SAAQ,OAAO;IAC9C,MAAM,CAAC,QAAQ,CAAC,EAAE,kBAAkB;IACpC,MAAM,CAAC,QAAQ,CAAC,WAAW,+EACmD;IAE9E,MAAM,CAAC,QAAQ,CAAC,KAAK;;;;;;;;MA0BnB;IAEI,GAAG,IAAI,OAAO,CAAC,IAAI,CAAC;CAY3B"}
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
import { Command, Flags } from "@oclif/core";
import { runEnvGenerate } from "../../lib/env/write.js";

/**
 * `depfix-ai env generate` — scan source for environment variable usage
 * and write a grouped .env.example (optionally a blank .env as well).
 * All heavy lifting is delegated to runEnvGenerate.
 */
export default class EnvGenerate extends Command {
  static id = "env:generate";
  static description = "Generate .env.example from detected environment variable usage in source.";
  static flags = {
    out: Flags.string({
      description: "Output path for example file",
      default: ".env.example",
    }),
    create: Flags.boolean({
      description: "Create .env with blank values if missing",
    }),
    force: Flags.boolean({
      description: "Overwrite existing outputs (e.g. .env when used with --create)",
    }),
    check: Flags.boolean({
      description: "Verify .env.example contains all required vars; exit 1 if not",
    }),
    ai: Flags.boolean({
      description: "Use OpenAI to add descriptions and where-to-get for each variable",
      default: false,
    }),
    model: Flags.string({
      description: "OpenAI model for AI mode (e.g. gpt-4o-mini)",
      default: "gpt-4o-mini",
    }),
    "api-key": Flags.string({
      description: "OpenAI API key (default: OPENAI_API_KEY env)",
      env: "OPENAI_API_KEY",
    }),
  };

  async run() {
    const { flags } = await this.parse(EnvGenerate);
    const { out, create, force, check, ai, model } = flags;
    await runEnvGenerate({
      out,
      create,
      force,
      check,
      ai,
      model,
      // A missing key becomes "" so downstream code can truthiness-check it.
      apiKey: flags["api-key"] ?? "",
    });
  }
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
import { Command } from "@oclif/core";
|
|
2
|
+
export default class Fix extends Command {
|
|
3
|
+
static readonly id = "fix";
|
|
4
|
+
static readonly description = "Preview and apply dependency fixes safely (dry-run by default).";
|
|
5
|
+
static readonly flags: {
|
|
6
|
+
apply: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
7
|
+
force: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
8
|
+
stash: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
9
|
+
commit: import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
10
|
+
"dry-run": import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
11
|
+
};
|
|
12
|
+
run(): Promise<void>;
|
|
13
|
+
}
|
|
14
|
+
//# sourceMappingURL=fix.d.ts.map
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
import { Command, Flags } from "@oclif/core";
import { runFix } from "../lib/audit/summarize.js";

/**
 * `depfix-ai fix` — preview (and eventually apply) dependency fixes.
 * Dry-run is the default; --apply opts into writing changes.
 */
export default class Fix extends Command {
  static id = "fix";
  static description = "Preview and apply dependency fixes safely (dry-run by default).";
  static flags = {
    apply: Flags.boolean({
      description: "Actually apply changes (default: dry-run only)",
    }),
    force: Flags.boolean({
      description: "Pass --force to npm audit fix",
    }),
    stash: Flags.boolean({
      description: "Auto-stash if git working tree is dirty",
    }),
    commit: Flags.boolean({
      description: "Auto-commit changes with chore(deps): audit fix",
    }),
    "dry-run": Flags.boolean({
      description: "Preview only; do not write files (default: true)",
      default: true,
      allowNo: true,
    }),
  };

  async run() {
    const { flags } = await this.parse(Fix);
    // v0.1: placeholder; full implementation will use dry-run default, backups, diff preview
    const previewOnly = flags["dry-run"] !== false && !flags.apply;
    if (previewOnly) {
      this.log("Dry-run: no changes will be written. Use --apply to apply fixes.");
    }
    await runFix();
  }
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import { Command } from "@oclif/core";
|
|
2
|
+
export default class Onboard extends Command {
|
|
3
|
+
static readonly id = "onboard";
|
|
4
|
+
static readonly description = "Run contributor onboarding: install deps, generate env, run tests.";
|
|
5
|
+
static readonly flags: {
|
|
6
|
+
"skip-install": import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
7
|
+
"skip-env": import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
8
|
+
"skip-test": import("@oclif/core/interfaces").BooleanFlag<boolean>;
|
|
9
|
+
};
|
|
10
|
+
run(): Promise<void>;
|
|
11
|
+
}
|
|
12
|
+
//# sourceMappingURL=onboard.d.ts.map
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
import { Command, Flags } from "@oclif/core";
import { runOnboard } from "../lib/safety/backup.js";

/**
 * `depfix-ai onboard` — one-shot contributor setup. Delegates to
 * runOnboard, translating kebab-case CLI flags into camelCase options.
 */
export default class Onboard extends Command {
  static id = "onboard";
  static description = "Run contributor onboarding: install deps, generate env, run tests.";
  static flags = {
    "skip-install": Flags.boolean({
      description: "Skip dependency install",
    }),
    "skip-env": Flags.boolean({
      description: "Skip env generate",
    }),
    "skip-test": Flags.boolean({
      description: "Skip test script",
    }),
  };

  async run() {
    const { flags } = await this.parse(Onboard);
    const options = {
      skipInstall: flags["skip-install"],
      skipEnv: flags["skip-env"],
      skipTest: flags["skip-test"],
    };
    await runOnboard(options);
  }
}
|
package/dist/index.d.ts
ADDED
package/dist/index.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AASA,eAAO,MAAM,OAAO,QAAyB,CAAC"}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
import { readFileSync } from "node:fs";
import { fileURLToPath } from "node:url";
import { dirname, join } from "node:path";

const __dirname = dirname(fileURLToPath(import.meta.url));

/**
 * Read this package's own version from the package.json one directory
 * above the compiled output (dist/).
 *
 * Wrapped in try/catch so that importing this module never throws: the
 * original code called readFileSync at module top level and would crash
 * every importer if package.json was missing or unreadable. Falls back
 * to "0.0.0" on any read/parse failure or non-string version field.
 *
 * @returns {string} semver string, or "0.0.0" when unavailable.
 */
function readOwnVersion() {
  try {
    const pkg = JSON.parse(readFileSync(join(__dirname, "..", "package.json"), "utf8"));
    return typeof pkg.version === "string" ? pkg.version : "0.0.0";
  } catch {
    return "0.0.0";
  }
}

export const version = readOwnVersion();
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
export interface NpmAuditResult {
|
|
2
|
+
pm: "npm" | string;
|
|
3
|
+
rawJson: string | undefined;
|
|
4
|
+
exitCode: number | undefined;
|
|
5
|
+
}
|
|
6
|
+
/**
|
|
7
|
+
* Run an npm audit in JSON mode.
|
|
8
|
+
*
|
|
9
|
+
* For v0.1.0 we only support npm; if another package manager
|
|
10
|
+
* is detected we return early without running anything.
|
|
11
|
+
*/
|
|
12
|
+
export declare function runNpmAuditJson(cwd?: string): Promise<NpmAuditResult>;
|
|
13
|
+
//# sourceMappingURL=npmAudit.d.ts.map
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { detectPackageManager } from "../pm/detect.js";
import { runPmCommand } from "../pm/run.js";

/**
 * Run an npm audit in JSON mode.
 *
 * For v0.1.0 we only support npm; if another package manager
 * is detected we return early without running anything.
 *
 * @param {string} [cwd] directory to audit (defaults to process.cwd()).
 * @returns {Promise<{pm: string, rawJson: string|undefined, exitCode: number|undefined}>}
 */
export async function runNpmAuditJson(cwd = process.cwd()) {
  const pm = detectPackageManager(cwd);
  if (pm !== "npm") {
    // Unsupported manager: report which one was seen, run nothing.
    return { pm, rawJson: undefined, exitCode: undefined };
  }
  const result = await runPmCommand(pm, ["audit", "--json"], { cwd });
  return { pm, rawJson: result.stdout, exitCode: result.exitCode };
}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
export type Severity = "low" | "moderate" | "high" | "critical";
|
|
2
|
+
export interface AuditSummaryOptions {
|
|
3
|
+
/**
|
|
4
|
+
* Minimum severity to include in the summary.
|
|
5
|
+
* Vulnerabilities below this level will be ignored.
|
|
6
|
+
*/
|
|
7
|
+
minSeverity?: Severity;
|
|
8
|
+
}
|
|
9
|
+
export interface SeverityCounts {
|
|
10
|
+
low: number;
|
|
11
|
+
moderate: number;
|
|
12
|
+
high: number;
|
|
13
|
+
critical: number;
|
|
14
|
+
}
|
|
15
|
+
export interface PackageImpact {
|
|
16
|
+
name: string;
|
|
17
|
+
count: number;
|
|
18
|
+
}
|
|
19
|
+
export interface AuditSummary {
|
|
20
|
+
counts: SeverityCounts;
|
|
21
|
+
impactedPackages: PackageImpact[];
|
|
22
|
+
}
|
|
23
|
+
/**
|
|
24
|
+
* Normalise npm audit JSON into a simple summary that is reasonably
|
|
25
|
+
* robust across npm versions (v6/v7+).
|
|
26
|
+
*/
|
|
27
|
+
export declare function summarizeNpmAudit(data: unknown, options?: AuditSummaryOptions): AuditSummary;
|
|
28
|
+
export declare function printAuditSummary(summary: AuditSummary): void;
|
|
29
|
+
export declare function runFix(): Promise<void>;
|
|
30
|
+
//# sourceMappingURL=summarize.d.ts.map
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import { logInfo } from "../ui/log.js";
|
|
2
|
+
// Severity ranking used for threshold comparisons (ascending order).
const severityOrder = ["low", "moderate", "high", "critical"];

// True when severity `a` ranks at or above `min` in severityOrder.
function severityAtLeast(a, min) {
  return severityOrder.indexOf(a) >= severityOrder.indexOf(min);
}

// Fresh all-zero per-severity counts bucket.
function emptyCounts() {
  return { low: 0, moderate: 0, high: 0, critical: 0 };
}

/**
 * Normalise npm audit JSON into a simple summary that is reasonably
 * robust across npm versions (v6/v7+).
 */
export function summarizeNpmAudit(data, options = {}) {
  const minSeverity = options.minSeverity ?? "low";
  const counts = emptyCounts();
  const perPackage = new Map();
  const src = data;

  // Tally one finding against both the severity bucket and its package.
  const record = (pkgName, severity) => {
    counts[severity] += 1;
    perPackage.set(pkgName, (perPackage.get(pkgName) ?? 0) + 1);
  };

  if (src && typeof src === "object" && src.vulnerabilities) {
    // Modern (npm v7+) shape: `vulnerabilities` keyed by package name.
    for (const [pkgName, vuln] of Object.entries(src.vulnerabilities)) {
      const severity = vuln.severity;
      if (severity && severityAtLeast(severity, minSeverity)) {
        record(pkgName, severity);
      }
    }
  } else if (src && typeof src === "object" && src.advisories) {
    // Legacy (npm v6) shape: advisories carry module_name.
    for (const adv of Object.values(src.advisories)) {
      const severity = adv.severity;
      const moduleName = adv.module_name;
      if (severity && moduleName && severityAtLeast(severity, minSeverity)) {
        record(moduleName, severity);
      }
    }
  } else if (src && src.metadata && src.metadata.vulnerabilities) {
    // Fallback: lift aggregate counts from metadata (no per-package detail).
    const metaCounts = src.metadata.vulnerabilities;
    for (const sev of severityOrder) {
      const value = metaCounts[sev];
      if (typeof value === "number" && severityAtLeast(sev, minSeverity)) {
        counts[sev] += value;
      }
    }
  }

  // Top five most-affected packages, most findings first.
  const impactedPackages = [...perPackage.entries()]
    .map(([name, count]) => ({ name, count }))
    .sort((a, b) => b.count - a.count)
    .slice(0, 5);

  return { counts, impactedPackages };
}
|
|
57
|
+
/**
 * Print a human-readable rendering of an AuditSummary via logInfo:
 * per-severity counts, the most-affected packages (if any), and a
 * short list of suggested next steps.
 */
export function printAuditSummary(summary) {
  const { counts, impactedPackages } = summary;
  logInfo("Vulnerability summary:");
  logInfo(` low: ${counts.low}, moderate: ${counts.moderate}, high: ${counts.high}, critical: ${counts.critical}`);
  if (impactedPackages.length === 0) {
    logInfo("No vulnerable packages found at or above the selected severity.");
  } else {
    logInfo("Top affected packages:");
    for (const pkg of impactedPackages) {
      logInfo(` ${pkg.name}: ${pkg.count} issue(s)`);
    }
  }
  logInfo("What to do next:");
  logInfo(" - Run `npm audit fix` to apply safe automatic fixes.");
  logInfo(" - For remaining issues, review advisories and consider upgrading major versions or replacing packages.");
  logInfo(" - If you cannot upgrade immediately, consider using overrides/resolutions with care and track them for cleanup.");
}
// Temporary placeholder to keep the existing `fix` command wired up.
// This can later be replaced with a real remediation flow.
export async function runFix() {
  logInfo("Running depfix-ai fix (not implemented yet).");
}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
export type AIEnvDoc = {
|
|
2
|
+
key: string;
|
|
3
|
+
description: string;
|
|
4
|
+
where_to_get: string;
|
|
5
|
+
example_value: string;
|
|
6
|
+
is_secret: boolean;
|
|
7
|
+
};
|
|
8
|
+
export type AIGenerateOptions = {
|
|
9
|
+
apiKey: string;
|
|
10
|
+
model: string;
|
|
11
|
+
projectHint?: string;
|
|
12
|
+
contexts: Record<string, {
|
|
13
|
+
file: string;
|
|
14
|
+
line: number;
|
|
15
|
+
snippet: string;
|
|
16
|
+
}[]>;
|
|
17
|
+
keys: string[];
|
|
18
|
+
};
|
|
19
|
+
export declare function generateEnvDocsWithOpenAI(opts: AIGenerateOptions): Promise<AIEnvDoc[]>;
|
|
20
|
+
//# sourceMappingURL=ai.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ai.d.ts","sourceRoot":"","sources":["../../../src/lib/env/ai.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,QAAQ,GAAG;IACrB,GAAG,EAAE,MAAM,CAAC;IACZ,WAAW,EAAE,MAAM,CAAC;IACpB,YAAY,EAAE,MAAM,CAAC;IACrB,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,OAAO,CAAC;CACpB,CAAC;AAEF,MAAM,MAAM,iBAAiB,GAAG;IAC9B,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,QAAQ,EAAE,MAAM,CAAC,MAAM,EAAE;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,IAAI,EAAE,MAAM,CAAC;QAAC,OAAO,EAAE,MAAM,CAAA;KAAE,EAAE,CAAC,CAAC;IAC5E,IAAI,EAAE,MAAM,EAAE,CAAC;CAChB,CAAC;AAoGF,wBAAsB,yBAAyB,CAC7C,IAAI,EAAE,iBAAiB,GACtB,OAAO,CAAC,QAAQ,EAAE,CAAC,CAiDrB"}
|
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
// JSON Schema handed to the OpenAI Responses API so the model is forced
// to return an { items: AIEnvDoc[] } object and nothing else.
const JSON_SCHEMA = {
  name: "env_docs",
  strict: true,
  schema: {
    type: "object",
    additionalProperties: false,
    properties: {
      items: {
        type: "array",
        items: {
          type: "object",
          additionalProperties: false,
          properties: {
            key: { type: "string" },
            description: { type: "string" },
            where_to_get: { type: "string" },
            example_value: { type: "string" },
            is_secret: { type: "boolean" },
          },
          required: [
            "key",
            "description",
            "where_to_get",
            "example_value",
            "is_secret",
          ],
        },
      },
    },
    required: ["items"],
  },
};

/**
 * Build the chat-style input for the OpenAI Responses API: a system
 * message with output rules plus a user message listing every variable
 * with (at most) its first recorded usage context.
 */
function buildInput(opts) {
  const variableLines = opts.keys.map((key) => {
    const [firstContext] = opts.contexts[key] ?? [];
    const seenAt = firstContext ? `${firstContext.file}:${firstContext.line}` : "unknown";
    const snippet = firstContext ? firstContext.snippet : "";
    return `- ${key}\n seen_at: ${seenAt}\n snippet: ${snippet}`;
  });
  const system = [
    "You generate documentation for environment variables.",
    "Return ONLY JSON that matches the provided JSON Schema.",
    "Do not include markdown or extra text.",
    "Never output real secrets. Use safe placeholders.",
    "Keep descriptions short and practical.",
    "where_to_get must be actionable (dashboard, secret manager, CI, local service, etc.).",
  ].join(" ");
  const user = [
    opts.projectHint ? `Project hint: ${opts.projectHint}` : "",
    "Variables:",
    ...variableLines,
  ]
    .filter(Boolean)
    .join("\n");
  return [
    { role: "system", content: system },
    { role: "user", content: user },
  ];
}
|
|
60
|
+
/**
 * Pull the first non-empty text payload out of an OpenAI Responses API
 * result. Prefers the convenience `output_text` field (trimmed), then
 * falls back to walking the structured `output` array (untrimmed).
 * Returns "" when no text is found.
 */
function extractTextFromResponses(data) {
  const direct = typeof data?.output_text === "string" ? data.output_text.trim() : "";
  if (direct) {
    return direct;
  }
  const output = Array.isArray(data?.output) ? data.output : [];
  for (const item of output) {
    if (!Array.isArray(item?.content)) {
      continue;
    }
    for (const part of item.content) {
      if (typeof part?.text === "string" && part.text.trim()) {
        return part.text;
      }
    }
  }
  return "";
}

/**
 * Parse JSON strictly first; on failure, grab the outermost {...} span
 * from the string and retry. Returns null when nothing parses.
 */
function tryParseJsonLoose(raw) {
  try {
    return JSON.parse(raw);
  } catch {
    const match = raw.match(/\{[\s\S]*\}/);
    if (!match) {
      return null;
    }
    try {
      return JSON.parse(match[0]);
    } catch {
      return null;
    }
  }
}
|
|
97
|
+
/**
 * Ask OpenAI (Responses API, JSON-schema constrained output) to document
 * each environment variable. Returns a sanitised list of AIEnvDoc-shaped
 * objects; entries without a key are dropped.
 * @throws when the HTTP request fails or the model output is not JSON.
 */
export async function generateEnvDocsWithOpenAI(opts) {
  const response = await fetch("https://api.openai.com/v1/responses", {
    method: "POST",
    headers: {
      Authorization: `Bearer ${opts.apiKey}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      model: opts.model,
      input: buildInput(opts),
      // Constrain the model's output to the env_docs JSON schema.
      text: {
        format: {
          type: "json_schema",
          ...JSON_SCHEMA,
        },
      },
    }),
  });
  if (!response.ok) {
    const text = await response.text();
    throw new Error(`OpenAI request failed (${response.status}): ${text}`);
  }
  const data = await response.json();
  const raw = extractTextFromResponses(data).trim();
  const parsed = tryParseJsonLoose(raw);
  if (!parsed) {
    throw new Error("AI output was not valid JSON. Try again, or use a different model.");
  }
  const items = Array.isArray(parsed?.items) ? parsed.items : [];
  // Coerce every field defensively — the model may omit or mistype values
  // despite the schema constraint.
  return items
    .map((item) => ({
      key: String(item?.key ?? ""),
      description: String(item?.description ?? ""),
      where_to_get: String(item?.where_to_get ?? ""),
      example_value: String(item?.example_value ?? ""),
      is_secret: Boolean(item?.is_secret),
    }))
    .filter((doc) => doc.key.length > 0);
}
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"render.d.ts","sourceRoot":"","sources":["../../../src/lib/env/render.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,aAAa,EAAE,MAAM,WAAW,CAAC;
|
|
1
|
+
{"version":3,"file":"render.d.ts","sourceRoot":"","sources":["../../../src/lib/env/render.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,SAAS,CAAC;AACxC,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,WAAW,CAAC;AAgB/C,wBAAgB,SAAS,CACvB,MAAM,EAAE,aAAa,EACrB,MAAM,CAAC,EAAE,QAAQ,EAAE,GAClB,MAAM,CA6CR"}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
// Prefix → section heading pairs, checked in order; keys matching none
// of these fall through to an "Other" section.
const GROUPS = [
  { prefix: "DB_", heading: "Database" },
  { prefix: "REDIS_", heading: "Redis" },
  { prefix: "AWS_", heading: "AWS" },
  { prefix: "SMTP_", heading: "SMTP" },
  { prefix: "NEXT_PUBLIC_", heading: "Next.js public env" },
  { prefix: "VITE_", heading: "Vite env" },
];

/**
 * Render scanned env keys into .env.example text, grouped by prefix.
 * When AI docs are supplied, each documented key gets a comment block
 * (description, where-to-get, secret note) and an example value;
 * undocumented keys render as a bare `KEY=` line.
 */
export function renderEnv(result, aiDocs) {
  const unassigned = new Set(result.keys);
  const sections = [];
  for (const { prefix, heading } of GROUPS) {
    const sectionKeys = result.keys.filter((key) => key.startsWith(prefix));
    if (sectionKeys.length > 0) {
      for (const key of sectionKeys) {
        unassigned.delete(key);
      }
      sections.push({ heading, keys: sectionKeys });
    }
  }
  if (unassigned.size > 0) {
    // Everything unmatched, sorted for a stable output.
    sections.push({ heading: "Other", keys: [...unassigned].sort() });
  }

  const docsByKey = aiDocs?.length ? new Map(aiDocs.map((doc) => [doc.key, doc])) : null;

  const lines = [];
  for (const { heading, keys } of sections) {
    lines.push(`# ${heading}`);
    for (const key of keys) {
      const doc = docsByKey?.get(key);
      if (!doc) {
        lines.push(`${key}=`);
        continue;
      }
      const secretNote = doc.is_secret
        ? "Secret value. Do not commit."
        : "Non-secret value (verify before committing).";
      lines.push(`# ${doc.key}`);
      lines.push(`# ${doc.description}`);
      lines.push(`# Where to get it: ${doc.where_to_get}`);
      lines.push(`# ${secretNote}`);
      lines.push(`${doc.key}=${doc.example_value ?? ""}`);
    }
    // Blank separator after every section.
    lines.push("");
  }
  // Collapse the trailing separator to a single final newline.
  return lines.join("\n").trimEnd() + (lines.length ? "\n" : "");
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
/** Result of a key-only environment-variable scan. */
export interface EnvScanResult {
    /** Sorted, de-duplicated env var names referenced by the scanned sources. */
    keys: string[];
}
/** One source location where an env var reference was found. */
export interface KeyContext {
    /** File path relative to the scan root, normalized to forward slashes. */
    file: string;
    /** 1-based line number of the reference. */
    line: number;
    /** Trimmed source line containing the reference, truncated to 220 chars. */
    snippet: string;
}
/** Scan result that also carries per-key reference sites. */
export interface EnvScanWithContextResult extends EnvScanResult {
    /** Map from env var name to at most `maxContextPerKey` reference sites. */
    contexts: Record<string, KeyContext[]>;
}
export declare function scanEnv(cwd?: string): Promise<EnvScanResult>;
export declare function scanEnvWithContext(cwd?: string, maxContextPerKey?: number): Promise<EnvScanWithContextResult>;
//# sourceMappingURL=scan.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"scan.d.ts","sourceRoot":"","sources":["../../../src/lib/env/scan.ts"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"scan.d.ts","sourceRoot":"","sources":["../../../src/lib/env/scan.ts"],"names":[],"mappings":"AAIA,MAAM,WAAW,aAAa;IAC5B,IAAI,EAAE,MAAM,EAAE,CAAC;CAChB;AAED,MAAM,WAAW,UAAU;IACzB,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,wBAAyB,SAAQ,aAAa;IAC7D,QAAQ,EAAE,MAAM,CAAC,MAAM,EAAE,UAAU,EAAE,CAAC,CAAC;CACxC;AA6BD,wBAAsB,OAAO,CAAC,GAAG,SAAgB,GAAG,OAAO,CAAC,aAAa,CAAC,CAGzE;AAED,wBAAsB,kBAAkB,CACtC,GAAG,SAAgB,EACnB,gBAAgB,SAAI,GACnB,OAAO,CAAC,wBAAwB,CAAC,CAqDnC"}
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import fg from "fast-glob";
|
|
4
|
+
// Source trees searched for env-var references.
const INCLUDE_GLOBS = [
    "src/**/*.{js,jsx,ts,tsx,mjs,cjs}",
    "app/**/*.{js,jsx,ts,tsx,mjs,cjs}",
    "server/**/*.{js,jsx,ts,tsx,mjs,cjs}",
    "pages/**/*.{js,jsx,ts,tsx,mjs,cjs}",
];
// Generated/vendored directories that must never be scanned.
const EXCLUDE_GLOBS = ["**/node_modules/**", "**/dist/**", "**/.next/**", "**/build/**", "**/coverage/**"];
// Conventional env var names: SCREAMING_SNAKE_CASE, starting with a letter.
const ENV_KEY_STRICT = /^[A-Z][A-Z0-9_]*$/;
// Patterns: [RegExp, group index for key]
// Covers process.env.X / process.env["X"] (incl. optional chaining),
// import.meta.env.X, Deno.env.get("X"), and Bun.env.X.
// NOTE: removed PROCESS_ENV_REGEX / IMPORT_META_ENV_REGEX — they were
// declared but never referenced in this module and are not exported.
const CODE_PATTERNS = [
    [/\bprocess(?:\?\.|\.)env(?:\?\.|\.)([A-Za-z_][A-Za-z0-9_]*)\b/g, 1],
    [/\bprocess(?:\?\.|\.)env\[\s*["']([A-Za-z_][A-Za-z0-9_]*)["']\s*\]/g, 1],
    [/\bimport\.meta\.env\.([A-Za-z_][A-Za-z0-9_]*)\b/g, 1],
    [/\bDeno\.env\.get\(\s*["']([A-Za-z_][A-Za-z0-9_]*)["']\s*\)/g, 1],
    [/\bBun\.env\.([A-Za-z_][A-Za-z0-9_]*)\b/g, 1],
];
/** Return true when `k` looks like a conventional env var name. */
function keyOk(k) {
    return ENV_KEY_STRICT.test(k);
}
|
|
25
|
+
/**
 * Scan the project for referenced env vars, returning only the sorted
 * unique key names (no per-key source contexts).
 */
export async function scanEnv(cwd = process.cwd()) {
    // maxContextPerKey = 0 disables context collection in the full scanner.
    const { keys } = await scanEnvWithContext(cwd, 0);
    return { keys };
}
|
|
29
|
+
/**
 * Scan project sources for env-var references, collecting up to
 * `maxContextPerKey` example reference sites per key.
 *
 * Line-comment lines (// or #) are skipped; files that cannot be read are
 * silently ignored (best-effort scan).
 */
export async function scanEnvWithContext(cwd = process.cwd(), maxContextPerKey = 2) {
    const matchedFiles = await fg(INCLUDE_GLOBS, {
        cwd,
        ignore: EXCLUDE_GLOBS,
        absolute: true,
    });
    const found = new Set();
    const contexts = {};
    // Record one reference site, capped at maxContextPerKey per key.
    const record = (key, file, lineNo, text) => {
        if (!keyOk(key))
            return;
        found.add(key);
        if (maxContextPerKey <= 0)
            return;
        const bucket = (contexts[key] ??= []);
        if (bucket.length >= maxContextPerKey)
            return;
        bucket.push({ file, line: lineNo, snippet: text.trim().slice(0, 220) });
    };
    for (const absPath of matchedFiles) {
        let source;
        try {
            source = await fs.readFile(absPath, "utf8");
        }
        catch {
            continue; // unreadable file: skip, best-effort
        }
        const rel = path.relative(cwd, absPath).replace(/\\/g, "/");
        source.split(/\r?\n/).forEach((text, idx) => {
            const trimmed = text.trim();
            // Skip obvious line comments to reduce false positives.
            if (trimmed.startsWith("//") || trimmed.startsWith("#"))
                return;
            for (const [pattern, groupIdx] of CODE_PATTERNS) {
                // /g regexes are stateful; reset before reuse on each line.
                pattern.lastIndex = 0;
                for (let m = pattern.exec(text); m !== null; m = pattern.exec(text)) {
                    record(m[groupIdx], rel, idx + 1, text);
                }
            }
        });
    }
    return {
        keys: Array.from(found).sort(),
        contexts,
    };
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
/** Flags accepted by the `env generate` command. */
export interface EnvGenerateFlags {
    /** Output path for the generated template, e.g. ".env.example". */
    out: string;
    /** When true, also write a .env file alongside the template. */
    create: boolean;
    /** Overwrite an existing .env when creating. */
    force: boolean;
    /** Verify-only mode: fail when the template is missing scanned keys. */
    check: boolean;
    /** Annotate keys with AI-generated descriptions. */
    ai: boolean;
    /** Model name used in AI mode. */
    model: string;
    /** Explicit API key for AI mode; empty string falls back to the environment. */
    apiKey: string;
}
export declare function runEnvGenerate(opts?: Partial<EnvGenerateFlags>): Promise<void>;
//# sourceMappingURL=write.d.ts.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"write.d.ts","sourceRoot":"","sources":["../../../src/lib/env/write.ts"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"write.d.ts","sourceRoot":"","sources":["../../../src/lib/env/write.ts"],"names":[],"mappings":"AAOA,MAAM,WAAW,gBAAgB;IAC/B,GAAG,EAAE,MAAM,CAAC;IACZ,MAAM,EAAE,OAAO,CAAC;IAChB,KAAK,EAAE,OAAO,CAAC;IACf,KAAK,EAAE,OAAO,CAAC;IACf,EAAE,EAAE,OAAO,CAAC;IACZ,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;CAChB;AAmCD,wBAAsB,cAAc,CAAC,IAAI,GAAE,OAAO,CAAC,gBAAgB,CAAM,iBA6ExE"}
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import { generateEnvDocsWithOpenAI } from "./ai.js";
|
|
4
|
+
import { renderEnv } from "./render.js";
|
|
5
|
+
import { scanEnv, scanEnvWithContext } from "./scan.js";
|
|
6
|
+
import { logInfo, logError } from "../ui/log.js";
|
|
7
|
+
// Baseline flag values for `env generate`; caller-supplied options override
// these via object spread in runEnvGenerate.
const defaultEnvFlags = {
    out: ".env.example", // template output path, resolved against cwd
    create: false, // also write a .env file
    force: false, // overwrite an existing .env when creating
    check: false, // verify-only mode (no files written)
    ai: false, // annotate keys via OpenAI
    model: "gpt-4o-mini", // model used when `ai` is true
    apiKey: "", // empty → fall back to OPENAI_API_KEY at runtime
};
|
|
16
|
+
/** Return true when a filesystem entry (file or directory) exists at `p`. */
async function fileExists(p) {
    // stat resolves for any existing entry and rejects otherwise.
    return fs.stat(p).then(
        () => true,
        () => false,
    );
}
|
|
25
|
+
/**
 * Extract the set of variable names declared in an .env-style file.
 * Blank lines, `#` comments, and lines without a `KEY=` prefix are ignored.
 */
function parseEnvKeysFromExample(content) {
    const found = new Set();
    for (const rawLine of content.split(/\r?\n/)) {
        const entry = rawLine.trim();
        if (entry === "" || entry.startsWith("#"))
            continue;
        const sep = entry.indexOf("=");
        // "=" must exist and must not be the first character.
        if (sep <= 0)
            continue;
        const name = entry.slice(0, sep).trim();
        if (name)
            found.add(name);
    }
    return found;
}
|
|
41
|
+
/**
 * Entry point for `env generate`.
 *
 * Modes (driven by flags, merged over defaultEnvFlags):
 * - check:  compare the existing template against scanned keys; sets
 *           process.exitCode = 1 and returns early on any failure.
 * - ai:     fetch per-key docs from OpenAI before rendering; requires an API
 *           key from --api-key or OPENAI_API_KEY.
 * - default: (re)write the template file; with `create`, also write .env
 *           (existing .env is preserved unless `force`).
 *
 * Never throws for expected failures — errors are logged and reported via
 * process.exitCode so the CLI wrapper exits non-zero.
 */
export async function runEnvGenerate(opts = {}) {
    const flags = { ...defaultEnvFlags, ...opts };
    const cwd = process.cwd();
    const outPath = path.resolve(cwd, flags.out);
    const envPath = path.resolve(cwd, ".env");
    // AI mode needs source snippets per key; plain mode only needs key names.
    const scanResult = flags.ai
        ? await scanEnvWithContext(cwd, 2)
        : await scanEnv(cwd);
    if (flags.check) {
        if (!(await fileExists(outPath))) {
            logError(`${flags.out} does not exist. Run 'depfix-ai env generate' to create it.`);
            process.exitCode = 1;
            return;
        }
        const existing = await fs.readFile(outPath, "utf8");
        const existingKeys = parseEnvKeysFromExample(existing);
        const missing = scanResult.keys.filter((k) => !existingKeys.has(k));
        if (missing.length > 0) {
            logError(`${flags.out} is missing the following environment variables: ${missing.join(", ")}`);
            process.exitCode = 1;
            return;
        }
        logInfo(`${flags.out} contains all required environment variables.`);
        return;
    }
    let aiDocs;
    if (flags.ai && scanResult.keys.length > 0) {
        // Explicit flag wins; otherwise fall back to the environment.
        const apiKey = flags.apiKey || process.env.OPENAI_API_KEY?.trim();
        if (!apiKey) {
            logError("AI mode requires OPENAI_API_KEY or --api-key.");
            process.exitCode = 1;
            return;
        }
        try {
            logInfo("Generating descriptions with AI…");
            // scanEnv results have no `contexts`; guard before passing through.
            const contexts = "contexts" in scanResult && scanResult.contexts
                ? scanResult.contexts
                : Object.create(null);
            aiDocs = await generateEnvDocsWithOpenAI({
                apiKey,
                model: flags.model,
                projectHint: "Practical guidance for developers setting env vars.",
                contexts,
                keys: scanResult.keys,
            });
        }
        catch (e) {
            // AI failure aborts the whole run rather than writing an
            // undocumented template silently.
            logError(e?.message ?? String(e));
            process.exitCode = 1;
            return;
        }
    }
    // Always (re)write the template example file.
    const exampleContent = renderEnv(scanResult, aiDocs);
    await fs.writeFile(outPath, exampleContent, "utf8");
    logInfo(`Wrote environment template to ${outPath}`);
    if (flags.create) {
        const envExists = await fileExists(envPath);
        if (envExists && !flags.force) {
            logInfo(`.env already exists; not overwriting. Use --force to overwrite.`);
        }
        else {
            // Bare KEY= lines only — the documented version lives in the template.
            const envContent = scanResult.keys.map((k) => `${k}=`).join("\n") + "\n";
            await fs.writeFile(envPath, envContent, "utf8");
            logInfo(`Wrote ${envExists ? "updated" : "new"} .env file to ${envPath}`);
        }
    }
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import { execa } from "execa";
|
|
2
|
+
/**
 * Return git diff output for the given paths (e.g. package.json,
 * package-lock.json). Used for fix preview before applying changes.
 */
export async function gitDiff(paths = [], cwd = process.cwd()) {
    const pathArgs = paths.length > 0 ? ["--", ...paths] : [];
    const { stdout } = await execa("git", ["diff", "--no-color", ...pathArgs], { cwd });
    return stdout;
}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import fs from "node:fs";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
/**
 * Detect the package manager for a project by its lockfile.
 * Priority: pnpm → yarn → npm → bun; defaults to npm when no known
 * lockfile is present.
 */
export function detectPackageManager(cwd = process.cwd()) {
    const lockfileOrder = [
        ["pnpm-lock.yaml", "pnpm"],
        ["yarn.lock", "yarn"],
        ["package-lock.json", "npm"],
        ["bun.lockb", "bun"],
    ];
    for (const [lockfile, pm] of lockfileOrder) {
        if (fs.existsSync(path.join(cwd, lockfile))) {
            return pm;
        }
    }
    return "npm";
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { execa } from "execa";
|
|
2
|
+
/**
 * Run a package-manager command and always resolve with its captured
 * stdout/stderr/exitCode — even when the process exits non-zero.
 */
export async function runPmCommand(pm, args, options = {}) {
    const execution = await execa(pm, args, {
        cwd: options.cwd,
        stdio: options.stdio ?? "pipe",
        // Never throw on non‑zero exit codes; always resolve with the result.
        reject: false,
    });
    const { stdout, stderr, exitCode } = execution;
    return { stdout, stderr, exitCode };
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
/** Package managers this CLI can detect and drive. */
export type PackageManager = "npm" | "yarn" | "pnpm" | "bun";
/** Options accepted when spawning a package-manager process. */
export interface RunOptions {
    /** Working directory for the spawned process. */
    cwd?: string;
    /** "inherit" streams to the parent terminal; "pipe" (the default) captures output. */
    stdio?: "inherit" | "pipe";
}
/** Captured result of a package-manager invocation. */
export interface RunResult {
    stdout?: string;
    stderr?: string;
    exitCode?: number;
}
//# sourceMappingURL=types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import { gitStashSave } from "../git/stash.js";
|
|
2
|
+
import { logInfo } from "../ui/log.js";
|
|
3
|
+
/**
 * Minimal onboarding stub: announces which steps would be skipped, then
 * creates a git stash as a safety backup before any changes are made.
 */
export async function runOnboard(opts = {}) {
    const skipNotices = [
        [opts.skipInstall ?? false, "Will skip install (--skip-install)."],
        [opts.skipEnv ?? false, "Will skip env generate (--skip-env)."],
        [opts.skipTest ?? false, "Will skip tests (--skip-test)."],
    ];
    logInfo("Onboarding project with depfix-ai (minimal stub).");
    for (const [isSkipped, message] of skipNotices) {
        if (isSkipped) {
            logInfo(message);
        }
    }
    await gitStashSave("depfix-ai onboarding backup");
    logInfo("Created git stash as a safety backup.");
}
|
package/package.json
CHANGED
|
@@ -1,11 +1,12 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "depfix-ai",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.2.2",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"license": "MIT",
|
|
6
6
|
"engines": {
|
|
7
7
|
"node": ">=18"
|
|
8
8
|
},
|
|
9
|
+
"packageManager": "pnpm@9.15.0",
|
|
9
10
|
"scripts": {
|
|
10
11
|
"build": "tsc -b",
|
|
11
12
|
"test": "vitest run",
|
|
@@ -20,7 +21,11 @@
|
|
|
20
21
|
"dist/**"
|
|
21
22
|
],
|
|
22
23
|
"bin": {
|
|
23
|
-
"depfix-ai": "
|
|
24
|
+
"depfix-ai": "bin/run.js"
|
|
25
|
+
},
|
|
26
|
+
"repository": {
|
|
27
|
+
"type": "git",
|
|
28
|
+
"url": "git+https://github.com/hesxo/depfix-ai.git"
|
|
24
29
|
},
|
|
25
30
|
"oclif": {
|
|
26
31
|
"commands": "./dist/commands",
|
|
@@ -29,6 +34,7 @@
|
|
|
29
34
|
"dependencies": {
|
|
30
35
|
"@oclif/core": "^4.0.0",
|
|
31
36
|
"chalk": "^5.6.2",
|
|
37
|
+
"depfix-ai": "^0.1.2",
|
|
32
38
|
"execa": "^9.6.1",
|
|
33
39
|
"fast-glob": "^3.3.3",
|
|
34
40
|
"ora": "^9.3.0",
|