@ranger1/dx 0.1.85 → 0.1.86
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +58 -0
- package/codex/skills/e2e-audit-fixer/SKILL.md +76 -0
- package/codex/skills/e2e-audit-fixer/agents/openai.yaml +4 -0
- package/codex/skills/e2e-audit-fixer/scripts/e2e_e2e_audit.py +523 -0
- package/codex/skills/env-accessor-audit-fixer/SKILL.md +149 -0
- package/codex/skills/env-accessor-audit-fixer/agents/openai.yaml +7 -0
- package/codex/skills/env-accessor-audit-fixer/references/bootstrap-env-foundation.md +156 -0
- package/codex/skills/env-accessor-audit-fixer/scripts/env_accessor_audit.py +250 -0
- package/codex/skills/error-handling-audit-fixer/SKILL.md +150 -0
- package/codex/skills/error-handling-audit-fixer/agents/openai.yaml +7 -0
- package/codex/skills/error-handling-audit-fixer/references/error-handling-standard.md +152 -0
- package/codex/skills/error-handling-audit-fixer/references/foundation-bootstrap.md +85 -0
- package/codex/skills/error-handling-audit-fixer/scripts/error_handling_audit.py +537 -0
- package/codex/skills/pagination-dto-audit-fixer/SKILL.md +69 -0
- package/codex/skills/pagination-dto-audit-fixer/agents/openai.yaml +7 -0
- package/codex/skills/pagination-dto-audit-fixer/references/pagination-standard.md +67 -0
- package/codex/skills/pagination-dto-audit-fixer/scripts/pagination_dto_audit.py +244 -0
- package/lib/codex-initial.js +155 -3
- package/lib/exec.js +21 -2
- package/lib/run-with-version-env.js +2 -1
- package/package.json +1 -1
|
@@ -0,0 +1,244 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import argparse
|
|
3
|
+
import json
|
|
4
|
+
import re
|
|
5
|
+
from dataclasses import asdict, dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Iterable
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# Field names that signal a pagination payload. has_pagination_signal()
# counts word-boundary occurrences of these inside a class body or a
# return-object literal.
PAGINATION_KEYS = (
    "items",
    "data",
    "total",
    "page",
    "limit",
    "pageSize",
    "currentPage",
)

# Name fragments that mark a class as a paginated response wrapper even
# when its body does not trip the field heuristic.
RESPONSE_HINTS = (
    "Pagination",
    "Paginated",
    "ListResponse",
    "PageResult",
)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass
class Finding:
    """A single audit finding; serialized to JSON via ``asdict`` in main()."""

    kind: str  # finding category, e.g. "request-dto-not-standard"
    path: str  # scanned file path (string form of the Path)
    line: int  # 1-based line number where the match starts
    symbol: str  # offending class name, or "return" for manual returns
    message: str  # human-readable description printed in the report
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass
class ClassBlock:
    """One exported TypeScript class as captured by iter_export_classes()."""

    name: str  # class identifier
    extends_name: str  # raw text of the extends clause ('' when absent)
    body: str  # text between the class's outer braces
    start: int  # character offset of the header match within the file
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the pagination-DTO audit.

    Note that ``--include-glob`` uses ``action="append"`` on top of a
    non-empty default, so user-supplied globs are scanned *in addition*
    to the default pattern (matching the help text).
    """
    cli = argparse.ArgumentParser(description="审计 backend 分页 DTO 规范")
    cli.add_argument("--workspace", required=True, help="仓库根目录")
    cli.add_argument(
        "--include-glob",
        action="append",
        default=["apps/backend/src/**/*.ts"],
        help="附加扫描 glob,可重复传入",
    )
    cli.add_argument("--output-json", help="输出 JSON 文件路径")
    return cli.parse_args()
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def iter_files(workspace: Path, globs: Iterable[str]) -> list[Path]:
    """Expand every glob under ``workspace`` and return matching files.

    Duplicates from overlapping patterns are collapsed and the result is
    sorted so report ordering is deterministic.
    """
    matched: set[Path] = set()
    for pattern in globs:
        matched.update(p for p in workspace.glob(pattern) if p.is_file())
    return sorted(matched)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def has_pagination_signal(block: str) -> bool:
    """Heuristic: does ``block`` look like a pagination payload?

    Requires at least three distinct pagination keys overall, plus a
    total count, an items/data collection, and at least one page cursor.
    """
    def present(pattern: str) -> bool:
        return re.search(pattern, block) is not None

    distinct_hits = sum(present(rf"\b{re.escape(key)}\b") for key in PAGINATION_KEYS)
    if distinct_hits < 3:
        return False
    return (
        present(r"\btotal\b")
        and present(r"\b(items|data)\b")
        and present(r"\b(page|limit|pageSize|currentPage)\b")
    )
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def line_no(content: str, index: int) -> int:
    """Return the 1-based line number of character offset ``index``."""
    return 1 + content[:index].count("\n")
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def iter_export_classes(content: str) -> list[ClassBlock]:
    """Extract every exported TS class together with its raw body text.

    Fixes over the naive header regex:
    - an ``implements ...`` clause is no longer swallowed into
      ``extends_name`` (which broke exact base-class comparisons);
    - ``export default class`` and ``export abstract class`` headers are
      recognized as well.

    NOTE(review): brace counting ignores string literals and comments, so
    a brace inside either can skew a body — acceptable for this heuristic
    audit, but confirm if precision matters.
    """
    header_pattern = re.compile(
        r"export\s+(?:default\s+)?(?:abstract\s+)?class\s+(?P<name>\w+)"
        r"\s*(?:extends\s+(?P<extends>[^{\n]+?))?"
        r"\s*(?:implements\s+[^{\n]+)?\s*{",
        re.MULTILINE,
    )
    classes: list[ClassBlock] = []
    for match in header_pattern.finditer(content):
        brace_start = match.end() - 1
        depth = 0
        index = brace_start
        # Walk forward counting braces to find the matching close brace.
        while index < len(content):
            char = content[index]
            if char == "{":
                depth += 1
            elif char == "}":
                depth -= 1
                if depth == 0:
                    classes.append(
                        ClassBlock(
                            name=match.group("name"),
                            extends_name=(match.group("extends") or "").strip(),
                            body=content[brace_start + 1 : index],
                            start=match.start(),
                        )
                    )
                    break
            index += 1
    return classes
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def scan_request_dtos(path: Path, content: str) -> list[Finding]:
    """Flag request DTOs that declare pagination query fields but do not
    extend ``BasePaginationRequestDto``.

    Only DTO files (under a ``dto`` directory or named ``*.dto.ts``) are
    inspected; response-side files and classes are skipped.
    """
    # File-level filter hoisted out of the class loop (it never depended
    # on the class). path.parts also works with Windows separators,
    # unlike the previous '"/dto/" in str(path)' check.
    if "dto" not in path.parts and not path.name.endswith(".dto.ts"):
        return []

    findings: list[Finding] = []
    for class_block in iter_export_classes(content):
        name = class_block.name
        # Skip anything that is clearly response-side.
        if "responses" in path.parts or ".response." in path.name or "Response" in name:
            continue
        if not name.endswith("Dto"):
            continue
        # Skip the base class itself and anything already extending it.
        # Word-boundary search (rather than exact equality) tolerates
        # trailing "implements ..." clauses or generic arguments.
        if name == "BasePaginationRequestDto":
            continue
        if re.search(r"\bBasePaginationRequestDto\b", class_block.extends_name):
            continue
        # Only report classes that actually declare page cursors.
        if not re.search(r"\b(page|limit|pageSize|currentPage)\b", class_block.body):
            continue
        findings.append(
            Finding(
                kind="request-dto-not-standard",
                path=str(path),
                line=line_no(content, class_block.start),
                symbol=name,
                message="请求 DTO 包含分页字段,但未继承 BasePaginationRequestDto",
            )
        )
    return findings
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def scan_response_dtos(path: Path, content: str) -> list[Finding]:
    """Flag response DTOs that look paginated but do not extend
    ``BasePaginationResponseDto``.

    Only DTO files (under a ``dto`` directory or named ``*.dto.ts``) are
    inspected; request-side files and classes are skipped.
    """
    # File-level filter hoisted out of the class loop; path.parts also
    # works with Windows separators.
    if "dto" not in path.parts and not path.name.endswith(".dto.ts"):
        return []

    findings: list[Finding] = []
    for class_block in iter_export_classes(content):
        name = class_block.name
        # Skip request-side files and classes. ("Request" in name already
        # covers BasePaginationRequestDto, previously listed separately.)
        if "requests" in path.parts or ".request." in path.name or "Request" in name:
            continue
        if "BasePaginationResponseDto" in class_block.extends_name:
            continue
        # Trigger on the field heuristic or on a telltale class name.
        if not (
            has_pagination_signal(class_block.body)
            or any(hint in name for hint in RESPONSE_HINTS)
        ):
            continue
        findings.append(
            Finding(
                kind="response-dto-not-standard",
                path=str(path),
                line=line_no(content, class_block.start),
                symbol=name,
                message="响应 DTO 命中分页信号,但未继承 BasePaginationResponseDto",
            )
        )
    return findings
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
def scan_manual_returns(path: Path, content: str) -> list[Finding]:
    """Flag hand-assembled pagination objects in ``return { ... }``.

    Uses brace matching instead of the previous lazy regex
    ``return\\s*{...?}``, which truncated the captured body at the first
    ``}`` and therefore missed keys inside nested object literals. Also
    anchors ``return`` with a word boundary so identifiers merely ending
    in "return" cannot match.

    NOTE(review): brace counting ignores braces inside strings/comments —
    acceptable for a heuristic audit.
    """
    findings: list[Finding] = []
    for match in re.finditer(r"\breturn\s*\{", content):
        brace_start = match.end() - 1
        depth = 0
        index = brace_start
        while index < len(content):
            char = content[index]
            if char == "{":
                depth += 1
            elif char == "}":
                depth -= 1
                if depth == 0:
                    break
            index += 1
        body = content[brace_start + 1 : index]
        if not has_pagination_signal(body):
            continue
        findings.append(
            Finding(
                kind="manual-pagination-return",
                path=str(path),
                line=line_no(content, match.start()),
                symbol="return",
                message="检测到手工拼装分页返回结构,建议改为统一分页 DTO",
            )
        )
    return findings
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def scan_file(path: Path) -> list[Finding]:
    """Run every scanner over one file; undecodable files yield nothing."""
    try:
        content = path.read_text(encoding="utf-8")
    except UnicodeDecodeError:
        # Binary or non-UTF-8 file: silently skip, same as before.
        return []
    return [
        *scan_request_dtos(path, content),
        *scan_response_dtos(path, content),
        *scan_manual_returns(path, content),
    ]
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def print_report(findings: list[Finding]) -> None:
    """Print a human-readable summary of findings grouped by kind."""
    if not findings:
        print("未发现疑似非标准分页 DTO 或手工分页返回结构。")
        return
    buckets: dict[str, list[Finding]] = {}
    for item in findings:
        buckets.setdefault(item.kind, []).append(item)
    print(f"共发现 {len(findings)} 个问题:")
    for kind in sorted(buckets):
        bucket = buckets[kind]
        print(f"\n[{kind}] {len(bucket)} 个")
        for item in bucket:
            print(f"- {item.path}:{item.line} {item.symbol} -> {item.message}")
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def main() -> int:
    """Entry point: scan the workspace, print a report, optionally dump JSON.

    NOTE(review): always returns 0 even when findings exist — confirm
    that callers/CI expect an informational (non-gating) audit.
    """
    args = parse_args()
    workspace = Path(args.workspace).resolve()
    findings: list[Finding] = []
    for path in iter_files(workspace, args.include_glob):
        findings.extend(scan_file(path))
    print_report(findings)
    if args.output_json:
        output = Path(args.output_json)
        # Create parent directories so a nested report path cannot fail.
        output.parent.mkdir(parents=True, exist_ok=True)
        payload = json.dumps(
            [asdict(finding) for finding in findings], ensure_ascii=False, indent=2
        )
        output.write_text(payload + "\n", encoding="utf-8")
        print(f"\nJSON 已输出到 {output}")
    return 0
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
# Script entry point: propagate main()'s status as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())
|
package/lib/codex-initial.js
CHANGED
|
@@ -4,6 +4,49 @@ import os from 'node:os'
|
|
|
4
4
|
|
|
5
5
|
import { logger } from './logger.js'
|
|
6
6
|
|
|
7
|
+
// Config sections that codex initialization guarantees exist in
// ~/.codex/config.toml. `section` is a TOML table header (dotted names
// produce nested tables); `values` maps key -> *raw TOML value text*,
// which is why string values carry their own double quotes while
// booleans/numbers do not.
const REQUIRED_CODEX_CONFIG = [
  {
    section: 'features',
    values: {
      multi_agent: 'true',
    },
  },
  {
    section: 'agents',
    values: {
      max_threads: '15',
    },
  },
  {
    section: 'agents.fixer',
    values: {
      description: '"bugfix 代理"',
      config_file: '"agents/fixer.toml"',
    },
  },
  {
    section: 'agents.orchestrator',
    values: {
      description: '"pr 修复流程编排代理"',
      config_file: '"agents/orchestrator.toml"',
    },
  },
  {
    section: 'agents.reviewer',
    values: {
      description: '"代码评审代理"',
      config_file: '"agents/reviewer.toml"',
    },
  },
  {
    section: 'agents.spark',
    values: {
      description: '"通用执行代理"',
      config_file: '"agents/spark.toml"',
    },
  },
]
|
|
49
|
+
|
|
7
50
|
async function collectAllFiles(dir) {
|
|
8
51
|
const out = []
|
|
9
52
|
|
|
@@ -39,6 +82,110 @@ async function ensureDir(path) {
|
|
|
39
82
|
await fs.mkdir(path, { recursive: true })
|
|
40
83
|
}
|
|
41
84
|
|
|
85
|
+
// Escape every ECMAScript regex metacharacter so `input` can be embedded
// verbatim inside a RegExp source string. Non-string input is coerced.
function escapeRegExp(input) {
  const metachars = /[.*+?^${}()|[\]\\]/g
  return String(input).replace(metachars, '\\$&')
}
|
|
88
|
+
|
|
89
|
+
// Normalize `text` to end with a trailing newline. Falsy input
// (empty string, null, undefined) collapses to the empty string.
function ensureTrailingNewline(text) {
  if (!text) return ''
  if (text.endsWith('\n')) return text
  return `${text}\n`
}
|
|
93
|
+
|
|
94
|
+
// Idempotently merge one { section, values } spec into TOML `content`.
// Returns { content, changedKeys, createdSection }; text outside the
// target section is never touched.
function upsertTomlSection(content, { section, values }) {
  const header = `[${section}]`
  // The header must sit alone on its line; dots in `section`
  // (e.g. "agents.fixer") are escaped so they match literally.
  const sectionPattern = new RegExp(`^\\[${escapeRegExp(section)}\\]\\s*$`, 'm')
  const sectionHeaderMatch = content.match(sectionPattern)
  let nextContent = content
  let changedKeys = 0
  let createdSection = false

  if (!sectionHeaderMatch) {
    // Section absent: append a fresh block (header + all key lines),
    // separated from existing content by one blank line.
    const blockLines = [header, ...Object.entries(values).map(([key, value]) => `${key} = ${value}`), '']
    nextContent = ensureTrailingNewline(content)
    if (nextContent.length > 0 && !nextContent.endsWith('\n\n')) {
      nextContent += '\n'
    }
    nextContent += `${blockLines.join('\n')}\n`
    return {
      content: nextContent,
      changedKeys: Object.keys(values).length,
      createdSection: true,
    }
  }

  // Section exists: its extent runs from the header up to the next
  // "[...]" header line, or to end-of-file when none follows.
  const sectionStart = sectionHeaderMatch.index
  const sectionBodyStart = sectionStart + sectionHeaderMatch[0].length
  const remaining = content.slice(sectionBodyStart)
  const nextHeaderMatch = remaining.match(/\n(?=\[[^\]]+\]\s*$)/m)
  const sectionEnd =
    nextHeaderMatch && typeof nextHeaderMatch.index === 'number'
      ? sectionBodyStart + nextHeaderMatch.index + 1
      : content.length

  const beforeSection = content.slice(0, sectionStart)
  const originalSectionText = content.slice(sectionStart, sectionEnd)
  const trailing = content.slice(sectionEnd)
  const sectionLines = originalSectionText.split('\n')

  for (const [key, value] of Object.entries(values)) {
    const desiredLine = `${key} = ${value}`
    const keyPattern = new RegExp(`^${escapeRegExp(key)}\\s*=`, 'm')
    const lineIndex = sectionLines.findIndex(line => keyPattern.test(line.trim()))

    if (lineIndex === -1) {
      // Key missing: insert before any trailing blank lines. Index 0 is
      // the header line, so the insert point never moves above 1.
      let insertIndex = sectionLines.length
      while (insertIndex > 1 && sectionLines[insertIndex - 1] === '') {
        insertIndex--
      }
      sectionLines.splice(insertIndex, 0, desiredLine)
      changedKeys++
      continue
    }

    // Key present: rewrite the line only when its trimmed form differs,
    // so reruns on an already-correct file report zero changes.
    if (sectionLines[lineIndex].trim() !== desiredLine) {
      sectionLines[lineIndex] = desiredLine
      changedKeys++
    }
  }

  const updatedSectionText = ensureTrailingNewline(sectionLines.join('\n'))
  nextContent = `${beforeSection}${updatedSectionText}${trailing}`

  return { content: nextContent, changedKeys, createdSection }
}
|
|
156
|
+
|
|
157
|
+
// Ensure ~/.codex/config.toml contains every section and key declared in
// REQUIRED_CODEX_CONFIG, creating the file when it does not exist yet.
// Returns { configPath, changedKeys, createdSections } for reporting.
async function ensureCodexConfig({ codexDir }) {
  const configPath = join(codexDir, 'config.toml')

  // A missing file is treated as empty content; any other read error is fatal.
  let original = ''
  try {
    original = await fs.readFile(configPath, 'utf8')
  } catch (error) {
    if (error?.code !== 'ENOENT') throw error
  }

  let nextContent = original
  let changedKeys = 0
  let createdSections = 0
  for (const sectionConfig of REQUIRED_CODEX_CONFIG) {
    const result = upsertTomlSection(nextContent, sectionConfig)
    nextContent = result.content
    changedKeys += result.changedKeys
    if (result.createdSection) createdSections++
  }

  // Write when anything changed, or to materialize a brand-new file.
  if (nextContent !== original || original === '') {
    await fs.writeFile(configPath, ensureTrailingNewline(nextContent), 'utf8')
  }

  return { configPath, changedKeys, createdSections }
}
|
|
188
|
+
|
|
42
189
|
async function assertDirExists(path, label) {
|
|
43
190
|
try {
|
|
44
191
|
const st = await fs.stat(path)
|
|
@@ -116,10 +263,11 @@ export async function runCodexInitial(options = {}) {
|
|
|
116
263
|
if (!packageRoot) throw new Error('runCodexInitial: 缺少 packageRoot')
|
|
117
264
|
|
|
118
265
|
const homeDir = options.homeDir || os.homedir()
|
|
266
|
+
const codexDir = join(homeDir, '.codex')
|
|
119
267
|
const srcSkills = join(packageRoot, 'codex', 'skills')
|
|
120
|
-
const dstSkills = join(
|
|
268
|
+
const dstSkills = join(codexDir, 'skills')
|
|
121
269
|
const srcCodexAgents = join(packageRoot, 'codex', 'agents')
|
|
122
|
-
const dstCodexAgents = join(
|
|
270
|
+
const dstCodexAgents = join(codexDir, 'agents')
|
|
123
271
|
|
|
124
272
|
await assertDirExists(srcSkills, '模板目录 codex/skills')
|
|
125
273
|
await assertDirExists(srcCodexAgents, '模板目录 codex/agents')
|
|
@@ -129,8 +277,12 @@ export async function runCodexInitial(options = {}) {
|
|
|
129
277
|
|
|
130
278
|
const skillsStats = await copySkillsDirectories({ srcSkillsDir: srcSkills, dstSkillsDir: dstSkills })
|
|
131
279
|
const codexAgentsStats = await copyDirMerge({ srcDir: srcCodexAgents, dstDir: dstCodexAgents })
|
|
280
|
+
const configStats = await ensureCodexConfig({ codexDir })
|
|
132
281
|
|
|
133
|
-
logger.success(`已初始化 Codex 模板到: ${
|
|
282
|
+
logger.success(`已初始化 Codex 模板到: ${codexDir}`)
|
|
134
283
|
logger.info(`skills: ${skillsStats.copiedDirs} 个目录,覆盖复制 ${skillsStats.copiedFiles} 个文件 -> ${dstSkills}`)
|
|
135
284
|
logger.info(`codex agents: 覆盖复制 ${codexAgentsStats.fileCount} 个文件 -> ${dstCodexAgents}`)
|
|
285
|
+
logger.info(
|
|
286
|
+
`config.toml: 修复 ${configStats.changedKeys} 个配置项,新增 ${configStats.createdSections} 个分组 -> ${configStats.configPath}`,
|
|
287
|
+
)
|
|
136
288
|
}
|
package/lib/exec.js
CHANGED
|
@@ -12,6 +12,25 @@ import { confirmManager } from './confirm.js'
|
|
|
12
12
|
|
|
13
13
|
const execPromise = promisify(nodeExec)
|
|
14
14
|
|
|
15
|
+
// Drop conflicting color flags before handing an env to a child process.
//
// Some toolchains (npm/pnpm/chalk) turn FORCE_COLOR on somewhere down
// the child-process chain; combined with an inherited NO_COLOR this
// triggers repeated "NO_COLOR is ignored" warnings. We therefore strip
// NO_COLOR whenever it carries a value, and strip FORCE_COLOR whenever
// it does not.
export function sanitizeChildEnv(inputEnv = {}) {
  const env = { ...inputEnv }
  const hasValue = (value) => value !== undefined && value !== null && String(value) !== ''

  if (hasValue(env.NO_COLOR)) {
    delete env.NO_COLOR
  }
  if (!hasValue(env.FORCE_COLOR)) {
    delete env.FORCE_COLOR
  }
  return env
}
|
|
33
|
+
|
|
15
34
|
export class ExecManager {
|
|
16
35
|
constructor() {
|
|
17
36
|
this.runningProcesses = new Map()
|
|
@@ -222,12 +241,12 @@ export class ExecManager {
|
|
|
222
241
|
const result = await this.spawnCommand(fullCommand, {
|
|
223
242
|
cwd: cwd || process.cwd(),
|
|
224
243
|
stdio,
|
|
225
|
-
env: {
|
|
244
|
+
env: sanitizeChildEnv({
|
|
226
245
|
...process.env,
|
|
227
246
|
NODE_ENV: nodeEnvForProcess,
|
|
228
247
|
...forcedEnv,
|
|
229
248
|
...extraEnv,
|
|
230
|
-
},
|
|
249
|
+
}),
|
|
231
250
|
timeout,
|
|
232
251
|
})
|
|
233
252
|
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { spawn, execSync } from 'node:child_process'
|
|
2
|
+
import { sanitizeChildEnv } from './exec.js'
|
|
2
3
|
import { readFile } from 'node:fs/promises'
|
|
3
4
|
import { resolve, join, dirname } from 'node:path'
|
|
4
5
|
import { existsSync } from 'node:fs'
|
|
@@ -147,7 +148,7 @@ export async function runWithVersionEnv(argv = []) {
|
|
|
147
148
|
|
|
148
149
|
const child = spawn(command[0], command.slice(1), {
|
|
149
150
|
stdio: 'inherit',
|
|
150
|
-
env,
|
|
151
|
+
env: sanitizeChildEnv(env),
|
|
151
152
|
shell: false
|
|
152
153
|
})
|
|
153
154
|
|