@movemama/opencode-legacy 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +53 -0
- package/index.js +19 -0
- package/legacy-rules.json +19 -0
- package/package.json +36 -0
- package/plugin-meta.js +14 -0
- package/tools/edit.js +56 -0
- package/tools/edit.ts +64 -0
- package/tools/grep.js +210 -0
- package/tools/legacy-codec.js +13 -0
- package/tools/legacy-edit-core.mjs +134 -0
- package/tools/legacy-router.mjs +149 -0
- package/tools/legacy-search-core.mjs +84 -0
- package/tools/legacy.js +78 -0
- package/tools/legacy.ts +230 -0
- package/tools/opencode-paths.mjs +41 -0
- package/tools/read.js +148 -0
- package/tools/read.ts +213 -0
- package/tools/script-edit-core.mjs +126 -0
- package/tools/script-edit.js +59 -0
- package/tools/script-edit.ts +59 -0
- package/tools/txt-gb2312-tool.mjs +392 -0
- package/tools/write.js +53 -0
- package/tools/write.ts +67 -0
package/tools/read.js
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
import { tool } from '@opencode-ai/plugin'
|
|
2
|
+
import { readFile } from 'node:fs/promises'
|
|
3
|
+
import { existsSync, readFileSync } from 'node:fs'
|
|
4
|
+
import path from 'node:path'
|
|
5
|
+
import { createDefaultLegacyRules, matchLegacyRule } from './legacy-router.mjs'
|
|
6
|
+
import { getBundledLegacyRulesPath } from './opencode-paths.mjs'
|
|
7
|
+
import { decodeLegacyBuffer } from './legacy-codec.js'
|
|
8
|
+
|
|
9
|
+
/**
 * Load legacy-encoding rules, preferring project config over bundled defaults.
 * Candidate order: `.opencode/legacy-rules.json`, worktree-root
 * `legacy-rules.json`, then the package-bundled rules file.
 * @param {string} worktree - absolute path of the project worktree
 * @returns {Array} the `rules` array, or the built-in defaults
 */
function loadLegacyRules(worktree) {
  const candidates = [
    path.join(worktree, '.opencode', 'legacy-rules.json'),
    path.join(worktree, 'legacy-rules.json'),
    getBundledLegacyRulesPath(),
  ]

  for (const filePath of candidates) {
    if (!existsSync(filePath)) {
      continue
    }
    try {
      // BUG FIX: a malformed rules file previously threw out of this function
      // and broke every read; now it falls through to the next candidate.
      return JSON.parse(readFileSync(filePath, 'utf8')).rules ?? createDefaultLegacyRules()
    } catch {
      continue
    }
  }

  return createDefaultLegacyRules()
}
|
|
24
|
+
|
|
25
|
+
// Resolve a possibly-relative path against the worktree; absolute paths pass through.
function resolvePath(filePath, worktree) {
  if (path.isAbsolute(filePath)) {
    return filePath
  }
  return path.join(worktree, filePath)
}
|
|
28
|
+
|
|
29
|
+
/**
 * Slice `content` to the 1-indexed line window [offset, offset+limit) in a
 * single scan, without splitting the entire string into an array.
 * @param {string} content - full file text
 * @param {number|undefined} offset - first line to keep (1-based)
 * @param {number|undefined} limit - maximum number of lines to keep
 * @returns {string} the selected lines joined with '\n' ('' if offset is past EOF)
 * @throws {Error} when offset < 1
 */
function applyOffsetLimitFast(content, offset, limit) {
  if (offset === undefined && limit === undefined) {
    return content
  }

  if (offset !== undefined && offset < 1) {
    throw new Error('offset must be greater than or equal to 1')
  }

  const targetStartLine = offset || 1
  const targetLineCount = limit || Number.MAX_SAFE_INTEGER
  const targetEndLine = targetStartLine + targetLineCount - 1

  let lineNumber = 1
  let currentStart = 0
  let sliceStart = -1
  let sliceEnd = content.length

  // Locate the character offsets of the first and last requested lines.
  for (let index = 0; index < content.length; index += 1) {
    if (content[index] !== '\n') {
      continue
    }

    if (lineNumber === targetStartLine) {
      sliceStart = currentStart
    }

    if (lineNumber === targetEndLine) {
      sliceEnd = index
      break
    }

    lineNumber += 1
    currentStart = index + 1
  }

  if (sliceStart === -1) {
    // The requested start may be the final, newline-less line.
    if (lineNumber === targetStartLine) {
      sliceStart = currentStart
    } else {
      return ''
    }
  }

  const result = content.slice(sliceStart, sliceEnd)
  // BUG FIX: previously only a trailing '\r' was stripped, so CRLF files kept
  // stray '\r' on every interior line of the slice. Normalize all CRLF to LF
  // so output matches line-split semantics regardless of newline convention.
  return result.replace(/\r\n/g, '\n').replace(/\r$/, '')
}
|
|
76
|
+
|
|
77
|
+
// Read a legacy-encoded file: load raw bytes, decode them with the configured
// codec, then apply the optional 1-based line window.
async function readLegacyFile(filePath, encoding, offset, limit) {
  const rawBytes = await readFile(filePath)
  const decoded = decodeLegacyBuffer(rawBytes, encoding)
  return applyOffsetLimitFast(decoded, offset, limit)
}
|
|
82
|
+
|
|
83
|
+
// Wrap a partial read in a receipt header so the caller knows it was a
// windowed read and how to continue; full reads are returned untouched.
function withLargeFileNotice(content, offset, limit) {
  const isFullRead = offset === undefined && limit === undefined
  if (isFullRead) {
    return content
  }

  const receipt = [
    '--- 读取回执 ---',
    '读取方式:分段读取',
    `起始行号:${offset ?? 1}`,
    `读取行数:${limit ?? 'all'}`,
    '说明:如需继续读取剩余内容,请继续增大 offset。',
    content,
  ]
  return receipt.join('\n')
}
|
|
97
|
+
|
|
98
|
+
// Prefix decoded legacy content with a receipt describing the matched rule
// (encoding, handler, strictness) and the read mode. Raw mode (internal
// tool-to-tool calls) bypasses the receipt entirely.
function withLegacyReadNotice(content, matched, offset, limit, raw = false) {
  if (raw) {
    return content
  }

  const receipt = [
    '--- Legacy 读取回执 ---',
    `编码:${matched?.encoding || 'unknown'}`,
    `处理器:${matched?.tool || 'legacy-text'}`,
    `严格模式:${matched?.strict === true ? 'true' : 'false'}`,
  ]

  const isPartial = offset !== undefined || limit !== undefined
  if (isPartial) {
    receipt.push(
      '读取方式:分段读取',
      `起始行号:${offset ?? 1}`,
      `读取行数:${limit ?? 'all'}`,
      '说明:如需继续读取剩余内容,请继续增大 offset。',
    )
  } else {
    receipt.push('读取方式:完整读取')
  }

  receipt.push(content)
  return receipt.join('\n')
}
|
|
122
|
+
|
|
123
|
+
// Read tool: routes files matched by a legacy rule through the legacy codec
// path; everything else is read as UTF-8. `raw` suppresses receipt headers
// for internal tool-to-tool calls.
export default tool({
  description: '读取文件,命中 legacy 规则时自动按对应编码处理',
  args: {
    filePath: tool.schema.string().describe('文件路径'),
    offset: tool.schema.number().optional().describe('起始行号,1 开始'),
    limit: tool.schema.number().optional().describe('最多读取多少行'),
    raw: tool.schema.boolean().optional().describe('内部调用时返回原始内容'),
  },
  async execute(args, context) {
    const absolutePath = resolvePath(args.filePath, context.worktree)
    const matched = matchLegacyRule(absolutePath, loadLegacyRules(context.worktree))

    // Legacy-encoded file: decode with the configured codec and add the receipt.
    if (matched?.encoding) {
      const decoded = await readLegacyFile(absolutePath, matched.encoding, args.offset, args.limit)
      return withLegacyReadNotice(decoded, matched, args.offset, args.limit, Boolean(args.raw))
    }

    // Plain UTF-8 file.
    const sliced = applyOffsetLimitFast(await readFile(absolutePath, 'utf8'), args.offset, args.limit)
    return args.raw ? sliced : withLargeFileNotice(sliced, args.offset, args.limit)
  },
})
|
package/tools/read.ts
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
import { tool } from '@opencode-ai/plugin'
|
|
2
|
+
import { readFile } from 'node:fs/promises'
|
|
3
|
+
import { existsSync, readFileSync } from 'node:fs'
|
|
4
|
+
import path from 'node:path'
|
|
5
|
+
import { spawnSync } from 'node:child_process'
|
|
6
|
+
import { createDefaultLegacyRules, matchLegacyRule } from './legacy-router.mjs'
|
|
7
|
+
import { getGlobalLegacyRulesPath, getKnownIconvCandidates } from './opencode-paths.mjs'
|
|
8
|
+
|
|
9
|
+
let cachedIconvPath = null
|
|
10
|
+
|
|
11
|
+
/**
 * Load legacy-encoding rules: project `.opencode/legacy-rules.json` first,
 * then the global config, then the built-in defaults.
 * @param {string} worktree - absolute path of the project worktree
 * @returns {Array} the `rules` array, or the built-in defaults
 */
function loadLegacyRules(worktree) {
  const candidates = [
    path.join(worktree, '.opencode', 'legacy-rules.json'),
    getGlobalLegacyRulesPath(),
  ]

  for (const configPath of candidates) {
    if (!existsSync(configPath)) {
      continue
    }
    try {
      // BUG FIX: a malformed config previously threw and broke every read;
      // now it falls through to the next candidate / the defaults.
      return JSON.parse(readFileSync(configPath, 'utf8')).rules ?? createDefaultLegacyRules()
    } catch {
      continue
    }
  }

  return createDefaultLegacyRules()
}
|
|
25
|
+
|
|
26
|
+
// Resolve a possibly-relative path against the worktree; absolute paths pass through.
function resolvePath(filePath, worktree) {
  if (path.isAbsolute(filePath)) {
    return filePath
  }
  return path.join(worktree, filePath)
}

// Convert Windows backslash separators to forward slashes.
function normalizeWindowsPath(filePath) {
  return filePath.split('\\').join('/')
}
|
|
33
|
+
|
|
34
|
+
/**
 * Derive Git-for-Windows iconv.exe locations from PATH entries: any entry
 * ending in `<root>/Git/cmd` implies `<root>/Git/usr/bin/iconv.exe`.
 * @returns {string[]} candidate iconv paths (possibly empty)
 */
function getDerivedIconvCandidates() {
  const pathValue = process.env.Path || process.env.PATH || ''
  // BUG FIX: PATH entries were split on ';' only; use the platform delimiter
  // (';' on Windows, ':' elsewhere) so this works on every OS.
  const parts = pathValue.split(path.delimiter).map((item) => item.trim()).filter(Boolean)
  const derived = []

  for (const part of parts) {
    const normalized = part.replace(/\\/g, '/')
    if (normalized.toLowerCase().endsWith('/git/cmd')) {
      derived.push(normalized.replace(/\/cmd$/i, '/usr/bin/iconv.exe'))
    }
  }

  return derived
}
|
|
48
|
+
|
|
49
|
+
/**
 * Find a working iconv executable, probing each candidate with `--version`.
 * The first success is cached in module-level `cachedIconvPath`.
 * @returns {string} path (or bare name) of a working iconv
 * @throws {Error} when no candidate responds successfully
 */
function resolveIconvPath() {
  if (cachedIconvPath) {
    return cachedIconvPath
  }

  // BUG FIX: explicit env overrides were listed *after* the bare 'iconv'
  // lookup, so they could never take effect when any iconv was on PATH.
  // Probe user overrides first, then PATH, then known/derived locations.
  const candidates = [
    process.env.OPENCODE_ICONV_PATH,
    process.env.ICONV_PATH,
    'iconv',
    ...getKnownIconvCandidates(),
    ...getDerivedIconvCandidates(),
  ].filter(Boolean)

  for (const candidate of candidates) {
    const probe = spawnSync(candidate, ['--version'], { encoding: 'utf8' })
    if (probe.status === 0) {
      cachedIconvPath = candidate
      return cachedIconvPath
    }
  }

  throw new Error('iconv 不可用,无法处理 legacy 文件读取')
}
|
|
72
|
+
|
|
73
|
+
// Return the 1-indexed window of `limit` lines starting at `offset`, splitting
// on either LF or CRLF and re-joining with LF. Throws when offset < 1.
// NOTE(review): this split-based variant appears superseded by
// applyOffsetLimitFast below — confirm before removing.
function applyOffsetLimit(content, offset, limit) {
  if (offset === undefined && limit === undefined) {
    return content
  }

  if (offset !== undefined && offset < 1) {
    throw new Error('offset must be greater than or equal to 1')
  }

  const allLines = content.split(/\r?\n/)
  const begin = offset ? offset - 1 : 0
  const finish = limit ? begin + limit : allLines.length
  return allLines.slice(begin, finish).join('\n')
}
|
|
87
|
+
|
|
88
|
+
/**
 * Slice `content` to the 1-indexed line window [offset, offset+limit) in a
 * single scan, without splitting the entire string into an array.
 * @param {string} content - full file text
 * @param {number|undefined} offset - first line to keep (1-based)
 * @param {number|undefined} limit - maximum number of lines to keep
 * @returns {string} the selected lines joined with '\n' ('' if offset is past EOF)
 * @throws {Error} when offset < 1
 */
function applyOffsetLimitFast(content, offset, limit) {
  if (offset === undefined && limit === undefined) {
    return content
  }

  if (offset !== undefined && offset < 1) {
    throw new Error('offset must be greater than or equal to 1')
  }

  const targetStartLine = offset || 1
  const targetLineCount = limit || Number.MAX_SAFE_INTEGER
  const targetEndLine = targetStartLine + targetLineCount - 1

  let lineNumber = 1
  let currentStart = 0
  let sliceStart = -1
  let sliceEnd = content.length

  // Locate the character offsets of the first and last requested lines.
  for (let index = 0; index < content.length; index += 1) {
    if (content[index] !== '\n') {
      continue
    }

    if (lineNumber === targetStartLine) {
      sliceStart = currentStart
    }

    if (lineNumber === targetEndLine) {
      sliceEnd = index
      break
    }

    lineNumber += 1
    currentStart = index + 1
  }

  if (sliceStart === -1) {
    // The requested start may be the final, newline-less line.
    if (lineNumber === targetStartLine) {
      sliceStart = currentStart
    } else {
      return ''
    }
  }

  const result = content.slice(sliceStart, sliceEnd)
  // BUG FIX: previously only a trailing '\r' was stripped, so CRLF files kept
  // stray '\r' on every interior line of the slice. Normalize all CRLF to LF,
  // matching applyOffsetLimit's split(/\r?\n/).join('\n') semantics.
  return result.replace(/\r\n/g, '\n').replace(/\r$/, '')
}
|
|
136
|
+
|
|
137
|
+
// Convert a GB2312 file to UTF-8 via the external iconv binary, then apply
// the optional 1-based line window. Throws with iconv's stderr on failure.
function readGb2312File(filePath, offset, limit) {
  const iconvPath = resolveIconvPath()
  const converted = spawnSync(iconvPath, ['-f', 'GB2312', '-t', 'UTF-8', filePath], {
    encoding: 'utf8',
  })

  if (converted.status !== 0) {
    throw new Error(converted.stderr || 'legacy txt 读取失败')
  }

  return applyOffsetLimitFast(converted.stdout, offset, limit)
}
|
|
149
|
+
|
|
150
|
+
// Wrap a partial read in a receipt header so the caller knows it was a
// windowed read and how to continue; full reads are returned untouched.
function withLargeFileNotice(content, offset, limit) {
  const isFullRead = offset === undefined && limit === undefined
  if (isFullRead) {
    return content
  }

  const receipt = [
    '--- 读取回执 ---',
    '读取方式:分段读取',
    `起始行号:${offset ?? 1}`,
    `读取行数:${limit ?? 'all'}`,
    '说明:如需继续读取剩余内容,请继续增大 offset。',
    content,
  ]
  return receipt.join('\n')
}
|
|
164
|
+
|
|
165
|
+
// Prefix decoded legacy content with a receipt describing the matched rule
// (encoding, handler, strictness) and the read mode. Raw mode (internal
// tool-to-tool calls) bypasses the receipt entirely.
function withLegacyReadNotice(content, matched, offset, limit, raw = false) {
  if (raw) {
    return content
  }

  const receipt = [
    '--- Legacy 读取回执 ---',
    `编码:${matched?.encoding || 'unknown'}`,
    `处理器:${matched?.tool || 'legacy-text'}`,
    `严格模式:${matched?.strict === true ? 'true' : 'false'}`,
  ]

  const isPartial = offset !== undefined || limit !== undefined
  if (isPartial) {
    receipt.push(
      '读取方式:分段读取',
      `起始行号:${offset ?? 1}`,
      `读取行数:${limit ?? 'all'}`,
      '说明:如需继续读取剩余内容,请继续增大 offset。',
    )
  } else {
    receipt.push('读取方式:完整读取')
  }

  receipt.push(content)
  return receipt.join('\n')
}
|
|
189
|
+
|
|
190
|
+
// Read tool (iconv-based variant): GB2312 files matched by a legacy rule are
// converted via the external iconv binary; everything else is read as UTF-8.
// `raw` suppresses receipt headers for internal tool-to-tool calls.
export default tool({
  description: '读取文件,命中 legacy 规则时自动按对应编码处理',
  args: {
    filePath: tool.schema.string().describe('文件路径'),
    offset: tool.schema.number().optional().describe('起始行号,1 开始'),
    limit: tool.schema.number().optional().describe('最多读取多少行'),
    raw: tool.schema.boolean().optional().describe('内部调用时返回原始内容'),
  },
  async execute(args, context) {
    const absolutePath = resolvePath(args.filePath, context.worktree)
    const matched = matchLegacyRule(absolutePath, loadLegacyRules(context.worktree))

    const isGb2312 = matched?.tool === 'txt-gb2312' || matched?.encoding?.toLowerCase() === 'gb2312'
    if (isGb2312) {
      const decoded = readGb2312File(absolutePath, args.offset, args.limit)
      return withLegacyReadNotice(decoded, matched, args.offset, args.limit, Boolean(args.raw))
    }

    const sliced = applyOffsetLimitFast(await readFile(absolutePath, 'utf8'), args.offset, args.limit)
    return args.raw ? sliced : withLargeFileNotice(sliced, args.offset, args.limit)
  },
})
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
// Collapse Windows CRLF line endings to bare LF.
function normalizeNewlines(text) {
  return text.split('\r\n').join('\n')
}

// Normalize newlines, then break the text into individual lines.
function splitNormalizedLines(text) {
  return normalizeNewlines(text).split('\n')
}

// Strip trailing spaces/tabs so anchor lines match regardless of trailing whitespace.
function canonicalLine(text) {
  return text.replace(/[ \t]+$/g, '')
}

// Report the newline convention used by the document (CRLF wins if present).
function detectNewline(text) {
  if (text.includes('\r\n')) {
    return '\r\n'
  }
  return '\n'
}

// Re-apply the document's original newline convention to normalized text.
function denormalize(text, newline) {
  if (newline !== '\r\n') {
    return text
  }
  return text.split('\n').join('\r\n')
}

// Accept "name", "@name" or "[@name]" and return the canonical "[@name]" form.
function normalizeLabel(label) {
  if (label.startsWith('[@')) {
    return label
  }
  return label.startsWith('@') ? `[${label}]` : `[@${label}]`
}
|
|
30
|
+
|
|
31
|
+
/**
 * Locate the block belonging to a "[@label]" header line: it runs from the
 * header to the line before the next "[@...]" header (or end of file).
 * @returns {{startLine:number,endLine:number,lines:string[],startIndex:number,endIndex:number}|null}
 *   1-based line bounds plus 0-based indices into the normalized line array,
 *   or null when the label is not present.
 */
export function findLabelBlock(content, label) {
  const lines = normalizeNewlines(content).split('\n')
  const target = normalizeLabel(label)

  const startIndex = lines.findIndex((line) => line.trim() === target)
  if (startIndex < 0) {
    return null
  }

  // The block ends just before the next label header, if any.
  let endIndex = lines.length - 1
  for (let cursor = startIndex + 1; cursor < lines.length; cursor += 1) {
    if (/^\[@.+\]$/.test(lines[cursor].trim())) {
      endIndex = cursor - 1
      break
    }
  }

  return {
    startLine: startIndex + 1,
    endLine: endIndex + 1,
    lines,
    startIndex,
    endIndex,
  }
}
|
|
57
|
+
|
|
58
|
+
/**
 * Insert `insertText` on a new line after the first occurrence of `anchor`.
 * Tries an exact substring match first, then a line-level match that ignores
 * trailing whitespace. The original newline convention is preserved.
 * @throws {Error} when the anchor cannot be found either way
 */
export function insertAfterAnchor(content, anchor, insertText) {
  const newline = detectNewline(content)
  const normalized = normalizeNewlines(content)
  const normalizedAnchor = normalizeNewlines(anchor)
  const normalizedInsert = normalizeNewlines(insertText)

  const anchorAt = normalized.indexOf(normalizedAnchor)
  if (anchorAt !== -1) {
    // BUG FIX: the previous String.replace(...) used a string replacement,
    // which interprets `$$`, `$&`, `$'` etc. inside the anchor/insert text
    // and corrupts inserts containing them. Splice by index instead.
    const spliceAt = anchorAt + normalizedAnchor.length
    const next = `${normalized.slice(0, spliceAt)}\n${normalizedInsert}${normalized.slice(spliceAt)}`
    return denormalize(next, newline)
  }

  // Fall back to a whole-line match, ignoring trailing whitespace.
  const lines = splitNormalizedLines(normalized)
  const target = canonicalLine(normalizedAnchor)

  for (let index = 0; index < lines.length; index += 1) {
    if (canonicalLine(lines[index]) !== target) {
      continue
    }
    const next = [
      ...lines.slice(0, index + 1),
      ...splitNormalizedLines(normalizedInsert),
      ...lines.slice(index + 1),
    ].join('\n')
    return denormalize(next, newline)
  }

  throw new Error('未命中插入锚点')
}
|
|
84
|
+
|
|
85
|
+
/**
 * Insert `insertText` directly after a marker line inside the "[@label]" block.
 * Marker matching trims both sides; the original newline convention is kept.
 * @throws {Error} when the label block or the marker line cannot be found
 */
export function insertIntoLabelAfterMarker(content, label, marker, insertText) {
  const newline = detectNewline(content)
  const normalized = normalizeNewlines(content)
  const block = findLabelBlock(normalized, label)

  if (!block) {
    throw new Error(`未找到标签块: ${normalizeLabel(label)}`)
  }

  // First matching line within the block's bounds.
  const wanted = marker.trim()
  let markerIndex = -1
  for (let cursor = block.startIndex; cursor <= block.endIndex; cursor += 1) {
    if (block.lines[cursor].trim() === wanted) {
      markerIndex = cursor
      break
    }
  }

  if (markerIndex === -1) {
    throw new Error(`未找到标签内锚点: ${marker}`)
  }

  const head = block.lines.slice(0, markerIndex + 1)
  const tail = block.lines.slice(markerIndex + 1)
  const body = normalizeNewlines(insertText).split('\n')
  return denormalize(head.concat(body, tail).join('\n'), newline)
}
|
|
109
|
+
|
|
110
|
+
/**
 * Replace the entire "[@label]" block (header line included) with `replacement`.
 * The original newline convention is preserved.
 * @throws {Error} when the label block cannot be found
 */
export function replaceLabelBlock(content, label, replacement) {
  const newline = detectNewline(content)
  const normalized = normalizeNewlines(content)
  const block = findLabelBlock(normalized, label)

  if (!block) {
    throw new Error(`未找到标签块: ${normalizeLabel(label)}`)
  }

  const before = block.lines.slice(0, block.startIndex)
  const after = block.lines.slice(block.endIndex + 1)
  const body = normalizeNewlines(replacement).split('\n')
  return denormalize(before.concat(body, after).join('\n'), newline)
}
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { tool } from '@opencode-ai/plugin'
|
|
2
|
+
import { existsSync } from 'node:fs'
|
|
3
|
+
import path from 'node:path'
|
|
4
|
+
import readTool from './read.js'
|
|
5
|
+
import writeTool from './write.js'
|
|
6
|
+
import {
|
|
7
|
+
insertAfterAnchor,
|
|
8
|
+
insertIntoLabelAfterMarker,
|
|
9
|
+
replaceLabelBlock,
|
|
10
|
+
} from './script-edit-core.mjs'
|
|
11
|
+
|
|
12
|
+
// Resolve a possibly-relative path against the worktree; absolute paths pass through.
function resolvePath(filePath, worktree) {
  if (path.isAbsolute(filePath)) {
    return filePath
  }
  return path.join(worktree, filePath)
}
|
|
15
|
+
|
|
16
|
+
// Script-edit tool: applies one of three anchor/label-based edit modes to a
// text file, reading and writing through the encoding-aware read/write tools
// so legacy-encoded files round-trip correctly.
export default tool({
  description: '按标签块或锚点编辑脚本类文本,适合 legacy 脚本文件',
  args: {
    filePath: tool.schema.string().describe('文件路径'),
    mode: tool.schema.enum(['insert_after_anchor', 'insert_into_label_after_marker', 'replace_label_block']).describe('编辑模式'),
    label: tool.schema.string().optional().describe('目标标签名,例如 @全体补偿页面'),
    marker: tool.schema.string().optional().describe('标签块内锚点行,例如 #ACT'),
    anchor: tool.schema.string().optional().describe('普通锚点文本'),
    content: tool.schema.string().describe('要插入或替换的内容'),
  },
  async execute(args, context) {
    const absolutePath = resolvePath(args.filePath, context.worktree)

    if (!existsSync(absolutePath)) {
      throw new Error(`文件不存在: ${absolutePath}`)
    }

    // Raw read: no receipt header, decoded via the legacy rules if matched.
    const original = await readTool.execute({ filePath: absolutePath, raw: true }, context)

    let next = original
    switch (args.mode) {
      case 'insert_after_anchor':
        if (!args.anchor) {
          throw new Error('insert_after_anchor 模式缺少 anchor')
        }
        next = insertAfterAnchor(original, args.anchor, args.content)
        break
      case 'insert_into_label_after_marker':
        if (!args.label || !args.marker) {
          throw new Error('insert_into_label_after_marker 模式缺少 label 或 marker')
        }
        next = insertIntoLabelAfterMarker(original, args.label, args.marker, args.content)
        break
      case 'replace_label_block':
        if (!args.label) {
          throw new Error('replace_label_block 模式缺少 label')
        }
        next = replaceLabelBlock(original, args.label, args.content)
        break
    }

    return writeTool.execute({ filePath: absolutePath, content: next }, context)
  },
})
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { tool } from '@opencode-ai/plugin'
|
|
2
|
+
import { existsSync } from 'node:fs'
|
|
3
|
+
import path from 'node:path'
|
|
4
|
+
import readTool from './read.ts'
|
|
5
|
+
import writeTool from './write.ts'
|
|
6
|
+
import {
|
|
7
|
+
insertAfterAnchor,
|
|
8
|
+
insertIntoLabelAfterMarker,
|
|
9
|
+
replaceLabelBlock,
|
|
10
|
+
} from './script-edit-core.mjs'
|
|
11
|
+
|
|
12
|
+
// Resolve a possibly-relative path against the worktree; absolute paths pass through.
function resolvePath(filePath, worktree) {
  if (path.isAbsolute(filePath)) {
    return filePath
  }
  return path.join(worktree, filePath)
}
|
|
15
|
+
|
|
16
|
+
// Script-edit tool (TS variant): applies one of three anchor/label-based edit
// modes to a text file, reading and writing through the encoding-aware
// read/write tools so legacy-encoded files round-trip correctly.
export default tool({
  description: '按标签块或锚点编辑脚本类文本,适合 legacy 脚本文件',
  args: {
    filePath: tool.schema.string().describe('文件路径'),
    mode: tool.schema.enum(['insert_after_anchor', 'insert_into_label_after_marker', 'replace_label_block']).describe('编辑模式'),
    label: tool.schema.string().optional().describe('目标标签名,例如 @全体补偿页面'),
    marker: tool.schema.string().optional().describe('标签块内锚点行,例如 #ACT'),
    anchor: tool.schema.string().optional().describe('普通锚点文本'),
    content: tool.schema.string().describe('要插入或替换的内容'),
  },
  async execute(args, context) {
    const absolutePath = resolvePath(args.filePath, context.worktree)

    if (!existsSync(absolutePath)) {
      throw new Error(`文件不存在: ${absolutePath}`)
    }

    // Raw read: no receipt header, decoded via the legacy rules if matched.
    const original = await readTool.execute({ filePath: absolutePath, raw: true }, context)

    let next = original
    switch (args.mode) {
      case 'insert_after_anchor':
        if (!args.anchor) {
          throw new Error('insert_after_anchor 模式缺少 anchor')
        }
        next = insertAfterAnchor(original, args.anchor, args.content)
        break
      case 'insert_into_label_after_marker':
        if (!args.label || !args.marker) {
          throw new Error('insert_into_label_after_marker 模式缺少 label 或 marker')
        }
        next = insertIntoLabelAfterMarker(original, args.label, args.marker, args.content)
        break
      case 'replace_label_block':
        if (!args.label) {
          throw new Error('replace_label_block 模式缺少 label')
        }
        next = replaceLabelBlock(original, args.label, args.content)
        break
    }

    return writeTool.execute({ filePath: absolutePath, content: next }, context)
  },
})
|