@newtype-os/cli 0.0.0-main-202603270907
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/bin/bin/nt +173 -0
- package/bin/nt +173 -0
- package/package.json +24 -0
- package/postinstall.mjs +233 -0
- package/skill-content.ts +358 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 opencode
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/bin/bin/nt
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
const childProcess = require("child_process")
|
|
4
|
+
const fs = require("fs")
|
|
5
|
+
const path = require("path")
|
|
6
|
+
const os = require("os")
|
|
7
|
+
|
|
8
|
+
// Executes the resolved CLI binary, forwarding argv (minus node + script)
// and inheriting stdio. Never returns: always terminates this process with
// the child's outcome.
function run(target) {
  const result = childProcess.spawnSync(target, process.argv.slice(2), {
    stdio: "inherit",
  })
  if (result.error) {
    // Spawn itself failed (e.g. ENOENT, EACCES).
    console.error(result.error.message)
    process.exit(1)
  }
  // When the child exits normally, mirror its exit code.
  if (typeof result.status === "number") {
    process.exit(result.status)
  }
  // status is null when the child was killed by a signal; the original
  // code exited 0 here, silently masking crashes. Report failure instead.
  process.exit(result.signal ? 1 : 0)
}
|
|
19
|
+
|
|
20
|
+
// Explicit binary-path override via environment variable. run() never
// returns, so falling through this guard means no override was set.
const envPath = process.env.OPENCODE_BIN_PATH
if (envPath) {
  run(envPath)
}

// Resolve through symlinks so package-manager shims still locate the
// real install directory for the upward node_modules search.
const scriptPath = fs.realpathSync(__filename)
const scriptDir = path.dirname(scriptPath)

// Translate Node's platform/arch identifiers into the naming scheme used
// by the per-platform binary packages; unknown values pass through as-is.
const platformMap = { darwin: "darwin", linux: "linux", win32: "windows" }
const archMap = { x64: "x64", arm64: "arm64", arm: "arm" }

let platform = platformMap[os.platform()]
if (!platform) platform = os.platform()

let arch = archMap[os.arch()]
if (!arch) arch = os.arch()

// Base package name plus the platform-specific executable file name.
const base = "@newtype-os/cli-" + platform + "-" + arch
const binary = platform === "windows" ? "nt.exe" : "nt"
|
|
49
|
+
|
|
50
|
+
/**
 * Best-effort detection of AVX2 support on x64 hosts.
 * Returns false for non-x64 architectures, unrecognized platforms, or
 * whenever probing fails — callers then fall back to the baseline binary.
 */
function supportsAvx2() {
  if (arch !== "x64") return false

  if (platform === "linux") {
    // /proc/cpuinfo lists CPU flags; look for a standalone "avx2" token.
    try {
      const cpuinfo = fs.readFileSync("/proc/cpuinfo", "utf8")
      return /(^|\s)avx2(\s|$)/i.test(cpuinfo)
    } catch {
      return false
    }
  }

  if (platform === "darwin") {
    // sysctl prints "1" when the optional AVX2 feature is present.
    try {
      const probe = childProcess.spawnSync("sysctl", ["-n", "hw.optional.avx2_0"], {
        encoding: "utf8",
        timeout: 1500,
      })
      if (probe.status !== 0) return false
      return (probe.stdout || "").trim() === "1"
    } catch {
      return false
    }
  }

  if (platform === "windows") {
    // Query IsProcessorFeaturePresent(40) on kernel32 through whichever
    // PowerShell flavor happens to be available.
    const cmd =
      '(Add-Type -MemberDefinition "[DllImport(""kernel32.dll"")] public static extern bool IsProcessorFeaturePresent(int ProcessorFeature);" -Name Kernel32 -Namespace Win32 -PassThru)::IsProcessorFeaturePresent(40)'

    for (const exe of ["powershell.exe", "pwsh.exe", "pwsh", "powershell"]) {
      try {
        const probe = childProcess.spawnSync(exe, ["-NoProfile", "-NonInteractive", "-Command", cmd], {
          encoding: "utf8",
          timeout: 3000,
          windowsHide: true,
        })
        if (probe.status !== 0) continue
        const answer = (probe.stdout || "").trim().toLowerCase()
        if (answer === "true" || answer === "1") return true
        if (answer === "false" || answer === "0") return false
      } catch {
        continue
      }
    }

    return false
  }

  return false
}
|
|
99
|
+
|
|
100
|
+
/**
 * Ordered list of candidate platform-package names to probe, preferred
 * variant first: x64 hosts without AVX2 prefer the "baseline" build, and
 * Alpine/musl-libc Linux hosts prefer the "-musl" build.
 */
const names = (() => {
  const baseline = arch === "x64" && !supportsAvx2()

  if (platform !== "linux") {
    if (arch !== "x64") return [base]
    return baseline ? [`${base}-baseline`, base] : [base, `${base}-baseline`]
  }

  // Detect musl libc: Alpine marker file, or "musl" in ldd's version text.
  const musl = (() => {
    try {
      if (fs.existsSync("/etc/alpine-release")) return true
    } catch {
      // ignore
    }

    try {
      const probe = childProcess.spawnSync("ldd", ["--version"], { encoding: "utf8" })
      const text = ((probe.stdout || "") + (probe.stderr || "")).toLowerCase()
      if (text.includes("musl")) return true
    } catch {
      // ignore
    }

    return false
  })()

  if (arch !== "x64") {
    return musl ? [`${base}-musl`, base] : [base, `${base}-musl`]
  }

  if (musl) {
    return baseline
      ? [`${base}-baseline-musl`, `${base}-musl`, `${base}-baseline`, base]
      : [`${base}-musl`, `${base}-baseline-musl`, base, `${base}-baseline`]
  }
  return baseline
    ? [`${base}-baseline`, base, `${base}-baseline-musl`, `${base}-musl`]
    : [base, `${base}-baseline`, `${base}-musl`, `${base}-baseline-musl`]
})()
|
|
144
|
+
|
|
145
|
+
/**
 * Walks upward from startDir, checking each node_modules directory for
 * one of the candidate platform packages.
 *
 * @param {string} startDir - Directory to begin the upward search from.
 * @returns {string|undefined} Path to the packaged executable, or
 *   undefined when no candidate is installed anywhere up the tree.
 */
function findBinary(startDir) {
  for (let dir = startDir; ; ) {
    const modules = path.join(dir, "node_modules")
    if (fs.existsSync(modules)) {
      for (const name of names) {
        const candidate = path.join(modules, name, "bin", binary)
        if (fs.existsSync(candidate)) return candidate
      }
    }
    const parent = path.dirname(dir)
    // Reached the filesystem root: dirname of the root is itself.
    if (parent === dir) return
    dir = parent
  }
}
|
|
162
|
+
|
|
163
|
+
// Locate the platform binary relative to this script. When nothing is
// found, tell the user exactly which platform packages to try installing
// manually, then fail.
const resolved = findBinary(scriptDir)
if (!resolved) {
  const suggestions = names.map((n) => `"${n}"`).join(" or ")
  console.error(
    "It seems that your package manager failed to install the right version of the Newtype CLI for your platform. You can try manually installing " +
      suggestions +
      " package",
  )
  process.exit(1)
}

run(resolved)
|
package/bin/nt
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
#!/usr/bin/env node
// Wrapper that locates and executes the platform-specific "nt" binary
// shipped in a sibling @newtype-os/cli-<platform>-<arch>[-variant] package.

const childProcess = require("child_process")
const fs = require("fs")
const path = require("path")
const os = require("os")

// Spawn the target binary with our argv/stdio and exit with its status.
// Never returns.
function run(target) {
  const result = childProcess.spawnSync(target, process.argv.slice(2), {
    stdio: "inherit",
  })
  if (result.error) {
    console.error(result.error.message)
    process.exit(1)
  }
  // NOTE(review): status is null when the child dies from a signal, in
  // which case this exits 0 — confirm that is intended.
  const code = typeof result.status === "number" ? result.status : 0
  process.exit(code)
}

// Environment override for the binary path; run() exits the process.
const envPath = process.env.OPENCODE_BIN_PATH
if (envPath) {
  run(envPath)
}

// Resolve symlinks so the upward search starts at the real install location.
const scriptPath = fs.realpathSync(__filename)
const scriptDir = path.dirname(scriptPath)

// Map Node identifiers onto the package naming scheme; unknown values
// fall through unchanged below.
const platformMap = {
  darwin: "darwin",
  linux: "linux",
  win32: "windows",
}
const archMap = {
  x64: "x64",
  arm64: "arm64",
  arm: "arm",
}

let platform = platformMap[os.platform()]
if (!platform) {
  platform = os.platform()
}
let arch = archMap[os.arch()]
if (!arch) {
  arch = os.arch()
}
// Base platform package name and the executable file it ships.
const base = "@newtype-os/cli-" + platform + "-" + arch
const binary = platform === "windows" ? "nt.exe" : "nt"

// Best-effort AVX2 probe (x64 only); returns false on any failure so
// callers fall back to the baseline build.
function supportsAvx2() {
  if (arch !== "x64") return false

  if (platform === "linux") {
    try {
      // Look for a standalone "avx2" token among the CPU flags.
      return /(^|\s)avx2(\s|$)/i.test(fs.readFileSync("/proc/cpuinfo", "utf8"))
    } catch {
      return false
    }
  }

  if (platform === "darwin") {
    try {
      // sysctl prints "1" when the optional AVX2 feature is available.
      const result = childProcess.spawnSync("sysctl", ["-n", "hw.optional.avx2_0"], {
        encoding: "utf8",
        timeout: 1500,
      })
      if (result.status !== 0) return false
      return (result.stdout || "").trim() === "1"
    } catch {
      return false
    }
  }

  if (platform === "windows") {
    // Query IsProcessorFeaturePresent(40) on kernel32 through whichever
    // PowerShell flavor is available.
    const cmd =
      '(Add-Type -MemberDefinition "[DllImport(""kernel32.dll"")] public static extern bool IsProcessorFeaturePresent(int ProcessorFeature);" -Name Kernel32 -Namespace Win32 -PassThru)::IsProcessorFeaturePresent(40)'

    for (const exe of ["powershell.exe", "pwsh.exe", "pwsh", "powershell"]) {
      try {
        const result = childProcess.spawnSync(exe, ["-NoProfile", "-NonInteractive", "-Command", cmd], {
          encoding: "utf8",
          timeout: 3000,
          windowsHide: true,
        })
        if (result.status !== 0) continue
        const out = (result.stdout || "").trim().toLowerCase()
        if (out === "true" || out === "1") return true
        if (out === "false" || out === "0") return false
      } catch {
        continue
      }
    }

    return false
  }

  return false
}

// Candidate package names, preferred variant first: baseline when no
// AVX2 on x64, musl on Alpine/musl-libc Linux.
const names = (() => {
  const avx2 = supportsAvx2()
  const baseline = arch === "x64" && !avx2

  if (platform === "linux") {
    // Detect musl libc via the Alpine marker file or ldd's version text.
    const musl = (() => {
      try {
        if (fs.existsSync("/etc/alpine-release")) return true
      } catch {
        // ignore
      }

      try {
        const result = childProcess.spawnSync("ldd", ["--version"], { encoding: "utf8" })
        const text = ((result.stdout || "") + (result.stderr || "")).toLowerCase()
        if (text.includes("musl")) return true
      } catch {
        // ignore
      }

      return false
    })()

    if (musl) {
      if (arch === "x64") {
        if (baseline) return [`${base}-baseline-musl`, `${base}-musl`, `${base}-baseline`, base]
        return [`${base}-musl`, `${base}-baseline-musl`, base, `${base}-baseline`]
      }
      return [`${base}-musl`, base]
    }

    if (arch === "x64") {
      if (baseline) return [`${base}-baseline`, base, `${base}-baseline-musl`, `${base}-musl`]
      return [base, `${base}-baseline`, `${base}-musl`, `${base}-baseline-musl`]
    }
    return [base, `${base}-musl`]
  }

  if (arch === "x64") {
    if (baseline) return [`${base}-baseline`, base]
    return [base, `${base}-baseline`]
  }
  return [base]
})()

// Walk up the directory tree, checking each node_modules for a candidate
// package's bin/<binary>; undefined when nothing is found.
function findBinary(startDir) {
  let current = startDir
  for (;;) {
    const modules = path.join(current, "node_modules")
    if (fs.existsSync(modules)) {
      for (const name of names) {
        const candidate = path.join(modules, name, "bin", binary)
        if (fs.existsSync(candidate)) return candidate
      }
    }
    const parent = path.dirname(current)
    if (parent === current) {
      // Reached the filesystem root.
      return
    }
    current = parent
  }
}

// Resolve and run; on failure, suggest the candidate packages to install.
const resolved = findBinary(scriptDir)
if (!resolved) {
  console.error(
    "It seems that your package manager failed to install the right version of the Newtype CLI for your platform. You can try manually installing " +
      names.map((n) => `\"${n}\"`).join(" or ") +
      " package",
  )
  process.exit(1)
}

run(resolved)
|
package/package.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@newtype-os/cli",
|
|
3
|
+
"bin": {
|
|
4
|
+
"nt": "./bin/nt"
|
|
5
|
+
},
|
|
6
|
+
"scripts": {
|
|
7
|
+
"postinstall": "bun ./postinstall.mjs || node ./postinstall.mjs"
|
|
8
|
+
},
|
|
9
|
+
"version": "0.0.0-main-202603270907",
|
|
10
|
+
"license": "MIT",
|
|
11
|
+
"optionalDependencies": {
|
|
12
|
+
"@newtype-os/cli-linux-x64-baseline-musl": "0.0.0-main-202603270907",
|
|
13
|
+
"@newtype-os/cli-darwin-x64": "0.0.0-main-202603270907",
|
|
14
|
+
"@newtype-os/cli-linux-x64-musl": "0.0.0-main-202603270907",
|
|
15
|
+
"@newtype-os/cli-linux-x64": "0.0.0-main-202603270907",
|
|
16
|
+
"@newtype-os/cli-windows-x64-baseline": "0.0.0-main-202603270907",
|
|
17
|
+
"@newtype-os/cli-darwin-arm64": "0.0.0-main-202603270907",
|
|
18
|
+
"@newtype-os/cli-darwin-x64-baseline": "0.0.0-main-202603270907",
|
|
19
|
+
"@newtype-os/cli-linux-arm64-musl": "0.0.0-main-202603270907",
|
|
20
|
+
"@newtype-os/cli-linux-arm64": "0.0.0-main-202603270907",
|
|
21
|
+
"@newtype-os/cli-windows-x64": "0.0.0-main-202603270907",
|
|
22
|
+
"@newtype-os/cli-linux-x64-baseline": "0.0.0-main-202603270907"
|
|
23
|
+
}
|
|
24
|
+
}
|
package/postinstall.mjs
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import fs from "fs"
|
|
4
|
+
import path from "path"
|
|
5
|
+
import os from "os"
|
|
6
|
+
import { fileURLToPath } from "url"
|
|
7
|
+
import { createRequire } from "module"
|
|
8
|
+
|
|
9
|
+
const __dirname = path.dirname(fileURLToPath(import.meta.url))
|
|
10
|
+
const require = createRequire(import.meta.url)
|
|
11
|
+
|
|
12
|
+
/**
 * Maps Node's platform/arch identifiers onto the naming scheme used by
 * the per-platform binary packages (win32 -> "windows"; everything else
 * is already aligned). Unrecognized values pass through unchanged.
 *
 * @returns {{ platform: string, arch: string }}
 */
function detectPlatformAndArch() {
  const platformNames = { darwin: "darwin", linux: "linux", win32: "windows" }
  const archNames = { x64: "x64", arm64: "arm64", arm: "arm" }

  const rawPlatform = os.platform()
  const rawArch = os.arch()

  return {
    platform: platformNames[rawPlatform] || rawPlatform,
    arch: archNames[rawArch] || rawArch,
  }
}
|
|
49
|
+
|
|
50
|
+
/**
 * Resolves the installed platform package and the executable it ships.
 *
 * @returns {{ binaryPath: string, binaryName: string }}
 * @throws {Error} when the platform package or its binary is missing.
 */
function findBinary() {
  const { platform, arch } = detectPlatformAndArch()
  const packageName = `@newtype-os/cli-${platform}-${arch}`
  // The platform packages ship the executable as bin/nt (bin/nt.exe on
  // Windows) — this must stay in sync with the bin/nt wrapper script.
  // Previously this looked for "newtype"/"newtype.exe", which never
  // exists, so verification always failed.
  const binaryName = platform === "windows" ? "nt.exe" : "nt"

  try {
    // require.resolve locates the package through normal module resolution.
    const packageJsonPath = require.resolve(`${packageName}/package.json`)
    const packageDir = path.dirname(packageJsonPath)
    const binaryPath = path.join(packageDir, "bin", binaryName)

    if (!fs.existsSync(binaryPath)) {
      throw new Error(`Binary not found at ${binaryPath}`)
    }

    return { binaryPath, binaryName }
  } catch (error) {
    throw new Error(`Could not find package ${packageName}: ${error.message}`)
  }
}
|
|
70
|
+
|
|
71
|
+
/**
 * Ensures ./bin exists and that no stale file or symlink occupies the
 * target slot for the given binary name.
 *
 * @param {string} binaryName - File name the binary will be linked as.
 * @returns {{ binDir: string, targetPath: string }}
 */
function prepareBinDirectory(binaryName) {
  const binDir = path.join(__dirname, "bin")
  const targetPath = path.join(binDir, binaryName)

  // mkdirSync with recursive is a no-op when the directory already exists.
  fs.mkdirSync(binDir, { recursive: true })

  // existsSync follows symlinks and reports false for a dangling link, so
  // the previous existsSync+unlinkSync pair left broken symlinks behind
  // and the later symlinkSync would fail with EEXIST. rmSync with force
  // removes plain files and symlinks (dangling or not) and ignores a
  // missing target.
  fs.rmSync(targetPath, { force: true })

  return { binDir, targetPath }
}
|
|
87
|
+
|
|
88
|
+
/**
 * Symlinks the resolved platform binary into ./bin under binaryName and
 * verifies the link is usable afterwards.
 *
 * @param {string} sourcePath - Absolute path to the real binary.
 * @param {string} binaryName - File name to link it as inside ./bin.
 * @throws {Error} when the link cannot be verified after creation.
 */
function symlinkBinary(sourcePath, binaryName) {
  const { targetPath } = prepareBinDirectory(binaryName)

  fs.symlinkSync(sourcePath, targetPath)
  console.log(`newtype binary symlinked: ${targetPath} -> ${sourcePath}`)

  // existsSync follows the link, so this also confirms the source exists.
  if (fs.existsSync(targetPath)) return
  throw new Error(`Failed to symlink binary to ${targetPath}`)
}
|
|
99
|
+
|
|
100
|
+
/**
 * Postinstall entry point. Three independent, best-effort phases:
 *  1. verify the platform binary package is installed,
 *  2. create a default ~/.config/newtype profile if absent,
 *  3. inject a skill/rules file into detected AI-agent home directories.
 * Each phase catches its own errors; nothing here blocks installation.
 */
async function main() {
  try {
    if (os.platform() === "win32") {
      // On Windows, the .exe is already included in the package and bin field points to it
      // No binary postinstall setup needed — config creation still runs below
      console.log("Windows detected: binary setup not needed (using packaged .exe)")
    } else {
      // On non-Windows platforms, just verify the binary package exists
      // Don't replace the wrapper script - it handles binary execution
      const { binaryPath } = findBinary()
      console.log(`Platform binary verified at: ${binaryPath}`)
      console.log("Wrapper script will handle binary execution")
    }
  } catch (error) {
    console.error("Failed to setup newtype binary:", error.message)
    console.error("This is not fatal - the wrapper script will attempt to find the binary at runtime.")
    // Don't exit(1) - postinstall failure should not block installation
    // The bin/newtype wrapper script handles binary resolution at runtime
  }

  // Auto-create default config if it doesn't exist
  try {
    const configDir = path.join(os.homedir(), ".config", "newtype")
    const configFile = path.join(configDir, "newtype-profile.json")

    if (!fs.existsSync(configFile)) {
      if (!fs.existsSync(configDir)) {
        fs.mkdirSync(configDir, { recursive: true })
      }

      // Default agent->model assignments, written on first install only.
      const defaultConfig = {
        agents: {
          chief: { model: "opencode/glm-4.7-free" },
          researcher: { model: "opencode/glm-4.7-free" },
          archivist: { model: "opencode/glm-4.7-free" },
          writer: { model: "opencode/glm-4.7-free" },
        },
      }

      fs.writeFileSync(configFile, JSON.stringify(defaultConfig, null, 2) + "\n")
      console.log(`Default config created at: ${configFile}`)
    } else {
      console.log(`Config already exists at: ${configFile}`)
    }
  } catch (error) {
    console.error("Failed to create default config:", error.message)
    // Non-fatal - user can create config manually
  }

  // Auto-inject SKILL.md into detected global AI agent directories
  // NOTE(review): this writes files into other tools' global config
  // directories (~/.claude, ~/.codex, ~/.cursor, ...) during npm install
  // without asking the user — confirm this behavior is intended/consented.
  try {
    // In dist package: skill-content.ts is next to postinstall.mjs
    // In dev: it's at ../src/cli/cmd/skill-content.ts relative to script/
    const candidates = [
      path.join(__dirname, "skill-content.ts"),
      path.join(__dirname, "..", "src", "cli", "cmd", "skill-content.ts"),
    ]
    const skillSrc = candidates.find((p) => fs.existsSync(p))
    if (skillSrc) {
      // Pull the markdown payload out of the TS module with a regex rather
      // than importing it (no TypeScript runtime is guaranteed here).
      const raw = fs.readFileSync(skillSrc, "utf-8")
      const match = raw.match(/export const skillContent = `([\s\S]*?)`/)
      if (match) {
        const content = match[1]
        const home = os.homedir()
        // An agent is considered installed when its root directory exists;
        // files are only written for installed agents, and never overwritten.
        const agents = [
          {
            name: "claude-code",
            root: path.join(home, ".claude"),
            dir: path.join(home, ".claude", "skills", "newtype"),
            file: "SKILL.md",
          },
          {
            name: "codex",
            root: path.join(home, ".codex"),
            dir: path.join(home, ".codex", "skills", "newtype"),
            file: "SKILL.md",
          },
          {
            name: "goose",
            root: path.join(home, ".config", "goose"),
            dir: path.join(home, ".config", "goose", "skills", "newtype"),
            file: "SKILL.md",
          },
          {
            name: "cursor",
            root: path.join(home, ".cursor"),
            dir: path.join(home, ".cursor", "rules"),
            file: "newtype.mdc",
          },
          {
            name: "augment",
            root: path.join(home, ".augment"),
            dir: path.join(home, ".augment", "rules"),
            file: "newtype.md",
          },
        ]
        let injected = 0
        for (const agent of agents) {
          if (!fs.existsSync(agent.root)) continue
          const target = path.join(agent.dir, agent.file)
          if (fs.existsSync(target)) continue // Don't overwrite existing
          fs.mkdirSync(agent.dir, { recursive: true })
          if (agent.name === "cursor") {
            // Cursor rules use .mdc with a YAML front-matter header.
            const mdc = [
              "---",
              "description: Newtype CLI multi-agent content team",
              "globs: ",
              "alwaysApply: true",
              "---",
              "",
              content,
            ].join("\n")
            fs.writeFileSync(target, mdc)
          } else {
            fs.writeFileSync(target, content)
          }
          injected++
        }
        if (injected > 0) {
          console.log(`Newtype skill injected into ${injected} agent(s)`)
        }
      }
    }
  } catch (error) {
    console.error("Failed to inject skills:", error.message)
    // Non-fatal
  }
}
|
|
228
|
+
|
|
229
|
+
// Kick off the postinstall work; never fail the enclosing npm install.
main().catch((error) => {
  console.error("Postinstall script error:", error.message)
  // Exit 0 so installation is not blocked
  process.exit(0)
})
|
package/skill-content.ts
ADDED
|
@@ -0,0 +1,358 @@
|
|
|
1
|
+
export const skillContent = `# newtype CLI — Multi-Agent Content Team
|
|
2
|
+
|
|
3
|
+
Binary: \`nt\` | Package: \`npm i -g @newtype-os/cli\`
|
|
4
|
+
|
|
5
|
+
8 specialized AI agents (researcher, writer, editor, fact-checker, extractor, analyst, archivist, chief) accessible as non-interactive CLI commands. Designed for programmatic invocation by other AI agents.
|
|
6
|
+
|
|
7
|
+
## When to Use
|
|
8
|
+
|
|
9
|
+
- Deep research on a topic with source verification
|
|
10
|
+
- Generate newsletters, essays, reports, technical docs
|
|
11
|
+
- Polish existing content (structure → paragraph → sentence → word)
|
|
12
|
+
- Fact-check claims or articles
|
|
13
|
+
- Extract structured content from PDF, images, web pages
|
|
14
|
+
- Framework-based analysis (SWOT, first-principles, PESTEL, etc.)
|
|
15
|
+
- Full research-to-publish pipeline orchestration
|
|
16
|
+
- Store/retrieve knowledge across sessions
|
|
17
|
+
|
|
18
|
+
## Installation
|
|
19
|
+
|
|
20
|
+
\`\`\`bash
|
|
21
|
+
npm i -g @newtype-os/cli
|
|
22
|
+
# or
|
|
23
|
+
bun i -g @newtype-os/cli
|
|
24
|
+
\`\`\`
|
|
25
|
+
|
|
26
|
+
Verify: \`nt --version\`
|
|
27
|
+
|
|
28
|
+
## Output Modes
|
|
29
|
+
|
|
30
|
+
| Flag | Behavior | Use When |
|
|
31
|
+
| ----------- | ----------------------- | -------------------------------------- |
|
|
32
|
+
| (none) | Plain text to stdout | Human reading, simple piping |
|
|
33
|
+
| \`--json\` | JSON envelope to stdout | Agent parsing results programmatically |
|
|
34
|
+
| \`--stream\` | NDJSON event stream | Real-time progress monitoring |
|
|
35
|
+
| \`-o <file>\` | Write result to file | Saving output, pipeline steps |
|
|
36
|
+
|
|
37
|
+
### JSON Envelope Schema
|
|
38
|
+
|
|
39
|
+
\`\`\`json
|
|
40
|
+
{
|
|
41
|
+
"success": true,
|
|
42
|
+
"result": "... the text output ...",
|
|
43
|
+
"metadata": {
|
|
44
|
+
"agent": "researcher",
|
|
45
|
+
"sessionID": "...",
|
|
46
|
+
"duration_ms": 45000,
|
|
47
|
+
"tools": [{ "name": "web_search", "title": "..." }]
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
\`\`\`
|
|
51
|
+
|
|
52
|
+
On error:
|
|
53
|
+
|
|
54
|
+
\`\`\`json
|
|
55
|
+
{
|
|
56
|
+
"success": false,
|
|
57
|
+
"error": { "code": 2, "message": "..." },
|
|
58
|
+
"metadata": {
|
|
59
|
+
"agent": "researcher",
|
|
60
|
+
"sessionID": "...",
|
|
61
|
+
"duration_ms": 12000,
|
|
62
|
+
"tools": []
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
\`\`\`
|
|
66
|
+
|
|
67
|
+
## Exit Codes
|
|
68
|
+
|
|
69
|
+
| Code | Meaning |
|
|
70
|
+
| ---- | -------------------- |
|
|
71
|
+
| 0 | Success |
|
|
72
|
+
| 1 | Input/argument error |
|
|
73
|
+
| 2 | Model call failed |
|
|
74
|
+
| 3 | Timeout |
|
|
75
|
+
| 4 | Config/auth error |
|
|
76
|
+
|
|
77
|
+
## Input Methods (priority high → low)
|
|
78
|
+
|
|
79
|
+
1. Positional text: \`nt research AI Agent architectures\`
|
|
80
|
+
2. \`--input <file>\` — read from file
|
|
81
|
+
3. \`--topic <text>\` — explicit topic string
|
|
82
|
+
4. stdin pipe: \`cat notes.md | nt write\`
|
|
83
|
+
|
|
84
|
+
## Global Flags
|
|
85
|
+
|
|
86
|
+
| Flag | Short | Description |
|
|
87
|
+
| ----------------- | ----- | ----------------------------------------- |
|
|
88
|
+
| \`--output <path>\` | \`-o\` | Output file (default: stdout) |
|
|
89
|
+
| \`--json\` | | JSON envelope output |
|
|
90
|
+
| \`--stream\` | | NDJSON event stream |
|
|
91
|
+
| \`--verbose\` | | Show tool execution progress |
|
|
92
|
+
| \`--quiet\` | \`-q\` | Suppress non-result output |
|
|
93
|
+
| \`--model <p/m>\` | \`-m\` | Override model (format: \`provider/model\`) |
|
|
94
|
+
| \`--lang <code>\` | | Output language: \`zh\`, \`en\`, \`ja\`, etc. |
|
|
95
|
+
| \`--timeout <sec>\` | \`-t\` | Timeout in seconds (default: 300) |
|
|
96
|
+
| \`--input <file>\` | | Input file path |
|
|
97
|
+
| \`--topic <text>\` | | Topic text |
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## Commands
|
|
102
|
+
|
|
103
|
+
### \`nt research [topic..]\` — Topic Research
|
|
104
|
+
|
|
105
|
+
Agent: researcher (+ fact-checker by default)
|
|
106
|
+
|
|
107
|
+
\`\`\`bash
|
|
108
|
+
# Basic
|
|
109
|
+
nt research "AI Agent architectures 2026" -o research.md
|
|
110
|
+
|
|
111
|
+
# Deep research with focus
|
|
112
|
+
nt research "MCP vs CLI" --depth deep --focus "developer experience,adoption" -o deep.md
|
|
113
|
+
|
|
114
|
+
# JSON output for programmatic consumption
|
|
115
|
+
nt research "Kubernetes security" --json
|
|
116
|
+
|
|
117
|
+
# Skip fact-checking for speed
|
|
118
|
+
nt research "quick overview of RAG" --no-fact-check -o quick.md
|
|
119
|
+
|
|
120
|
+
# Pipe research into writing
|
|
121
|
+
nt research "AI trends" -o /tmp/r.md && nt write --input /tmp/r.md -o draft.md
|
|
122
|
+
\`\`\`
|
|
123
|
+
|
|
124
|
+
| Flag | Default | Description |
|
|
125
|
+
| -------------------- | -------- | ------------------------------- |
|
|
126
|
+
| \`--depth <level>\` | \`normal\` | \`shallow\` / \`normal\` / \`deep\` |
|
|
127
|
+
| \`--sources <n>\` | \`5\` | Expected number of sources |
|
|
128
|
+
| \`--focus <keywords>\` | | Focus keywords, comma-separated |
|
|
129
|
+
| \`--no-fact-check\` | | Skip fact-checking step |
|
|
130
|
+
|
|
131
|
+
### \`nt write [topic..]\` — Content Generation
|
|
132
|
+
|
|
133
|
+
Agent: writer
|
|
134
|
+
|
|
135
|
+
\`\`\`bash
|
|
136
|
+
# From research material
|
|
137
|
+
nt write --input research.md --style newsletter -o draft.md
|
|
138
|
+
|
|
139
|
+
# From scratch with topic
|
|
140
|
+
nt write "Why CLI is the native language of AI Agents" --style essay --words 2000 -o article.md
|
|
141
|
+
|
|
142
|
+
# Specify methodology and tone
|
|
143
|
+
nt write --input notes.md --method AIDA --tone professional --audience "CTOs and VPs of Engineering" -o post.md
|
|
144
|
+
\`\`\`
|
|
145
|
+
|
|
146
|
+
| Flag | Default | Description |
|
|
147
|
+
| ------------------- | ------- | --------------------------------------------------------------------------- |
|
|
148
|
+
| \`--style <s>\` | | \`newsletter\` / \`essay\` / \`report\` / \`tweet-thread\` / \`technical\` / \`story\` |
|
|
149
|
+
| \`--method <m>\` | | \`WRITE\` / \`AIDA\` / \`PAS\` / \`STORYTELLING\` / \`ANALYTICAL\` / \`CONVERSATIONAL\` |
|
|
150
|
+
| \`--words <n>\` | \`1500\` | Target word count |
|
|
151
|
+
| \`--tone <t>\` | | \`professional\` / \`casual\` / \`academic\` / \`provocative\` |
|
|
152
|
+
| \`--audience <desc>\` | | Target audience description |
|
|
153
|
+
|
|
154
|
+
### \`nt edit [file]\` — Content Editing
|
|
155
|
+
|
|
156
|
+
Agent: editor. Four-layer refinement: structure → paragraph → sentence → word.
|
|
157
|
+
|
|
158
|
+
\`\`\`bash
|
|
159
|
+
# Full edit
|
|
160
|
+
nt edit --input draft.md -o final.md
|
|
161
|
+
|
|
162
|
+
# Structure-only pass
|
|
163
|
+
nt edit --input draft.md --layer structure -o restructured.md
|
|
164
|
+
|
|
165
|
+
# Preserve voice, only fix grammar
|
|
166
|
+
nt edit --input draft.md --preserve-voice -o polished.md
|
|
167
|
+
|
|
168
|
+
# Pipe from stdin
|
|
169
|
+
cat draft.md | nt edit -o final.md
|
|
170
|
+
|
|
171
|
+
# Edit with diff output
|
|
172
|
+
nt edit --input draft.md --diff -o final.md
|
|
173
|
+
\`\`\`
|
|
174
|
+
|
|
175
|
+
| Flag | Default | Description |
|
|
176
|
+
| -------------------------- | ------- | ------------------------------------------------------- |
|
|
177
|
+
| \`--layer <l>\` | \`all\` | \`structure\` / \`paragraph\` / \`sentence\` / \`word\` / \`all\` |
|
|
178
|
+
| \`--tone <t>\` | | Target tone |
|
|
179
|
+
| \`--target-audience <desc>\` | | Target audience |
|
|
180
|
+
| \`--preserve-voice\` | | Keep original voice, fix grammar/logic only |
|
|
181
|
+
| \`--diff\` | | Include diff showing changes |
|
|
182
|
+
|
|
183
|
+
### \`nt fact-check [topic..]\` — Fact Verification
|
|
184
|
+
|
|
185
|
+
Agent: fact-checker
|
|
186
|
+
|
|
187
|
+
\`\`\`bash
|
|
188
|
+
# Check an article
|
|
189
|
+
nt fact-check --input article.md -o report.md
|
|
190
|
+
|
|
191
|
+
# Check a single claim
|
|
192
|
+
nt fact-check --claim "GPT-4 has 1.8 trillion parameters" --json
|
|
193
|
+
|
|
194
|
+
# Strict mode with extra cross-verification
|
|
195
|
+
nt fact-check --input report.md --strict -o verified.md
|
|
196
|
+
\`\`\`
|
|
197
|
+
|
|
198
|
+
| Flag | Description |
|
|
199
|
+
| ---------------- | ------------------------------------ |
|
|
200
|
+
| \`--claim <text>\` | Verify a single claim |
|
|
201
|
+
| \`--strict\` | Strict mode — more cross-referencing |
|
|
202
|
+
|
|
203
|
+
### \`nt analyze [topic..]\` — Framework Analysis
|
|
204
|
+
|
|
205
|
+
Agent: chief + researcher. Supports 12 analysis frameworks.
|
|
206
|
+
|
|
207
|
+
\`\`\`bash
|
|
208
|
+
# SWOT analysis
|
|
209
|
+
nt analyze --input research.md --framework swot -o analysis.md
|
|
210
|
+
|
|
211
|
+
# First principles
|
|
212
|
+
nt analyze "Why are all tools moving to CLI" --framework first-principles -o analysis.md
|
|
213
|
+
|
|
214
|
+
# Multiple frameworks combined
|
|
215
|
+
nt analyze --input data.md --framework "swot,pestel" -o combo.md
|
|
216
|
+
|
|
217
|
+
# JSON output
|
|
218
|
+
nt analyze "React vs Vue market position" --framework porter --json
|
|
219
|
+
\`\`\`
|
|
220
|
+
|
|
221
|
+
Available frameworks: \`swot\`, \`pestel\`, \`porter\`, \`first-principles\`, \`5why\`, \`jobs-to-be-done\`, \`blue-ocean\`, \`value-chain\`, \`bcg-matrix\`, \`ansoff\`, \`okr\`, \`systems-thinking\`
|
|
222
|
+
|
|
223
|
+
| Flag | Description |
|
|
224
|
+
| -------------------- | ----------------------------- |
|
|
225
|
+
| \`--framework <name>\` | Framework(s), comma-separated |
|
|
226
|
+
|
|
227
|
+
### \`nt extract [topic..]\` — Content Extraction
|
|
228
|
+
|
|
229
|
+
Agent: extractor. Extract structured content from documents, images, or web pages.
|
|
230
|
+
|
|
231
|
+
\`\`\`bash
|
|
232
|
+
# Extract from file
|
|
233
|
+
nt extract --input paper.pdf -o extracted.md
|
|
234
|
+
|
|
235
|
+
# Extract from URL
|
|
236
|
+
nt extract --url "https://example.com/article" -o page.md
|
|
237
|
+
|
|
238
|
+
# Prioritize tables
|
|
239
|
+
nt extract --input report.pdf --extract-tables --json
|
|
240
|
+
\`\`\`
|
|
241
|
+
|
|
242
|
+
| Flag | Description |
|
|
243
|
+
| ------------------ | ---------------------------------- |
|
|
244
|
+
| \`--url <url>\` | Web page URL to extract from |
|
|
245
|
+
| \`--extract-tables\` | Prioritize table extraction |
|
|
246
|
+
| \`--extract-images\` | Extract and describe image content |
|
|
247
|
+
|
|
248
|
+
### \`nt archive <action>\` — Knowledge Base
|
|
249
|
+
|
|
250
|
+
Agent: archivist. Subcommands: \`store\`, \`search\`, \`list\`, \`delete\`.
|
|
251
|
+
|
|
252
|
+
\`\`\`bash
|
|
253
|
+
# Store content with tags
|
|
254
|
+
nt archive store --input research.md --tags "AI,Agent,2026"
|
|
255
|
+
|
|
256
|
+
# Semantic search
|
|
257
|
+
nt archive search --query "difference between MCP and CLI" --top 5 -o results.md
|
|
258
|
+
|
|
259
|
+
# List all entries (filtered by tag)
|
|
260
|
+
nt archive list --tags "AI" --json
|
|
261
|
+
|
|
262
|
+
# Delete entry
|
|
263
|
+
nt archive delete --id "entry-123"
|
|
264
|
+
\`\`\`
|
|
265
|
+
|
|
266
|
+
| Flag | Description |
|
|
267
|
+
| ---------------- | ------------------------------------- |
|
|
268
|
+
| \`--tags <csv>\` | Tags, comma-separated (store/list) |
|
|
269
|
+
| \`--query <text>\` | Search query (search, required) |
|
|
270
|
+
| \`--top <n>\` | Number of search results (default: 5) |
|
|
271
|
+
| \`--id <id>\` | Entry ID (delete, required) |
|
|
272
|
+
|
|
273
|
+
### \`nt pipeline [topic..]\` — Full Workflow
|
|
274
|
+
|
|
275
|
+
Agent: chief orchestrating all agents. Default steps: research → analyze → write → fact-check → edit.
|
|
276
|
+
|
|
277
|
+
\`\`\`bash
|
|
278
|
+
# Full pipeline from topic
|
|
279
|
+
nt pipeline "AI Agent trends 2026" --style newsletter --output-dir ./output/
|
|
280
|
+
|
|
281
|
+
# Custom steps
|
|
282
|
+
nt pipeline "MCP future" --steps "research,write,edit" -o result.md
|
|
283
|
+
|
|
284
|
+
# From existing material (skip research)
|
|
285
|
+
nt pipeline --input notes.md --steps "analyze,write,edit" -o final.md
|
|
286
|
+
|
|
287
|
+
# JSON output
|
|
288
|
+
nt pipeline "topic" --json
|
|
289
|
+
\`\`\`
|
|
290
|
+
|
|
291
|
+
| Flag | Default | Description |
|
|
292
|
+
| -------------------- | ---------------------------------------- | --------------------------------------------- |
|
|
293
|
+
| \`--steps <csv>\` | \`research,analyze,write,fact-check,edit\` | Workflow steps |
|
|
294
|
+
| \`--style <s>\` | | Final output writing style |
|
|
295
|
+
| \`--output-dir <dir>\` | | Output directory (one file per step) |
|
|
296
|
+
| \`--parallel\` | | Allow parallel execution of independent steps |
|
|
297
|
+
|
|
298
|
+
Default timeout: 600s (vs 300s for other commands).
|
|
299
|
+
|
|
300
|
+
---
|
|
301
|
+
|
|
302
|
+
## Pipeline Composition Patterns
|
|
303
|
+
|
|
304
|
+
Chain commands with \`&&\` for multi-step workflows:
|
|
305
|
+
|
|
306
|
+
\`\`\`bash
|
|
307
|
+
# Research → Write → Edit
|
|
308
|
+
nt research "AI Agents" -o /tmp/r.md \\
|
|
309
|
+
&& nt write --input /tmp/r.md --style newsletter -o /tmp/d.md \\
|
|
310
|
+
&& nt edit --input /tmp/d.md -o final.md
|
|
311
|
+
|
|
312
|
+
# Extract → Fact-check → Archive
|
|
313
|
+
nt extract --input paper.pdf -o /tmp/e.md \\
|
|
314
|
+
&& nt fact-check --input /tmp/e.md -o /tmp/fc.md \\
|
|
315
|
+
&& nt archive store --input /tmp/fc.md --tags "paper,verified"
|
|
316
|
+
|
|
317
|
+
# Research with JSON for downstream parsing
|
|
318
|
+
nt research "topic" --json | jq '.result' > research.txt
|
|
319
|
+
\`\`\`
|
|
320
|
+
|
|
321
|
+
## Agent Integration Examples
|
|
322
|
+
|
|
323
|
+
For AI agents (Claude Code, Cursor, etc.) calling \`nt\` programmatically:
|
|
324
|
+
|
|
325
|
+
\`\`\`bash
|
|
326
|
+
# Get structured output
|
|
327
|
+
result=$(nt research "Kubernetes best practices" --json)
|
|
328
|
+
echo "$result" | jq -r '.result' # extract text
|
|
329
|
+
echo "$result" | jq '.success' # check success
|
|
330
|
+
echo "$result" | jq '.metadata' # get metadata
|
|
331
|
+
|
|
332
|
+
# Check exit code
|
|
333
|
+
nt research "topic" --json -o out.json
|
|
334
|
+
if [ $? -eq 0 ]; then
|
|
335
|
+
echo "Success"
|
|
336
|
+
elif [ $? -eq 2 ]; then
|
|
337
|
+
echo "Model call failed, retry"
|
|
338
|
+
elif [ $? -eq 3 ]; then
|
|
339
|
+
echo "Timeout, increase with --timeout"
|
|
340
|
+
fi
|
|
341
|
+
|
|
342
|
+
# Write to file and verify
|
|
343
|
+
nt write "topic" -o draft.md && test -f draft.md && echo "File created"
|
|
344
|
+
\`\`\`
|
|
345
|
+
|
|
346
|
+
## Command → Agent Mapping
|
|
347
|
+
|
|
348
|
+
| Command | Agent(s) | Description |
|
|
349
|
+
| --------------- | --------------------------- | -------------------------------------- |
|
|
350
|
+
| \`nt research\` | researcher (+ fact-checker) | Deep research with source verification |
|
|
351
|
+
| \`nt write\` | writer | Multi-style content generation |
|
|
352
|
+
| \`nt edit\` | editor | Four-layer content refinement |
|
|
353
|
+
| \`nt fact-check\` | fact-checker | Claim and source verification |
|
|
354
|
+
| \`nt analyze\` | chief + researcher | 12 analysis frameworks |
|
|
355
|
+
| \`nt extract\` | extractor | Document/image/web extraction |
|
|
356
|
+
| \`nt archive\` | archivist | Knowledge base CRUD |
|
|
357
|
+
| \`nt pipeline\` | chief → all | Full orchestration pipeline |
|
|
358
|
+
`
|