@better-openclaw/core 1.0.13 → 1.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/composer.mjs.map +1 -1
- package/dist/generate.d.mts.map +1 -1
- package/dist/generate.mjs +11 -0
- package/dist/generate.mjs.map +1 -1
- package/dist/generators/env.d.mts.map +1 -1
- package/dist/generators/env.mjs +21 -1
- package/dist/generators/env.mjs.map +1 -1
- package/dist/generators/get-shit-done.d.mts +10 -0
- package/dist/generators/get-shit-done.d.mts.map +1 -0
- package/dist/generators/get-shit-done.mjs +38 -0
- package/dist/generators/get-shit-done.mjs.map +1 -0
- package/dist/generators/openclaw-json.d.mts +11 -0
- package/dist/generators/openclaw-json.d.mts.map +1 -0
- package/dist/generators/openclaw-json.mjs +410 -0
- package/dist/generators/openclaw-json.mjs.map +1 -0
- package/dist/index.d.mts +2 -2
- package/dist/resolver.mjs +3 -1
- package/dist/resolver.mjs.map +1 -1
- package/dist/schema.d.mts +81 -1
- package/dist/schema.d.mts.map +1 -1
- package/dist/schema.mjs +28 -2
- package/dist/schema.mjs.map +1 -1
- package/dist/services/definitions/pentagi.mjs.map +1 -1
- package/dist/services/definitions/pentestagent.mjs.map +1 -1
- package/dist/types.d.mts +6 -2
- package/dist/types.d.mts.map +1 -1
- package/dist/types.mjs.map +1 -1
- package/package.json +1 -1
- package/src/composer.ts +1 -1
- package/src/generate.ts +15 -0
- package/src/generators/env.ts +26 -0
- package/src/generators/get-shit-done.ts +43 -0
- package/src/generators/openclaw-json.ts +406 -0
- package/src/index.ts +2 -0
- package/src/resolver.ts +10 -8
- package/src/schema.ts +23 -0
- package/src/services/definitions/pentagi.ts +2 -1
- package/src/services/definitions/pentestagent.ts +2 -1
- package/src/types.ts +6 -0
|
@@ -0,0 +1,406 @@
|
|
|
1
|
+
import type { AiProvider, ResolverOutput } from "../types.js";
|
|
2
|
+
|
|
3
|
+
const PROVIDER_CONFIGS: Record<AiProvider, any> = {
|
|
4
|
+
openai: {
|
|
5
|
+
baseUrl: "https://api.openai.com/v1",
|
|
6
|
+
api: "openai-completions",
|
|
7
|
+
auth: "api-key",
|
|
8
|
+
apiKey: "${OPENAI_API_KEY}",
|
|
9
|
+
models: [
|
|
10
|
+
{
|
|
11
|
+
id: "gpt-4o",
|
|
12
|
+
name: "GPT-4o",
|
|
13
|
+
api: "openai-completions",
|
|
14
|
+
reasoning: false,
|
|
15
|
+
input: ["text", "image"],
|
|
16
|
+
cost: { input: 2.5, output: 10, cacheRead: 1.25, cacheWrite: 2.5 },
|
|
17
|
+
contextWindow: 128000,
|
|
18
|
+
maxTokens: 16384,
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
id: "gpt-4o-mini",
|
|
22
|
+
name: "GPT-4o Mini",
|
|
23
|
+
api: "openai-completions",
|
|
24
|
+
reasoning: false,
|
|
25
|
+
input: ["text", "image"],
|
|
26
|
+
cost: { input: 0.15, output: 0.6, cacheRead: 0.075, cacheWrite: 0.15 },
|
|
27
|
+
contextWindow: 128000,
|
|
28
|
+
maxTokens: 16384,
|
|
29
|
+
},
|
|
30
|
+
],
|
|
31
|
+
},
|
|
32
|
+
anthropic: {
|
|
33
|
+
baseUrl: "https://api.anthropic.com/v1/messages",
|
|
34
|
+
api: "anthropic-messages",
|
|
35
|
+
auth: "api-key",
|
|
36
|
+
apiKey: "${ANTHROPIC_API_KEY}",
|
|
37
|
+
models: [
|
|
38
|
+
{
|
|
39
|
+
id: "claude-3-5-sonnet-latest",
|
|
40
|
+
name: "Claude 3.5 Sonnet",
|
|
41
|
+
api: "anthropic-messages",
|
|
42
|
+
reasoning: false,
|
|
43
|
+
input: ["text", "image"],
|
|
44
|
+
cost: { input: 3.0, output: 15.0, cacheRead: 0.3, cacheWrite: 3.75 },
|
|
45
|
+
contextWindow: 200000,
|
|
46
|
+
maxTokens: 8192,
|
|
47
|
+
},
|
|
48
|
+
{
|
|
49
|
+
id: "claude-3-5-haiku-latest",
|
|
50
|
+
name: "Claude 3.5 Haiku",
|
|
51
|
+
api: "anthropic-messages",
|
|
52
|
+
reasoning: false,
|
|
53
|
+
input: ["text", "image"],
|
|
54
|
+
cost: { input: 0.8, output: 4.0, cacheRead: 0.08, cacheWrite: 1.0 },
|
|
55
|
+
contextWindow: 200000,
|
|
56
|
+
maxTokens: 8192,
|
|
57
|
+
},
|
|
58
|
+
],
|
|
59
|
+
},
|
|
60
|
+
google: {
|
|
61
|
+
baseUrl: "https://generativelanguage.googleapis.com/v1beta/openai",
|
|
62
|
+
api: "openai-completions",
|
|
63
|
+
auth: "api-key",
|
|
64
|
+
apiKey: "${GOOGLE_API_KEY}",
|
|
65
|
+
models: [
|
|
66
|
+
{
|
|
67
|
+
id: "gemini-2.5-pro",
|
|
68
|
+
name: "Gemini 2.5 Pro",
|
|
69
|
+
api: "openai-completions",
|
|
70
|
+
reasoning: false,
|
|
71
|
+
input: ["text", "image"],
|
|
72
|
+
cost: { input: 2.0, output: 8.0, cacheRead: 0.5, cacheWrite: 2.0 },
|
|
73
|
+
contextWindow: 2000000,
|
|
74
|
+
maxTokens: 8192,
|
|
75
|
+
},
|
|
76
|
+
{
|
|
77
|
+
id: "gemini-2.5-flash",
|
|
78
|
+
name: "Gemini 2.5 Flash",
|
|
79
|
+
api: "openai-completions",
|
|
80
|
+
reasoning: false,
|
|
81
|
+
input: ["text", "image"],
|
|
82
|
+
cost: { input: 0.15, output: 0.6, cacheRead: 0.0375, cacheWrite: 0.15 },
|
|
83
|
+
contextWindow: 1000000,
|
|
84
|
+
maxTokens: 8192,
|
|
85
|
+
},
|
|
86
|
+
],
|
|
87
|
+
},
|
|
88
|
+
xai: {
|
|
89
|
+
baseUrl: "https://api.x.ai/v1",
|
|
90
|
+
api: "openai-completions",
|
|
91
|
+
auth: "api-key",
|
|
92
|
+
apiKey: "${XAI_API_KEY}",
|
|
93
|
+
models: [
|
|
94
|
+
{
|
|
95
|
+
id: "grok-2-latest",
|
|
96
|
+
name: "Grok 2",
|
|
97
|
+
api: "openai-completions",
|
|
98
|
+
reasoning: false,
|
|
99
|
+
input: ["text", "image"],
|
|
100
|
+
cost: { input: 2.0, output: 10.0, cacheRead: 1.0, cacheWrite: 2.0 },
|
|
101
|
+
contextWindow: 131072,
|
|
102
|
+
maxTokens: 32768,
|
|
103
|
+
},
|
|
104
|
+
],
|
|
105
|
+
},
|
|
106
|
+
deepseek: {
|
|
107
|
+
baseUrl: "https://api.deepseek.com/v1",
|
|
108
|
+
api: "openai-completions",
|
|
109
|
+
auth: "api-key",
|
|
110
|
+
apiKey: "${DEEPSEEK_API_KEY}",
|
|
111
|
+
models: [
|
|
112
|
+
{
|
|
113
|
+
id: "deepseek-chat",
|
|
114
|
+
name: "DeepSeek V3",
|
|
115
|
+
api: "openai-completions",
|
|
116
|
+
reasoning: false,
|
|
117
|
+
input: ["text"],
|
|
118
|
+
cost: { input: 0.14, output: 0.28, cacheRead: 0.014, cacheWrite: 0.14 },
|
|
119
|
+
contextWindow: 65536,
|
|
120
|
+
maxTokens: 8192,
|
|
121
|
+
},
|
|
122
|
+
{
|
|
123
|
+
id: "deepseek-reasoner",
|
|
124
|
+
name: "DeepSeek R1",
|
|
125
|
+
api: "openai-completions",
|
|
126
|
+
reasoning: true,
|
|
127
|
+
input: ["text"],
|
|
128
|
+
cost: { input: 0.55, output: 2.19, cacheRead: 0.14, cacheWrite: 0.55 },
|
|
129
|
+
contextWindow: 65536,
|
|
130
|
+
maxTokens: 8192,
|
|
131
|
+
},
|
|
132
|
+
],
|
|
133
|
+
},
|
|
134
|
+
groq: {
|
|
135
|
+
baseUrl: "https://api.groq.com/openai/v1",
|
|
136
|
+
api: "openai-completions",
|
|
137
|
+
auth: "api-key",
|
|
138
|
+
apiKey: "${GROQ_API_KEY}",
|
|
139
|
+
models: [
|
|
140
|
+
{
|
|
141
|
+
id: "llama3-70b-8192",
|
|
142
|
+
name: "LLaMA3 70B (Groq)",
|
|
143
|
+
api: "openai-completions",
|
|
144
|
+
reasoning: false,
|
|
145
|
+
input: ["text"],
|
|
146
|
+
cost: { input: 0.59, output: 0.79 },
|
|
147
|
+
contextWindow: 8192,
|
|
148
|
+
maxTokens: 8192,
|
|
149
|
+
},
|
|
150
|
+
],
|
|
151
|
+
},
|
|
152
|
+
openrouter: {
|
|
153
|
+
baseUrl: "https://openrouter.ai/api/v1",
|
|
154
|
+
api: "openai-completions",
|
|
155
|
+
auth: "api-key",
|
|
156
|
+
apiKey: "${OPENROUTER_API_KEY}",
|
|
157
|
+
models: [
|
|
158
|
+
{
|
|
159
|
+
id: "anthropic/claude-3.5-sonnet",
|
|
160
|
+
name: "Claude 3.5 Sonnet (OpenRouter)",
|
|
161
|
+
api: "openai-completions",
|
|
162
|
+
reasoning: false,
|
|
163
|
+
input: ["text", "image"],
|
|
164
|
+
cost: { input: 3.0, output: 15.0 },
|
|
165
|
+
contextWindow: 200000,
|
|
166
|
+
maxTokens: 8192,
|
|
167
|
+
},
|
|
168
|
+
],
|
|
169
|
+
},
|
|
170
|
+
mistral: {
|
|
171
|
+
baseUrl: "https://api.mistral.ai/v1",
|
|
172
|
+
api: "openai-completions",
|
|
173
|
+
auth: "api-key",
|
|
174
|
+
apiKey: "${MISTRAL_API_KEY}",
|
|
175
|
+
models: [
|
|
176
|
+
{
|
|
177
|
+
id: "mistral-large-latest",
|
|
178
|
+
name: "Mistral Large",
|
|
179
|
+
api: "openai-completions",
|
|
180
|
+
reasoning: false,
|
|
181
|
+
input: ["text"],
|
|
182
|
+
cost: { input: 2.0, output: 6.0 },
|
|
183
|
+
contextWindow: 131000,
|
|
184
|
+
maxTokens: 8192,
|
|
185
|
+
},
|
|
186
|
+
],
|
|
187
|
+
},
|
|
188
|
+
together: {
|
|
189
|
+
baseUrl: "https://api.together.xyz/v1",
|
|
190
|
+
api: "openai-completions",
|
|
191
|
+
auth: "api-key",
|
|
192
|
+
apiKey: "${TOGETHER_API_KEY}",
|
|
193
|
+
models: [
|
|
194
|
+
{
|
|
195
|
+
id: "meta-llama/Llama-3.3-70B-Instruct-Turbo",
|
|
196
|
+
name: "LLaMA 3.3 70B (Together)",
|
|
197
|
+
api: "openai-completions",
|
|
198
|
+
reasoning: false,
|
|
199
|
+
input: ["text"],
|
|
200
|
+
cost: { input: 0.88, output: 0.88 },
|
|
201
|
+
contextWindow: 131072,
|
|
202
|
+
maxTokens: 4096,
|
|
203
|
+
},
|
|
204
|
+
],
|
|
205
|
+
},
|
|
206
|
+
ollama: {
|
|
207
|
+
baseUrl: "http://host.docker.internal:11434/v1",
|
|
208
|
+
api: "openai-completions",
|
|
209
|
+
auth: "none",
|
|
210
|
+
models: [
|
|
211
|
+
{
|
|
212
|
+
id: "llama3:latest",
|
|
213
|
+
name: "LLaMA 3 (Local)",
|
|
214
|
+
api: "openai-completions",
|
|
215
|
+
reasoning: false,
|
|
216
|
+
input: ["text"],
|
|
217
|
+
cost: { input: 0, output: 0 },
|
|
218
|
+
contextWindow: 8192,
|
|
219
|
+
maxTokens: 4096,
|
|
220
|
+
},
|
|
221
|
+
{
|
|
222
|
+
id: "deepseek-r1:latest",
|
|
223
|
+
name: "DeepSeek R1 (Local)",
|
|
224
|
+
api: "openai-completions",
|
|
225
|
+
reasoning: true,
|
|
226
|
+
input: ["text"],
|
|
227
|
+
cost: { input: 0, output: 0 },
|
|
228
|
+
contextWindow: 8192,
|
|
229
|
+
maxTokens: 4096,
|
|
230
|
+
},
|
|
231
|
+
],
|
|
232
|
+
},
|
|
233
|
+
lmstudio: {
|
|
234
|
+
baseUrl: "http://host.docker.internal:1234/v1",
|
|
235
|
+
api: "openai-completions",
|
|
236
|
+
auth: "none",
|
|
237
|
+
models: [
|
|
238
|
+
{
|
|
239
|
+
id: "local-model",
|
|
240
|
+
name: "LM Studio Model",
|
|
241
|
+
api: "openai-completions",
|
|
242
|
+
reasoning: false,
|
|
243
|
+
input: ["text"],
|
|
244
|
+
cost: { input: 0, output: 0 },
|
|
245
|
+
contextWindow: 8192,
|
|
246
|
+
maxTokens: 4096,
|
|
247
|
+
},
|
|
248
|
+
],
|
|
249
|
+
},
|
|
250
|
+
vllm: {
|
|
251
|
+
baseUrl: "http://host.docker.internal:8000/v1",
|
|
252
|
+
api: "openai-completions",
|
|
253
|
+
auth: "none",
|
|
254
|
+
models: [
|
|
255
|
+
{
|
|
256
|
+
id: "local-model",
|
|
257
|
+
name: "vLLM Model",
|
|
258
|
+
api: "openai-completions",
|
|
259
|
+
reasoning: false,
|
|
260
|
+
input: ["text"],
|
|
261
|
+
cost: { input: 0, output: 0 },
|
|
262
|
+
contextWindow: 8192,
|
|
263
|
+
maxTokens: 4096,
|
|
264
|
+
},
|
|
265
|
+
],
|
|
266
|
+
},
|
|
267
|
+
};
|
|
268
|
+
|
|
269
|
+
/**
|
|
270
|
+
* Generates a default `openclaw/config/openclaw.json` tailored
|
|
271
|
+
* to the services installed in the stack.
|
|
272
|
+
*/
|
|
273
|
+
export function generateOpenClawConfig(resolved: ResolverOutput): string {
|
|
274
|
+
const defaultSkills: Record<string, { enabled: boolean }> = {};
|
|
275
|
+
|
|
276
|
+
// Auto-enable any OpenClaw skills attached to installed companion services
|
|
277
|
+
for (const { definition } of resolved.services) {
|
|
278
|
+
for (const skill of definition.skills) {
|
|
279
|
+
if (skill.autoInstall) {
|
|
280
|
+
defaultSkills[skill.skillId] = { enabled: true };
|
|
281
|
+
}
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
const providers: Record<string, any> = {};
|
|
286
|
+
const agentsModels: Record<string, { alias: string }> = {};
|
|
287
|
+
let primaryModel = "";
|
|
288
|
+
|
|
289
|
+
// Always default to empty or the first choice, fallback to openai if nothing was passed
|
|
290
|
+
const selectedProviders =
|
|
291
|
+
resolved.aiProviders && resolved.aiProviders.length > 0
|
|
292
|
+
? resolved.aiProviders
|
|
293
|
+
: (["openai"] as AiProvider[]);
|
|
294
|
+
|
|
295
|
+
for (const provider of selectedProviders) {
|
|
296
|
+
const meta = PROVIDER_CONFIGS[provider];
|
|
297
|
+
if (!meta) continue;
|
|
298
|
+
|
|
299
|
+
providers[provider] = {
|
|
300
|
+
baseUrl: meta.baseUrl,
|
|
301
|
+
api: meta.api,
|
|
302
|
+
auth: meta.auth,
|
|
303
|
+
...(meta.apiKey ? { apiKey: meta.apiKey } : {}),
|
|
304
|
+
models: meta.models,
|
|
305
|
+
};
|
|
306
|
+
|
|
307
|
+
for (const m of meta.models) {
|
|
308
|
+
const fullId = `${provider}/${m.id}`;
|
|
309
|
+
agentsModels[fullId] = { alias: m.name };
|
|
310
|
+
if (!primaryModel) primaryModel = fullId; // Use the very first model mapped as the global system default
|
|
311
|
+
}
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
const authProfiles: Record<string, any> = {
|
|
315
|
+
"local:default": {
|
|
316
|
+
provider: "local",
|
|
317
|
+
mode: "token",
|
|
318
|
+
},
|
|
319
|
+
};
|
|
320
|
+
|
|
321
|
+
// Add provider auth profiles too
|
|
322
|
+
for (const provider of Object.keys(providers)) {
|
|
323
|
+
authProfiles[`${provider}:default`] = {
|
|
324
|
+
provider: provider,
|
|
325
|
+
mode: "api_key",
|
|
326
|
+
};
|
|
327
|
+
}
|
|
328
|
+
|
|
329
|
+
const config = {
|
|
330
|
+
wizard: {
|
|
331
|
+
lastRunAt: new Date().toISOString(),
|
|
332
|
+
lastRunVersion: "2026.2.23",
|
|
333
|
+
lastRunCommand: "auto-generated-by-better-openclaw",
|
|
334
|
+
lastRunMode: "local",
|
|
335
|
+
},
|
|
336
|
+
auth: {
|
|
337
|
+
profiles: authProfiles,
|
|
338
|
+
},
|
|
339
|
+
models: {
|
|
340
|
+
mode: "merge",
|
|
341
|
+
providers,
|
|
342
|
+
},
|
|
343
|
+
agents: {
|
|
344
|
+
defaults: {
|
|
345
|
+
model: {
|
|
346
|
+
primary: primaryModel,
|
|
347
|
+
},
|
|
348
|
+
models: agentsModels,
|
|
349
|
+
workspace: "/home/node/.openclaw/workspace",
|
|
350
|
+
compaction: { mode: "safeguard" },
|
|
351
|
+
maxConcurrent: 4,
|
|
352
|
+
subagents: { maxConcurrent: 8 },
|
|
353
|
+
},
|
|
354
|
+
},
|
|
355
|
+
messages: {
|
|
356
|
+
ackReactionScope: "group-mentions",
|
|
357
|
+
},
|
|
358
|
+
commands: {
|
|
359
|
+
native: "auto",
|
|
360
|
+
nativeSkills: "auto",
|
|
361
|
+
},
|
|
362
|
+
hooks: {
|
|
363
|
+
internal: {
|
|
364
|
+
enabled: true,
|
|
365
|
+
entries: {
|
|
366
|
+
"boot-md": { enabled: true },
|
|
367
|
+
"bootstrap-extra-files": { enabled: true },
|
|
368
|
+
"command-logger": { enabled: true },
|
|
369
|
+
"session-memory": { enabled: true },
|
|
370
|
+
},
|
|
371
|
+
},
|
|
372
|
+
},
|
|
373
|
+
channels: {},
|
|
374
|
+
gateway: {
|
|
375
|
+
port: 18791,
|
|
376
|
+
mode: "local",
|
|
377
|
+
bind: "loopback",
|
|
378
|
+
auth: {
|
|
379
|
+
mode: "token",
|
|
380
|
+
token: "${OPENCLAW_GATEWAY_TOKEN}",
|
|
381
|
+
},
|
|
382
|
+
tailscale: {
|
|
383
|
+
mode: "serve",
|
|
384
|
+
resetOnExit: true,
|
|
385
|
+
},
|
|
386
|
+
nodes: {
|
|
387
|
+
denyCommands: ["camera.snap", "camera.clip", "screen.record"],
|
|
388
|
+
},
|
|
389
|
+
},
|
|
390
|
+
skills: {
|
|
391
|
+
install: { nodeManager: "pnpm" },
|
|
392
|
+
...(Object.keys(defaultSkills).length > 0 ? { entries: defaultSkills } : {}),
|
|
393
|
+
},
|
|
394
|
+
plugins: {
|
|
395
|
+
entries: {
|
|
396
|
+
"memory-core": { enabled: true },
|
|
397
|
+
},
|
|
398
|
+
},
|
|
399
|
+
meta: {
|
|
400
|
+
lastTouchedVersion: "2026.2.23",
|
|
401
|
+
lastTouchedAt: new Date().toISOString(),
|
|
402
|
+
},
|
|
403
|
+
};
|
|
404
|
+
|
|
405
|
+
return JSON.stringify(config, null, 2);
|
|
406
|
+
}
|
package/src/index.ts
CHANGED
|
@@ -83,6 +83,7 @@ export {
|
|
|
83
83
|
// ─── Types ──────────────────────────────────────────────────────────────────
|
|
84
84
|
export type {
|
|
85
85
|
AddedDependency,
|
|
86
|
+
AiProvider,
|
|
86
87
|
ApiError,
|
|
87
88
|
CategoryInfo,
|
|
88
89
|
ComposeOptions,
|
|
@@ -94,6 +95,7 @@ export type {
|
|
|
94
95
|
GenerationInput,
|
|
95
96
|
GenerationMetadata,
|
|
96
97
|
GenerationResult,
|
|
98
|
+
GsdRuntime,
|
|
97
99
|
HealthCheck,
|
|
98
100
|
Maturity,
|
|
99
101
|
NativePlatform,
|
package/src/resolver.ts
CHANGED
|
@@ -241,14 +241,16 @@ export function resolve(input: ResolverInput): ResolverOutput {
|
|
|
241
241
|
const isValid = errors.length === 0;
|
|
242
242
|
|
|
243
243
|
return {
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
244
|
+
services,
|
|
245
|
+
addedDependencies,
|
|
246
|
+
removedConflicts: [],
|
|
247
|
+
warnings,
|
|
248
|
+
errors,
|
|
249
|
+
isValid,
|
|
250
|
+
estimatedMemoryMB,
|
|
251
|
+
aiProviders: input.aiProviders ?? [],
|
|
252
|
+
gsdRuntimes: []
|
|
253
|
+
};
|
|
252
254
|
}
|
|
253
255
|
|
|
254
256
|
/**
|
package/src/schema.ts
CHANGED
|
@@ -48,6 +48,23 @@ export const DeploymentTargetSchema = z.enum(["local", "vps", "homelab", "clawex
|
|
|
48
48
|
|
|
49
49
|
export const OutputFormatSchema = z.enum(["directory", "tar", "zip"]);
|
|
50
50
|
|
|
51
|
+
// Every AI provider the generator can emit openclaw.json config for.
// NOTE(review): this list must stay in step with the PROVIDER_CONFIGS catalog
// in generators/openclaw-json.ts — confirm when adding a provider.
export const AiProviderSchema = z.enum([
  "openai",
  "anthropic",
  "google",
  "xai",
  "deepseek",
  "groq",
  "openrouter",
  "mistral",
  "together",
  // Local, self-hosted runtimes (no API key required).
  "ollama",
  "lmstudio",
  "vllm",
]);

// Agent runtimes accepted for get-shit-done generation.
export const GsdRuntimeSchema = z.enum(["claude", "opencode", "gemini", "codex"]);
|
|
67
|
+
|
|
51
68
|
// ─── Sub-Schemas ────────────────────────────────────────────────────────────
|
|
52
69
|
|
|
53
70
|
export const PortMappingSchema = z.object({
|
|
@@ -213,6 +230,8 @@ export const GenerationInputSchema = z.object({
|
|
|
213
230
|
}),
|
|
214
231
|
services: z.array(z.string()).default([]),
|
|
215
232
|
skillPacks: z.array(z.string()).default([]),
|
|
233
|
+
aiProviders: z.array(AiProviderSchema).default([]),
|
|
234
|
+
gsdRuntimes: z.array(GsdRuntimeSchema).default([]),
|
|
216
235
|
proxy: ProxyTypeSchema.default("none"),
|
|
217
236
|
domain: z.string().optional(),
|
|
218
237
|
gpu: z.boolean().default(false),
|
|
@@ -254,6 +273,8 @@ export const ResolverOutputSchema = z.object({
|
|
|
254
273
|
errors: z.array(ErrorSchema),
|
|
255
274
|
isValid: z.boolean(),
|
|
256
275
|
estimatedMemoryMB: z.number().int().min(0),
|
|
276
|
+
aiProviders: z.array(AiProviderSchema).default([]),
|
|
277
|
+
gsdRuntimes: z.array(GsdRuntimeSchema).default([]),
|
|
257
278
|
});
|
|
258
279
|
|
|
259
280
|
// ─── Compose Options ────────────────────────────────────────────────────────
|
|
@@ -275,6 +296,8 @@ export const ComposeOptionsSchema = z.object({
|
|
|
275
296
|
export const ValidateRequestSchema = z.object({
|
|
276
297
|
services: z.array(z.string()),
|
|
277
298
|
skillPacks: z.array(z.string()).default([]),
|
|
299
|
+
aiProviders: z.array(AiProviderSchema).default([]),
|
|
300
|
+
gsdRuntimes: z.array(GsdRuntimeSchema).default([]),
|
|
278
301
|
proxy: ProxyTypeSchema.default("none"),
|
|
279
302
|
domain: z.string().optional(),
|
|
280
303
|
gpu: z.boolean().default(false),
|
|
@@ -3,7 +3,8 @@ import type { ServiceDefinition } from "../../types.js";
|
|
|
3
3
|
export const pentagiDefinition: ServiceDefinition = {
|
|
4
4
|
id: "pentagi",
|
|
5
5
|
name: "PentAGI",
|
|
6
|
-
description:
|
|
6
|
+
description:
|
|
7
|
+
"Fully autonomous AI Agents system capable of performing complex penetration testing tasks.",
|
|
7
8
|
category: "security",
|
|
8
9
|
icon: "🕵️",
|
|
9
10
|
|
|
@@ -3,7 +3,8 @@ import type { ServiceDefinition } from "../../types.js";
|
|
|
3
3
|
export const pentestagentDefinition: ServiceDefinition = {
|
|
4
4
|
id: "pentestagent",
|
|
5
5
|
name: "PentestAgent",
|
|
6
|
-
description:
|
|
6
|
+
description:
|
|
7
|
+
"AI agent framework for black-box security testing via internal Docker isolated environments.",
|
|
7
8
|
category: "security",
|
|
8
9
|
icon: "🔍",
|
|
9
10
|
|
package/src/types.ts
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import type { z } from "zod";
|
|
2
2
|
import type {
|
|
3
3
|
AddedDependencySchema,
|
|
4
|
+
AiProviderSchema,
|
|
4
5
|
ApiErrorSchema,
|
|
5
6
|
ComposeOptionsSchema,
|
|
6
7
|
DeploymentTargetSchema,
|
|
@@ -9,6 +10,7 @@ import type {
|
|
|
9
10
|
EnvVariableSchema,
|
|
10
11
|
ErrorSchema,
|
|
11
12
|
GenerationInputSchema,
|
|
13
|
+
GsdRuntimeSchema,
|
|
12
14
|
HealthCheckSchema,
|
|
13
15
|
MaturitySchema,
|
|
14
16
|
NativePlatformSchema,
|
|
@@ -34,6 +36,8 @@ import type {
|
|
|
34
36
|
|
|
35
37
|
// ─── Inferred Types ─────────────────────────────────────────────────────────
|
|
36
38
|
|
|
39
|
+
export type AiProvider = z.infer<typeof AiProviderSchema>;
|
|
40
|
+
export type GsdRuntime = z.infer<typeof GsdRuntimeSchema>;
|
|
37
41
|
export type ServiceCategory = z.infer<typeof ServiceCategorySchema>;
|
|
38
42
|
export type Maturity = z.infer<typeof MaturitySchema>;
|
|
39
43
|
export type Platform = z.infer<typeof PlatformSchema>;
|
|
@@ -77,6 +81,8 @@ export type ApiError = z.infer<typeof ApiErrorSchema>;
|
|
|
77
81
|
export interface ResolverInput {
|
|
78
82
|
services: string[];
|
|
79
83
|
skillPacks: string[];
|
|
84
|
+
aiProviders?: AiProvider[];
|
|
85
|
+
gsdRuntimes?: GsdRuntime[];
|
|
80
86
|
proxy?: ProxyType;
|
|
81
87
|
gpu?: boolean;
|
|
82
88
|
platform?: Platform;
|