@poolzin/pool-bot 2026.2.19 → 2026.2.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/dist/agents/model-auth.js +12 -0
- package/dist/agents/model-fallback.js +24 -0
- package/dist/agents/models-config.providers.js +85 -0
- package/dist/agents/openclaw-tools.js +16 -0
- package/dist/agents/pi-embedded-runner/run/attempt.js +15 -0
- package/dist/agents/poolbot-tools.js +16 -0
- package/dist/agents/provider/config-loader.js +76 -0
- package/dist/agents/provider/index.js +15 -0
- package/dist/agents/provider/integration.js +136 -0
- package/dist/agents/provider/models-dev.js +129 -0
- package/dist/agents/provider/rate-limits.js +458 -0
- package/dist/agents/provider/request-monitor.js +449 -0
- package/dist/agents/provider/session-binding.js +376 -0
- package/dist/agents/provider/token-pool.js +541 -0
- package/dist/agents/tools/deep-research-tool.js +225 -0
- package/dist/agents/tools/image-generate-tool.js +235 -0
- package/dist/build-info.json +3 -3
- package/package.json +1 -1
- package/skills/plcode-controller/SKILL.md +156 -0
- package/skills/plcode-controller/assets/operator-prompts.md +65 -0
- package/skills/plcode-controller/references/command-cheatsheet.md +53 -0
- package/skills/plcode-controller/references/failure-handling.md +60 -0
- package/skills/plcode-controller/references/model-selection.md +57 -0
- package/skills/plcode-controller/references/plan-vs-build.md +52 -0
- package/skills/plcode-controller/references/question-handling.md +40 -0
- package/skills/plcode-controller/references/session-management.md +63 -0
- package/skills/plcode-controller/references/workflow.md +35 -0
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
import { Type } from "@sinclair/typebox";
|
|
2
|
+
import { ensureAuthProfileStore, listProfilesForProvider } from "../auth-profiles.js";
|
|
3
|
+
import { resolveEnvApiKey } from "../model-auth.js";
|
|
4
|
+
import { jsonResult, readNumberParam, readStringParam } from "./common.js";
|
|
5
|
+
// ---------------------------------------------------------------------------
|
|
6
|
+
// Constants
|
|
7
|
+
// ---------------------------------------------------------------------------
|
|
8
|
+
const ZAI_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
|
|
9
|
+
/**
|
|
10
|
+
* GLM-4.7-Flash is FREE and supports the built-in web_search tool ($0.01/use).
|
|
11
|
+
* Perfect for deep research: model synthesis is free, search costs are negligible.
|
|
12
|
+
*/
|
|
13
|
+
const RESEARCH_MODEL = "GLM-4.7-Flash";
|
|
14
|
+
// ---------------------------------------------------------------------------
|
|
15
|
+
// Schema
|
|
16
|
+
// ---------------------------------------------------------------------------
|
|
17
|
+
const DeepResearchSchema = Type.Object({
|
|
18
|
+
topic: Type.String({
|
|
19
|
+
description: "The research topic or question. Be specific — include what aspects to cover, time range, and scope.",
|
|
20
|
+
}),
|
|
21
|
+
depth: Type.Optional(Type.String({
|
|
22
|
+
description: 'Research depth: "shallow" (quick overview, 1 search), "standard" (balanced, 2-3 searches, default), or "deep" (thorough, 4-5 searches with follow-ups).',
|
|
23
|
+
})),
|
|
24
|
+
language: Type.Optional(Type.String({
|
|
25
|
+
description: 'Language for the research report. Default: "en" (English). Use ISO 639-1 codes (e.g. "pt", "zh", "ja").',
|
|
26
|
+
})),
|
|
27
|
+
max_sources: Type.Optional(Type.Number({
|
|
28
|
+
description: "Maximum number of sources to include. Default: 10. Range: 1–20.",
|
|
29
|
+
})),
|
|
30
|
+
});
|
|
31
|
+
// ---------------------------------------------------------------------------
|
|
32
|
+
// Helpers
|
|
33
|
+
// ---------------------------------------------------------------------------
|
|
34
|
+
/**
 * Resolve the Z.AI API key from environment or auth profiles.
 * Same logic as image-generate-tool.ts — kept as a local copy to avoid
 * cross-tool coupling.
 *
 * Lookup order: environment key first, then stored auth profiles for the
 * "zai" provider (either "api_key" or "token" credentials).
 *
 * @param {string} agentDir - Agent directory used to locate the profile store.
 * @returns {string|null} The resolved key, or null when none is available.
 */
function resolveZaiApiKey(agentDir) {
    const fromEnv = resolveEnvApiKey("zai");
    if (fromEnv?.apiKey) {
        return fromEnv.apiKey;
    }
    // No keychain prompt: this runs during tool registration and must not block.
    const store = ensureAuthProfileStore(agentDir, { allowKeychainPrompt: false });
    for (const profileId of listProfilesForProvider(store, "zai")) {
        const credential = store.profiles[profileId];
        switch (credential?.type) {
            case "api_key":
                return credential.key;
            case "token":
                return credential.token;
            default:
                continue;
        }
    }
    return null;
}
|
|
56
|
+
/**
 * Map a free-form depth string onto one of the three supported levels.
 * Accepts a few synonyms; anything unknown or missing becomes "standard".
 *
 * @param {string|undefined} raw - User-supplied depth value.
 * @returns {"shallow"|"standard"|"deep"}
 */
function normalizeDepth(raw) {
    const value = raw?.trim().toLowerCase();
    switch (value) {
        case "shallow":
        case "quick":
        case "brief":
            return "shallow";
        case "deep":
        case "thorough":
        case "comprehensive":
            return "deep";
        default:
            return "standard";
    }
}
|
|
66
|
+
/**
 * Build the system prompt for the research agent.
 * Tailored to the requested depth and language.
 *
 * @param {"shallow"|"standard"|"deep"} depth - Normalized research depth.
 * @param {string} language - ISO 639-1 language code for the report.
 * @param {number} maxSources - Upper bound on cited sources.
 * @returns {string} Newline-joined prompt text.
 */
function buildSystemPrompt(depth, language, maxSources) {
    const guidanceByDepth = {
        shallow: "Provide a concise overview. Use 1 web search to gather key facts. Keep the report brief — 3-5 paragraphs max.",
        standard: "Provide a balanced research report. Use 2-3 web searches to gather diverse perspectives. Structure with clear sections and cite sources.",
        deep: "Provide a thorough, in-depth research report. Use 4-5 web searches covering different angles (academic, news, expert opinions, primary sources). Include nuanced analysis, counterarguments, and comprehensive source citations.",
    };
    // English needs no extra directive; any other language gets an explicit one.
    const languageNote = language === "en"
        ? ""
        : `\nWrite the entire report in ${language} language. Search queries can be in any language for best results, but the final output must be in ${language}.`;
    const promptLines = [
        "You are a professional research analyst. Your task is to produce a well-structured research report on the given topic.",
        "",
        `Research depth: ${depth}.`,
        guidanceByDepth[depth],
        "",
        "Guidelines:",
        "- Use the web_search tool to find current, reliable information.",
        "- Cross-reference multiple sources for accuracy.",
        "- Cite all sources with titles and URLs.",
        `- Include up to ${maxSources} sources.`,
        "- Structure the report with: Summary, Key Findings, Detailed Analysis, Sources.",
        "- Be factual and objective. Clearly distinguish facts from analysis.",
        "- Include dates for time-sensitive information.",
        languageNote,
    ];
    // Guard against an unknown depth leaving an undefined entry in the list.
    return promptLines.filter((line) => line !== undefined).join("\n");
}
|
|
98
|
+
/**
 * Call Z.AI chat completions with web_search tool enabled.
 *
 * @param {{apiKey: string, topic: string, depth: string, language: string, maxSources: number}} params
 * @returns {Promise<{report: string, sources: Array<object>, usage: {promptTokens: number, completionTokens: number, totalTokens: number}}>}
 * @throws {Error} on HTTP failure, API-reported errors, or an empty completion.
 */
async function callZaiResearch(params) {
    const requestBody = {
        model: RESEARCH_MODEL,
        messages: [
            { role: "system", content: buildSystemPrompt(params.depth, params.language, params.maxSources) },
            { role: "user", content: params.topic },
        ],
        tools: [
            {
                type: "web_search",
                web_search: {
                    enable: true,
                    // Ask the API to echo search hits back so sources can be surfaced.
                    search_result: true,
                },
            },
        ],
        temperature: 0.3, // low temperature keeps the synthesis factual
        stream: false,
    };
    const response = await fetch(`${ZAI_BASE_URL}/chat/completions`, {
        method: "POST",
        headers: {
            Authorization: `Bearer ${params.apiKey}`,
            "Content-Type": "application/json",
        },
        body: JSON.stringify(requestBody),
    });
    if (!response.ok) {
        const detail = await response.text().catch(() => "unknown error");
        throw new Error(`Z.AI research request failed (HTTP ${response.status}): ${detail}`);
    }
    const payload = (await response.json());
    if (payload.error) {
        throw new Error(`Z.AI research error: ${payload.error.message} (${payload.error.code})`);
    }
    const report = payload.choices?.[0]?.message?.content;
    if (!report) {
        throw new Error("Z.AI research returned no content.");
    }
    return {
        report,
        // NOTE(review): assumes the API returns search hits under `web_search` —
        // confirm against the Z.AI response schema.
        sources: payload.web_search ?? [],
        usage: {
            promptTokens: payload.usage?.prompt_tokens ?? 0,
            completionTokens: payload.usage?.completion_tokens ?? 0,
            totalTokens: payload.usage?.total_tokens ?? 0,
        },
    };
}
|
|
151
|
+
// ---------------------------------------------------------------------------
|
|
152
|
+
// Factory
|
|
153
|
+
// ---------------------------------------------------------------------------
|
|
154
|
+
/**
 * Create the deep-research tool powered by Z.AI GLM-4.7-Flash + web_search.
 *
 * Cost breakdown:
 * - GLM-4.7-Flash inference: FREE
 * - web_search: $0.01/use (1-5 searches depending on depth)
 * - Total per research query: ~$0.01–$0.05
 *
 * Returns null if no Z.AI API key is available (graceful degradation).
 *
 * @param {{agentDir?: string}|undefined} options
 * @returns {object|null} Tool definition, or null when unavailable.
 */
export function createDeepResearchTool(options) {
    const agentDir = options?.agentDir?.trim();
    if (!agentDir) {
        return null;
    }
    const apiKey = resolveZaiApiKey(agentDir);
    if (!apiKey) {
        return null;
    }
    return {
        label: "Deep Research",
        name: "deep_research",
        description: [
            "Perform deep web research on any topic using Z.AI GLM-4.7-Flash + web search.",
            "Returns a structured research report with citations and sources.",
            "",
            "Depth options:",
            '- "shallow": Quick overview (~$0.01, 1 search)',
            '- "standard": Balanced report (~$0.03, 2-3 searches)',
            '- "deep": Thorough analysis (~$0.05, 4-5 searches)',
            "",
            "Cost: GLM-4.7-Flash is FREE; only web search costs $0.01/use.",
            "Best for: current events, technical topics, market research, fact-checking.",
        ].join("\n"),
        parameters: DeepResearchSchema,
        execute: async (_toolCallId, args) => {
            // Validate / normalize inputs.
            const topic = readStringParam(args, "topic", { required: true });
            const depth = normalizeDepth(readStringParam(args, "depth"));
            const language = readStringParam(args, "language") ?? "en";
            const requestedSources = readNumberParam(args, "max_sources", { integer: true });
            // Clamp to the documented 1–20 range, defaulting to 10.
            const maxSources = Math.min(20, Math.max(1, requestedSources ?? 10));
            const research = await callZaiResearch({
                apiKey,
                topic,
                depth,
                language,
                maxSources,
            });
            // Flatten raw search hits into a compact, numbered source list.
            const sources = research.sources.map((source, position) => ({
                index: position + 1,
                title: source.title,
                url: source.link,
                snippet: source.content?.slice(0, 200),
            }));
            // Cost estimate mirrors the depth-to-search-count mapping in the prompt.
            const searchCount = depth === "shallow" ? 1 : depth === "standard" ? 3 : 5;
            return jsonResult({
                report: research.report,
                sources,
                metadata: {
                    model: RESEARCH_MODEL,
                    depth,
                    language,
                    sourceCount: sources.length,
                    estimatedCost: `~$${(searchCount * 0.01).toFixed(2)}`,
                    tokens: research.usage,
                },
            });
        },
    };
}
|
|
@@ -0,0 +1,235 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import { Type } from "@sinclair/typebox";
|
|
4
|
+
import { resolveUserPath } from "../../utils.js";
|
|
5
|
+
import { ensureAuthProfileStore, listProfilesForProvider } from "../auth-profiles.js";
|
|
6
|
+
import { resolveEnvApiKey } from "../model-auth.js";
|
|
7
|
+
import { imageResult, readStringParam } from "./common.js";
|
|
8
|
+
// Z.AI image generation models
|
|
9
|
+
const ZAI_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
|
|
10
|
+
const ImageGenerateSchema = Type.Object({
|
|
11
|
+
prompt: Type.String({
|
|
12
|
+
description: "Description of the image to generate. Be descriptive — include subject, style, lighting, mood, composition, and artistic direction.",
|
|
13
|
+
}),
|
|
14
|
+
model: Type.Optional(Type.String({
|
|
15
|
+
description: 'Image generation model: "cogview-4" (default, $0.01/img), "cogview-4-flash" (faster, $0.004/img), or "glm-image" (highest quality, $0.015/img). Default: cogview-4.',
|
|
16
|
+
})),
|
|
17
|
+
size: Type.Optional(Type.String({
|
|
18
|
+
description: 'Image dimensions (WxH). Supported: "1024x1024" (default), "768x1344", "864x1152", "1344x768", "1152x864", "1440x720", "720x1440". Must be multiples of 32, range 512–2048.',
|
|
19
|
+
})),
|
|
20
|
+
quality: Type.Optional(Type.String({
|
|
21
|
+
description: 'Image quality: "standard" (default) or "hd" (higher detail, slower).',
|
|
22
|
+
})),
|
|
23
|
+
save_path: Type.Optional(Type.String({
|
|
24
|
+
description: "File path to save the generated image. If omitted, saves to the agent working directory with an auto-generated name.",
|
|
25
|
+
})),
|
|
26
|
+
});
|
|
27
|
+
/**
 * Resolve the Z.AI API key from environment or auth profiles.
 *
 * Checks the environment first; otherwise walks the stored auth profiles
 * for the "zai" provider and returns the first usable credential
 * ("api_key" or "token").
 *
 * @param {string} agentDir - Agent directory used to locate the profile store.
 * @returns {string|null} The resolved key, or null when none is available.
 */
function resolveZaiApiKey(agentDir) {
    const envCredential = resolveEnvApiKey("zai");
    if (envCredential?.apiKey) {
        return envCredential.apiKey;
    }
    // Suppress keychain prompts: runs during tool registration, must not block.
    const store = ensureAuthProfileStore(agentDir, { allowKeychainPrompt: false });
    const profileIds = listProfilesForProvider(store, "zai");
    for (const profileId of profileIds) {
        const credential = store.profiles[profileId];
        if (credential?.type === "api_key") {
            return credential.key;
        }
        if (credential?.type === "token") {
            return credential.token;
        }
    }
    return null;
}
|
|
47
|
+
/**
 * Validate and normalize the image model identifier.
 *
 * Exact (case-insensitive) matches of known models are kept as-is;
 * otherwise a few fuzzy aliases are recognized, and anything else
 * falls back to "cogview-4".
 *
 * @param {string|undefined} raw - User-supplied model name.
 * @returns {string} A valid Z.AI image model identifier.
 */
function normalizeImageModel(raw) {
    if (!raw) {
        return "cogview-4";
    }
    const candidate = raw.trim().toLowerCase();
    const knownModels = new Set([
        "cogview-4",
        "cogview-4-250304",
        "cogview-4-flash",
        "glm-image",
    ]);
    if (knownModels.has(candidate)) {
        return candidate;
    }
    // Fuzzy matching for common variants ("flash" checked first so
    // e.g. "glm flash" maps to the flash model).
    if (candidate.includes("flash")) {
        return "cogview-4-flash";
    }
    if (candidate.includes("glm") || candidate.includes("flagship")) {
        return "glm-image";
    }
    // "cog"/"view" variants and everything else share the same default.
    return "cogview-4";
}
|
|
72
|
+
/**
 * Validate image size. Z.AI accepts WxH where both dimensions are 512–2048
 * and multiples of 32. Missing or unparseable input falls back to "1024x1024".
 *
 * @param {string|undefined} raw - User-supplied size such as "768x1344".
 * @returns {string} Normalized "WxH" string.
 */
function normalizeImageSize(raw) {
    const fallback = "1024x1024";
    if (!raw) {
        return fallback;
    }
    // Accept "WxH" with optional whitespace and either "x" or "×".
    const match = raw.trim().toLowerCase().match(/^(\d+)\s*[x×]\s*(\d+)$/);
    if (!match) {
        return fallback;
    }
    // Clamp into [512, 2048], then snap to the nearest multiple of 32
    // (both bounds are themselves multiples of 32, so snapping stays in range).
    const snap = (value) => {
        const clamped = Math.max(512, Math.min(2048, value));
        return Math.round(clamped / 32) * 32;
    };
    const width = snap(Number.parseInt(match[1], 10));
    const height = snap(Number.parseInt(match[2], 10));
    return `${width}x${height}`;
}
|
|
93
|
+
/**
 * Call the Z.AI image generation API.
 *
 * @param {{apiKey: string, model: string, prompt: string, size: string, quality: string}} params
 * @returns {Promise<{imageUrl: string, revisedPrompt: string|undefined}>}
 * @throws {Error} on HTTP failure, API-reported errors, an empty result set,
 *   or a response that does not contain a downloadable image URL.
 */
async function generateImage(params) {
    const body = {
        model: params.model,
        prompt: params.prompt,
        size: params.size,
        quality: params.quality,
        n: 1, // single image per call
    };
    const response = await fetch(`${ZAI_BASE_URL}/images/generations`, {
        method: "POST",
        headers: {
            Authorization: `Bearer ${params.apiKey}`,
            "Content-Type": "application/json",
        },
        body: JSON.stringify(body),
    });
    if (!response.ok) {
        const errorText = await response.text().catch(() => "unknown error");
        throw new Error(`Z.AI image generation failed (HTTP ${response.status}): ${errorText}`);
    }
    const result = (await response.json());
    if (result.error) {
        throw new Error(`Z.AI image generation error: ${result.error.message} (${result.error.code})`);
    }
    const imageData = result.data?.[0];
    if (!imageData) {
        throw new Error("Z.AI image generation returned no images.");
    }
    // FIX: the previous check accepted `b64_json`-only responses but then
    // returned an empty imageUrl, which failed later in downloadImage with an
    // opaque fetch("") error. Require a URL and fail fast with a clear message.
    if (!imageData.url) {
        if (imageData.b64_json) {
            throw new Error("Z.AI image generation returned base64 data instead of a URL; base64 responses are not supported.");
        }
        throw new Error("Z.AI image generation returned no image URL or data.");
    }
    return {
        imageUrl: imageData.url,
        revisedPrompt: imageData.revised_prompt,
    };
}
|
|
133
|
+
/**
 * Download an image from URL and return its buffer.
 *
 * @param {string} url - Image URL to fetch.
 * @returns {Promise<Buffer>} Raw image bytes.
 * @throws {Error} when the download fails with a non-2xx status.
 */
async function downloadImage(url) {
    const response = await fetch(url);
    if (!response.ok) {
        throw new Error(`Failed to download generated image (HTTP ${response.status})`);
    }
    return Buffer.from(await response.arrayBuffer());
}
|
|
144
|
+
/**
 * Generate a unique filename for saved images.
 * Embeds the model name and a second-resolution UTC timestamp,
 * e.g. "generated-cogview-4-2026-02-19T12-00-00.png".
 *
 * @param {string} model - Model identifier included in the filename.
 * @returns {string} Filename ending in ".png".
 */
function generateImageFilename(model) {
    const stamp = new Date()
        .toISOString()
        .replace(/[:.]/g, "-") // make the timestamp filesystem-safe
        .slice(0, 19); // keep YYYY-MM-DDTHH-MM-SS, drop millis/zone
    return `generated-${model}-${stamp}.png`;
}
|
|
151
|
+
/**
 * Create the image-generate tool for Z.AI CogView-4 / GLM-Image.
 *
 * Supports:
 * - CogView-4: $0.01/image, general purpose
 * - CogView-4-Flash: $0.004/image, faster generation
 * - GLM-Image: $0.015/image, highest quality
 *
 * Returns null when no agent directory or Z.AI API key is available.
 *
 * @param {{agentDir?: string}|undefined} options
 * @returns {object|null} Tool definition, or null when unavailable.
 */
export function createImageGenerateTool(options) {
    const agentDir = options?.agentDir?.trim();
    if (!agentDir) {
        return null;
    }
    // The tool is only offered when a Z.AI key can be resolved.
    const apiKey = resolveZaiApiKey(agentDir);
    if (!apiKey) {
        return null;
    }
    // Per-image price label for the result metadata.
    const costLabel = (model) => {
        if (model === "glm-image") {
            return "$0.015";
        }
        if (model === "cogview-4-flash") {
            return "$0.004";
        }
        return "$0.01";
    };
    return {
        label: "Image Generate",
        name: "image_generate",
        description: [
            "Generate images from text descriptions using Z.AI (ZhipuAI) image models.",
            "Available models:",
            '- "cogview-4" (default): General-purpose, $0.01/image',
            '- "cogview-4-flash": Faster generation, $0.004/image',
            '- "glm-image": Highest quality, $0.015/image',
            "",
            "Write detailed, descriptive prompts for best results.",
            "The generated image is saved to disk and returned as MEDIA: path.",
            "Copy the MEDIA line exactly when presenting the image to the user.",
        ].join("\n"),
        parameters: ImageGenerateSchema,
        execute: async (_toolCallId, args) => {
            // Validate / normalize inputs.
            const prompt = readStringParam(args, "prompt", { required: true });
            const model = normalizeImageModel(readStringParam(args, "model"));
            const size = normalizeImageSize(readStringParam(args, "size"));
            const quality = readStringParam(args, "quality")?.toLowerCase() === "hd" ? "hd" : "standard";
            const requestedPath = readStringParam(args, "save_path");
            // Generate, then download the resulting image bytes.
            const generated = await generateImage({ apiKey, model, prompt, size, quality });
            const imageBuffer = await downloadImage(generated.imageUrl);
            // An explicit save_path wins (with "~" expansion); otherwise save
            // into the agent directory under an auto-generated name.
            let savePath;
            if (requestedPath) {
                savePath = requestedPath.startsWith("~")
                    ? resolveUserPath(requestedPath)
                    : requestedPath;
            } else {
                savePath = path.join(agentDir, generateImageFilename(model));
            }
            await fs.mkdir(path.dirname(savePath), { recursive: true });
            await fs.writeFile(savePath, imageBuffer);
            return imageResult({
                label: "image_generate",
                path: savePath,
                base64: imageBuffer.toString("base64"),
                mimeType: "image/png",
                extraText: [
                    `MEDIA:${savePath}`,
                    generated.revisedPrompt ? `Revised prompt: ${generated.revisedPrompt}` : "",
                ]
                    .filter(Boolean)
                    .join("\n"),
                details: {
                    model,
                    size,
                    quality,
                    prompt: prompt.slice(0, 200),
                    revisedPrompt: generated.revisedPrompt,
                    cost: costLabel(model),
                },
            });
        },
    };
}
|
package/dist/build-info.json
CHANGED
package/package.json
CHANGED
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: plcode-controller
|
|
3
|
+
description: "Control and operate PLCODE via CLI commands, slash commands, and multi-agent orchestration. Manage sessions, select models, delegate to agents (plan/code/review), and coordinate development workflows."
|
|
4
|
+
metadata: {"poolbot":{"emoji":"🎛️","requires":{"anyBins":["plcode"]}}}
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# PLCODE Controller
|
|
8
|
+
|
|
9
|
+
## Core Rule
|
|
10
|
+
|
|
11
|
+
All planning, coding, and review happens inside PLCODE.
|
|
12
|
+
Use the appropriate agent for each phase of work.
|
|
13
|
+
Never skip the planning phase.
|
|
14
|
+
|
|
15
|
+
## Pre-flight
|
|
16
|
+
|
|
17
|
+
- Confirm the user's working directory and project.
|
|
18
|
+
- Ask which AI provider/model to use if not already configured.
|
|
19
|
+
- Run `plcode doctor` to verify the environment is healthy.
|
|
20
|
+
- Do not proceed if critical issues are detected.
|
|
21
|
+
|
|
22
|
+
## Session Management
|
|
23
|
+
|
|
24
|
+
- Check for existing sessions using:
|
|
25
|
+
```
|
|
26
|
+
plcode session list
|
|
27
|
+
```
|
|
28
|
+
- If the current project already has a session, reuse it.
|
|
29
|
+
- Never create a new session without user approval.
|
|
30
|
+
- Sessions are tied to project directories and preserve full context.
|
|
31
|
+
|
|
32
|
+
## Agent Control
|
|
33
|
+
|
|
34
|
+
PLCODE uses multi-agent orchestration (AGNOS) with specialized agents. List available agents:
|
|
35
|
+
|
|
36
|
+
```
|
|
37
|
+
plcode agents list
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
Key agents for the standard workflow:
|
|
41
|
+
|
|
42
|
+
| Agent | Role |
|
|
43
|
+
| -------------- | -------------------------------------------- |
|
|
44
|
+
| planner_agent | Task analysis, step-by-step planning |
|
|
45
|
+
| code_agent | Code implementation and refactoring |
|
|
46
|
+
| review_agent | Code review with metrics, no subjective bias |
|
|
47
|
+
| test_agent | Test-driven development (RED-GREEN-REFACTOR) |
|
|
48
|
+
| critic_agent | Compare approaches and select best plan |
|
|
49
|
+
| executor_agent | Execute code in controlled sandbox |
|
|
50
|
+
|
|
51
|
+
Inspect any agent for details:
|
|
52
|
+
|
|
53
|
+
```
|
|
54
|
+
plcode agents inspect <agent-name>
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
## Model Selection
|
|
58
|
+
|
|
59
|
+
- List available models:
|
|
60
|
+
```
|
|
61
|
+
plcode models
|
|
62
|
+
```
|
|
63
|
+
- For detailed provider info:
|
|
64
|
+
```
|
|
65
|
+
plcode models --verbose
|
|
66
|
+
```
|
|
67
|
+
- If authentication is needed:
|
|
68
|
+
```
|
|
69
|
+
plcode auth
|
|
70
|
+
```
|
|
71
|
+
- Wait for the user to confirm auth is complete before continuing.
|
|
72
|
+
|
|
73
|
+
## Planning Phase
|
|
74
|
+
|
|
75
|
+
Use the `/plan` command to create structured execution plans:
|
|
76
|
+
|
|
77
|
+
```
|
|
78
|
+
/plan <task description>
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
The TaskPlanner will:
|
|
82
|
+
|
|
83
|
+
- Analyze task complexity (simple / moderate / complex).
|
|
84
|
+
- Assess risk level (low / medium / high).
|
|
85
|
+
- Generate step-by-step execution plan with agent assignments.
|
|
86
|
+
- Identify required MCP tools per step.
|
|
87
|
+
|
|
88
|
+
Review the plan carefully:
|
|
89
|
+
|
|
90
|
+
- If the plan is incorrect or incomplete, ask PLCODE to revise it.
|
|
91
|
+
- If complexity is high, consider breaking into smaller tasks.
|
|
92
|
+
- Do not proceed to implementation without an approved plan.
|
|
93
|
+
|
|
94
|
+
## Implementation Phase
|
|
95
|
+
|
|
96
|
+
Once the plan is approved, execute it. Two modes are available:
|
|
97
|
+
|
|
98
|
+
- **one-shot**: Execute all steps sequentially in the current session.
|
|
99
|
+
- **background**: Execute steps in the background, freeing the session.
|
|
100
|
+
|
|
101
|
+
During implementation:
|
|
102
|
+
|
|
103
|
+
- The code_agent handles code changes.
|
|
104
|
+
- The test_agent runs tests as each step completes.
|
|
105
|
+
- If a step fails, PLCODE will attempt recovery or escalate.
|
|
106
|
+
|
|
107
|
+
## Review Phase
|
|
108
|
+
|
|
109
|
+
After implementation, trigger a review:
|
|
110
|
+
|
|
111
|
+
```
|
|
112
|
+
/review
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
The review_agent evaluates:
|
|
116
|
+
|
|
117
|
+
- Code quality and adherence to standards.
|
|
118
|
+
- Test coverage and correctness.
|
|
119
|
+
- Security and performance implications.
|
|
120
|
+
|
|
121
|
+
If the review flags issues, return to the planning phase for that specific issue.
|
|
122
|
+
|
|
123
|
+
## Question Handling
|
|
124
|
+
|
|
125
|
+
If PLCODE or any agent asks a clarification question during implementation:
|
|
126
|
+
|
|
127
|
+
- Pause implementation.
|
|
128
|
+
- Provide the answer or switch context to the planner_agent.
|
|
129
|
+
- Confirm the revised approach before resuming.
|
|
130
|
+
- Never ignore questions during implementation.
|
|
131
|
+
|
|
132
|
+
## Completion
|
|
133
|
+
|
|
134
|
+
- Repeat the Plan -> Implement -> Review loop until all requirements are satisfied.
|
|
135
|
+
- Never skip the planning phase.
|
|
136
|
+
- Never bypass review for non-trivial changes.
|
|
137
|
+
- Use `plcode session list` to verify session state.
|
|
138
|
+
|
|
139
|
+
## MCP Tools Available
|
|
140
|
+
|
|
141
|
+
PLCODE exposes 32+ MCP tools across 5 categories:
|
|
142
|
+
|
|
143
|
+
| Category | Tools | Examples |
|
|
144
|
+
| ------------- | ----- | ------------------------------------------------------------ |
|
|
145
|
+
| Observability | 7 | `get_service_health`, `get_bra_health`, `get_memory_metrics` |
|
|
146
|
+
| Git | 8 | `get_repo_structure`, `get_diff`, `open_pull_request` |
|
|
147
|
+
| LSP | 9 | `lsp_diagnostics`, `find_references`, `code_metrics` |
|
|
148
|
+
| Knowledge | 5 | `check_dependency_cves`, `analyze_dependencies` |
|
|
149
|
+
| Web | 3 | `web_search`, `web_fetch`, `web_extract` |
|
|
150
|
+
|
|
151
|
+
## Output Format
|
|
152
|
+
|
|
153
|
+
- Show all CLI commands and slash commands explicitly.
|
|
154
|
+
- State which agent is active and what mode is selected.
|
|
155
|
+
- Provide auth links verbatim when they appear.
|
|
156
|
+
- Display plan summaries with complexity and risk assessments.
|