@poolzin/pool-bot 2026.2.19 → 2026.2.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
1
+ ## v2026.2.20 (2026-02-18)
2
+
3
+ ### Features
4
+ - **Z.AI Provider:** added ZhipuAI (GLM) as first-class provider with 5 models — GLM-4.7-Flash (free), GLM-4.5-Flash (free), GLM-4.6V-Flash (free vision), GLM-4.7-FlashX, GLM-4.7
5
+ - **Image Generation Tool:** `image_generate` tool powered by Z.AI CogView-4, CogView-4-Flash, and GLM-Image — generates images from text prompts, downloads to disk, returns media path
6
+ - **Deep Research Tool:** `deep_research` tool powered by Z.AI GLM-4.7-Flash + web_search — configurable depth (shallow/standard/deep), language, and max sources; returns structured report with sources and metadata; cost ~$0.01–$0.05 per query
7
+
8
+ ---
9
+
1
10
  ## v2026.2.19 (2026-02-17)
2
11
 
3
12
  ### Fixes
@@ -31,6 +31,33 @@ const MOONSHOT_DEFAULT_COST = {
31
31
  cacheRead: 0,
32
32
  cacheWrite: 0,
33
33
  };
34
// ---------------------------------------------------------------------------
// Z.AI (ZhipuAI / GLM) — OpenAI-compatible API
// Docs: https://docs.z.ai/ | Base: https://open.bigmodel.cn/api/paas/v4/
// ---------------------------------------------------------------------------
const ZAI_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
const ZAI_DEFAULT_MODEL_ID = "GLM-4.7-Flash";
const ZAI_DEFAULT_CONTEXT_WINDOW = 128000;
const ZAI_DEFAULT_MAX_TOKENS = 8192;
// Pricing tiers (USD per 1M tokens). Cache pricing is not modeled here,
// so cacheRead/cacheWrite stay at zero in every tier.
// GLM-4.7-Flash and GLM-4.5-Flash are completely free.
const ZAI_FREE_COST = {
    input: 0,
    output: 0,
    cacheRead: 0,
    cacheWrite: 0,
};
// GLM-4.7-FlashX: $0.07 in / $0.4 out per 1M tokens.
const ZAI_FLASHX_COST = {
    input: 0.07,
    output: 0.4,
    cacheRead: 0,
    cacheWrite: 0,
};
// GLM-4.7: $0.6 in / $2.2 out per 1M tokens.
const ZAI_PREMIUM_COST = {
    input: 0.6,
    output: 2.2,
    cacheRead: 0,
    cacheWrite: 0,
};
34
61
  const XIAOMI_BASE_URL = "https://api.xiaomimimo.com/anthropic";
35
62
  export const XIAOMI_DEFAULT_MODEL_ID = "mimo-v2-flash";
36
63
  const XIAOMI_DEFAULT_CONTEXT_WINDOW = 262144;
@@ -256,6 +283,59 @@ function buildMoonshotProvider() {
256
283
  ],
257
284
  };
258
285
  }
286
/**
 * Provider definition for Z.AI (ZhipuAI / GLM) over the OpenAI-compatible
 * completions API: two free text models, a free vision model, and the paid
 * FlashX / 4.7 tiers. All models share the default context window and
 * max-token budget.
 */
function buildZaiProvider() {
    // Local factory so each model entry only spells out what differs.
    const glmModel = (id, name, cost, input) => ({
        id,
        name,
        reasoning: false,
        input,
        cost,
        contextWindow: ZAI_DEFAULT_CONTEXT_WINDOW,
        maxTokens: ZAI_DEFAULT_MAX_TOKENS,
    });
    return {
        baseUrl: ZAI_BASE_URL,
        api: "openai-completions",
        models: [
            glmModel(ZAI_DEFAULT_MODEL_ID, "GLM 4.7 Flash", ZAI_FREE_COST, ["text"]),
            glmModel("GLM-4.5-Flash", "GLM 4.5 Flash", ZAI_FREE_COST, ["text"]),
            glmModel("GLM-4.6V-Flash", "GLM 4.6V Flash (Vision)", ZAI_FREE_COST, ["text", "image"]),
            glmModel("GLM-4.7-FlashX", "GLM 4.7 FlashX", ZAI_FLASHX_COST, ["text"]),
            glmModel("GLM-4.7", "GLM 4.7", ZAI_PREMIUM_COST, ["text"]),
        ],
    };
}
259
339
  function buildQwenPortalProvider() {
260
340
  return {
261
341
  baseUrl: QWEN_PORTAL_BASE_URL,
@@ -402,6 +482,11 @@ export async function resolveImplicitProviders(params) {
402
482
  if (moonshotKey) {
403
483
  providers.moonshot = { ...buildMoonshotProvider(), apiKey: moonshotKey };
404
484
  }
485
+ const zaiKey = resolveEnvApiKeyVarName("zai") ??
486
+ resolveApiKeyFromProfiles({ provider: "zai", store: authStore });
487
+ if (zaiKey) {
488
+ providers.zai = { ...buildZaiProvider(), apiKey: zaiKey };
489
+ }
405
490
  const syntheticKey = resolveEnvApiKeyVarName("synthetic") ??
406
491
  resolveApiKeyFromProfiles({ provider: "synthetic", store: authStore });
407
492
  if (syntheticKey) {
@@ -5,6 +5,8 @@ import { createBrowserTool } from "./tools/browser-tool.js";
5
5
  import { createCanvasTool } from "./tools/canvas-tool.js";
6
6
  import { createCronTool } from "./tools/cron-tool.js";
7
7
  import { createGatewayTool } from "./tools/gateway-tool.js";
8
+ import { createDeepResearchTool } from "./tools/deep-research-tool.js";
9
+ import { createImageGenerateTool } from "./tools/image-generate-tool.js";
8
10
  import { createImageTool } from "./tools/image-tool.js";
9
11
  import { createMessageTool } from "./tools/message-tool.js";
10
12
  import { createNodesTool } from "./tools/nodes-tool.js";
@@ -30,6 +32,11 @@ export function createOpenClawTools(options) {
30
32
  modelHasVision: options?.modelHasVision,
31
33
  })
32
34
  : null;
35
+ const imageGenerateTool = createImageGenerateTool({
36
+ config: options?.config,
37
+ agentDir: options?.agentDir,
38
+ sandboxRoot: options?.sandboxRoot,
39
+ });
33
40
  const webSearchTool = createWebSearchTool({
34
41
  config: options?.config,
35
42
  sandboxed: options?.sandboxed,
@@ -113,7 +120,16 @@ export function createOpenClawTools(options) {
113
120
  ...(webSearchTool ? [webSearchTool] : []),
114
121
  ...(webFetchTool ? [webFetchTool] : []),
115
122
  ...(imageTool ? [imageTool] : []),
123
+ ...(imageGenerateTool ? [imageGenerateTool] : []),
116
124
  ];
125
+ // Z.AI-powered research tool (gracefully absent when no key configured)
126
+ const deepResearchTool = createDeepResearchTool({
127
+ config: options?.config,
128
+ agentDir: options?.agentDir,
129
+ sandboxRoot: options?.sandboxRoot,
130
+ });
131
+ if (deepResearchTool)
132
+ tools.push(deepResearchTool);
117
133
  const pluginTools = resolvePluginTools({
118
134
  context: {
119
135
  config: options?.config,
@@ -5,6 +5,8 @@ import { createBrowserTool } from "./tools/browser-tool.js";
5
5
  import { createCanvasTool } from "./tools/canvas-tool.js";
6
6
  import { createCronTool } from "./tools/cron-tool.js";
7
7
  import { createGatewayTool } from "./tools/gateway-tool.js";
8
+ import { createDeepResearchTool } from "./tools/deep-research-tool.js";
9
+ import { createImageGenerateTool } from "./tools/image-generate-tool.js";
8
10
  import { createImageTool } from "./tools/image-tool.js";
9
11
  import { createMessageTool } from "./tools/message-tool.js";
10
12
  import { createNodesTool } from "./tools/nodes-tool.js";
@@ -24,6 +26,11 @@ export function createPoolBotTools(options) {
24
26
  modelHasVision: options?.modelHasVision,
25
27
  })
26
28
  : null;
29
+ const imageGenerateTool = createImageGenerateTool({
30
+ config: options?.config,
31
+ agentDir: options?.agentDir,
32
+ sandboxRoot: options?.sandboxRoot,
33
+ });
27
34
  const webSearchTool = createWebSearchTool({
28
35
  config: options?.config,
29
36
  sandboxed: options?.sandboxed,
@@ -104,7 +111,16 @@ export function createPoolBotTools(options) {
104
111
  ...(webSearchTool ? [webSearchTool] : []),
105
112
  ...(webFetchTool ? [webFetchTool] : []),
106
113
  ...(imageTool ? [imageTool] : []),
114
+ ...(imageGenerateTool ? [imageGenerateTool] : []),
107
115
  ];
116
+ // Z.AI-powered research tool (gracefully absent when no key configured)
117
+ const deepResearchTool = createDeepResearchTool({
118
+ config: options?.config,
119
+ agentDir: options?.agentDir,
120
+ sandboxRoot: options?.sandboxRoot,
121
+ });
122
+ if (deepResearchTool)
123
+ tools.push(deepResearchTool);
108
124
  const pluginTools = resolvePluginTools({
109
125
  context: {
110
126
  config: options?.config,
@@ -0,0 +1,225 @@
1
+ import { Type } from "@sinclair/typebox";
2
+ import { ensureAuthProfileStore, listProfilesForProvider } from "../auth-profiles.js";
3
+ import { resolveEnvApiKey } from "../model-auth.js";
4
+ import { jsonResult, readNumberParam, readStringParam } from "./common.js";
5
// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------
const ZAI_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
// GLM-4.7-Flash is FREE and supports the built-in web_search tool ($0.01/use),
// so model synthesis costs nothing and search fees dominate each query.
const RESEARCH_MODEL = "GLM-4.7-Flash";
// ---------------------------------------------------------------------------
// Schema
// ---------------------------------------------------------------------------
// Input schema for the deep_research tool (topic required, rest optional).
const DeepResearchSchema = Type.Object({
    topic: Type.String({
        description: "The research topic or question. Be specific — include what aspects to cover, time range, and scope.",
    }),
    depth: Type.Optional(Type.String({
        description: 'Research depth: "shallow" (quick overview, 1 search), "standard" (balanced, 2-3 searches, default), or "deep" (thorough, 4-5 searches with follow-ups).',
    })),
    language: Type.Optional(Type.String({
        description: 'Language for the research report. Default: "en" (English). Use ISO 639-1 codes (e.g. "pt", "zh", "ja").',
    })),
    max_sources: Type.Optional(Type.Number({
        description: "Maximum number of sources to include. Default: 10. Range: 1–20.",
    })),
});
31
+ // ---------------------------------------------------------------------------
32
+ // Helpers
33
+ // ---------------------------------------------------------------------------
34
/**
 * Resolve the Z.AI API key from environment or auth profiles.
 * Same logic as image-generate-tool — kept as a local copy to avoid
 * cross-tool coupling.
 *
 * @param {string} agentDir - agent directory that owns the auth-profile store.
 * @returns {string|null} the API key or token, or null when none is configured.
 */
function resolveZaiApiKey(agentDir) {
    // Environment configuration wins over stored profiles.
    const fromEnv = resolveEnvApiKey("zai");
    if (fromEnv?.apiKey) {
        return fromEnv.apiKey;
    }
    // Fall back to the first usable credential in the profile store,
    // never prompting the OS keychain.
    const store = ensureAuthProfileStore(agentDir, { allowKeychainPrompt: false });
    for (const profileId of listProfilesForProvider(store, "zai")) {
        const credential = store.profiles[profileId];
        if (credential?.type === "api_key") {
            return credential.key;
        }
        if (credential?.type === "token") {
            return credential.token;
        }
    }
    return null;
}
56
/**
 * Map a free-form depth string onto one of the three supported levels.
 * Missing or unrecognized input defaults to "standard".
 */
function normalizeDepth(raw) {
    if (!raw) {
        return "standard";
    }
    switch (raw.trim().toLowerCase()) {
        case "shallow":
        case "quick":
        case "brief":
            return "shallow";
        case "deep":
        case "thorough":
        case "comprehensive":
            return "deep";
        default:
            return "standard";
    }
}
66
/**
 * Build the system prompt for the research agent, tailored to the
 * requested depth, output language, and source budget.
 */
function buildSystemPrompt(depth, language, maxSources) {
    const guidance = {
        shallow: "Provide a concise overview. Use 1 web search to gather key facts. Keep the report brief — 3-5 paragraphs max.",
        standard: "Provide a balanced research report. Use 2-3 web searches to gather diverse perspectives. Structure with clear sections and cite sources.",
        deep: "Provide a thorough, in-depth research report. Use 4-5 web searches covering different angles (academic, news, expert opinions, primary sources). Include nuanced analysis, counterarguments, and comprehensive source citations.",
    };
    // English output needs no extra directive; other languages get an
    // explicit instruction appended after the guidelines.
    const languageNote = language === "en"
        ? ""
        : `\nWrite the entire report in ${language} language. Search queries can be in any language for best results, but the final output must be in ${language}.`;
    const header = [
        "You are a professional research analyst. Your task is to produce a well-structured research report on the given topic.",
        "",
        `Research depth: ${depth}.`,
        guidance[depth],
        "",
        "Guidelines:",
        "- Use the web_search tool to find current, reliable information.",
        "- Cross-reference multiple sources for accuracy.",
        "- Cite all sources with titles and URLs.",
        `- Include up to ${maxSources} sources.`,
        "- Structure the report with: Summary, Key Findings, Detailed Analysis, Sources.",
        "- Be factual and objective. Clearly distinguish facts from analysis.",
        "- Include dates for time-sensitive information.",
    ].join("\n");
    return `${header}\n${languageNote}`;
}
98
/**
 * Call Z.AI chat completions with the built-in web_search tool enabled.
 *
 * @param {{ apiKey: string, topic: string, depth: string, language: string, maxSources: number }} params
 * @returns {Promise<{ report: string, sources: Array, usage: object }>}
 * @throws {Error} on HTTP failure, API-reported errors, or empty content.
 */
async function callZaiResearch(params) {
    const { apiKey, topic, depth, language, maxSources } = params;
    const requestBody = {
        model: RESEARCH_MODEL,
        messages: [
            { role: "system", content: buildSystemPrompt(depth, language, maxSources) },
            { role: "user", content: topic },
        ],
        tools: [
            {
                type: "web_search",
                web_search: {
                    enable: true,
                    // Ask the API to echo search hits back in the response metadata.
                    search_result: true,
                },
            },
        ],
        // Low temperature keeps the synthesis factual rather than creative.
        temperature: 0.3,
        stream: false,
    };
    const response = await fetch(`${ZAI_BASE_URL}/chat/completions`, {
        method: "POST",
        headers: {
            Authorization: `Bearer ${apiKey}`,
            "Content-Type": "application/json",
        },
        body: JSON.stringify(requestBody),
    });
    if (!response.ok) {
        const errorText = await response.text().catch(() => "unknown error");
        throw new Error(`Z.AI research request failed (HTTP ${response.status}): ${errorText}`);
    }
    const result = await response.json();
    if (result.error) {
        throw new Error(`Z.AI research error: ${result.error.message} (${result.error.code})`);
    }
    const content = result.choices?.[0]?.message?.content;
    if (!content) {
        throw new Error("Z.AI research returned no content.");
    }
    return {
        report: content,
        // NOTE(review): assumes search hits arrive on a top-level `web_search`
        // field — confirm against the actual Z.AI response shape.
        sources: result.web_search ?? [],
        usage: {
            promptTokens: result.usage?.prompt_tokens ?? 0,
            completionTokens: result.usage?.completion_tokens ?? 0,
            totalTokens: result.usage?.total_tokens ?? 0,
        },
    };
}
151
// ---------------------------------------------------------------------------
// Factory
// ---------------------------------------------------------------------------
/**
 * Create the deep-research tool powered by Z.AI GLM-4.7-Flash + web_search.
 *
 * Cost breakdown:
 *   - GLM-4.7-Flash inference: FREE
 *   - web_search: $0.01/use (1-5 searches depending on depth)
 *   - Total per research query: ~$0.01–$0.05
 *
 * @returns {object|null} the tool definition, or null when no agent dir or
 *   Z.AI API key is available (graceful degradation).
 */
export function createDeepResearchTool(options) {
    const agentDir = options?.agentDir?.trim();
    if (!agentDir) {
        return null;
    }
    const apiKey = resolveZaiApiKey(agentDir);
    if (!apiKey) {
        return null;
    }
    return {
        label: "Deep Research",
        name: "deep_research",
        description: [
            "Perform deep web research on any topic using Z.AI GLM-4.7-Flash + web search.",
            "Returns a structured research report with citations and sources.",
            "",
            "Depth options:",
            '- "shallow": Quick overview (~$0.01, 1 search)',
            '- "standard": Balanced report (~$0.03, 2-3 searches)',
            '- "deep": Thorough analysis (~$0.05, 4-5 searches)',
            "",
            "Cost: GLM-4.7-Flash is FREE; only web search costs $0.01/use.",
            "Best for: current events, technical topics, market research, fact-checking.",
        ].join("\n"),
        parameters: DeepResearchSchema,
        execute: async (_toolCallId, args) => {
            // Normalize and clamp the user-supplied parameters.
            const topic = readStringParam(args, "topic", { required: true });
            const depth = normalizeDepth(readStringParam(args, "depth"));
            const language = readStringParam(args, "language") ?? "en";
            const requestedSources = readNumberParam(args, "max_sources", { integer: true });
            const maxSources = Math.min(20, Math.max(1, requestedSources ?? 10));
            const research = await callZaiResearch({
                apiKey,
                topic,
                depth,
                language,
                maxSources,
            });
            // Flatten raw search hits into a compact, numbered source list.
            const sources = research.sources.map((hit, position) => ({
                index: position + 1,
                title: hit.title,
                url: hit.link,
                snippet: hit.content?.slice(0, 200),
            }));
            // Search fees dominate cost; estimate from the depth setting.
            const searchCount = depth === "shallow" ? 1 : depth === "standard" ? 3 : 5;
            return jsonResult({
                report: research.report,
                sources,
                metadata: {
                    model: RESEARCH_MODEL,
                    depth,
                    language,
                    sourceCount: sources.length,
                    estimatedCost: `~$${(searchCount * 0.01).toFixed(2)}`,
                    tokens: research.usage,
                },
            });
        },
    };
}
@@ -0,0 +1,235 @@
1
+ import fs from "node:fs/promises";
2
+ import path from "node:path";
3
+ import { Type } from "@sinclair/typebox";
4
+ import { resolveUserPath } from "../../utils.js";
5
+ import { ensureAuthProfileStore, listProfilesForProvider } from "../auth-profiles.js";
6
+ import { resolveEnvApiKey } from "../model-auth.js";
7
+ import { imageResult, readStringParam } from "./common.js";
8
// Z.AI image generation models — OpenAI-compatible images endpoint.
const ZAI_BASE_URL = "https://open.bigmodel.cn/api/paas/v4";
// Input schema for the image_generate tool (prompt required, rest optional).
const ImageGenerateSchema = Type.Object({
    prompt: Type.String({
        description: "Description of the image to generate. Be descriptive — include subject, style, lighting, mood, composition, and artistic direction.",
    }),
    model: Type.Optional(Type.String({
        description: 'Image generation model: "cogview-4" (default, $0.01/img), "cogview-4-flash" (faster, $0.004/img), or "glm-image" (highest quality, $0.015/img). Default: cogview-4.',
    })),
    size: Type.Optional(Type.String({
        description: 'Image dimensions (WxH). Supported: "1024x1024" (default), "768x1344", "864x1152", "1344x768", "1152x864", "1440x720", "720x1440". Must be multiples of 32, range 512–2048.',
    })),
    quality: Type.Optional(Type.String({
        description: 'Image quality: "standard" (default) or "hd" (higher detail, slower).',
    })),
    save_path: Type.Optional(Type.String({
        description: "File path to save the generated image. If omitted, saves to the agent working directory with an auto-generated name.",
    })),
});
27
/**
 * Resolve the Z.AI API key from environment or auth profiles.
 *
 * @param {string} agentDir - agent directory that owns the auth-profile store.
 * @returns {string|null} the API key or token, or null when none is configured.
 */
function resolveZaiApiKey(agentDir) {
    // Prefer an environment-supplied key.
    const envKey = resolveEnvApiKey("zai");
    if (envKey?.apiKey) {
        return envKey.apiKey;
    }
    // Otherwise scan stored profiles (no keychain prompt) and take the
    // first credential that carries a usable secret.
    const store = ensureAuthProfileStore(agentDir, { allowKeychainPrompt: false });
    const profileIds = listProfilesForProvider(store, "zai");
    for (const id of profileIds) {
        const cred = store.profiles[id];
        switch (cred?.type) {
            case "api_key":
                return cred.key;
            case "token":
                return cred.token;
            default:
                break;
        }
    }
    return null;
}
47
/**
 * Validate and normalize the image model identifier.
 * Known ids pass through; common shorthands are fuzzy-matched onto the
 * closest supported model, and anything else falls back to "cogview-4".
 */
function normalizeImageModel(raw) {
    if (!raw) {
        return "cogview-4";
    }
    const candidate = raw.trim().toLowerCase();
    const supported = new Set([
        "cogview-4",
        "cogview-4-250304",
        "cogview-4-flash",
        "glm-image",
    ]);
    if (supported.has(candidate)) {
        return candidate;
    }
    // Fuzzy matching for common variants.
    if (candidate.includes("flash")) {
        return "cogview-4-flash";
    }
    if (candidate.includes("glm") || candidate.includes("flagship")) {
        return "glm-image";
    }
    // "cog"/"view" variants and anything unrecognized use the default model.
    return "cogview-4";
}
72
/**
 * Validate image size. Z.AI accepts WxH where both dimensions are
 * 512–2048 and multiples of 32; out-of-range values are clamped and
 * snapped, and unparseable input falls back to "1024x1024".
 */
function normalizeImageSize(raw) {
    const fallback = "1024x1024";
    if (!raw) {
        return fallback;
    }
    // Accept "WxH" with an ASCII "x" or a multiplication sign; spaces allowed.
    const match = raw.trim().toLowerCase().match(/^(\d+)\s*[x×]\s*(\d+)$/);
    if (!match) {
        return fallback;
    }
    // Clamp into [512, 2048], then snap to the nearest multiple of 32.
    const snap = (value) => Math.round(Math.min(2048, Math.max(512, value)) / 32) * 32;
    const width = snap(Number.parseInt(match[1], 10));
    const height = snap(Number.parseInt(match[2], 10));
    return `${width}x${height}`;
}
93
/**
 * Call the Z.AI image generation API and return a fetchable image location.
 *
 * Fix: when the API returns only an inline `b64_json` payload (no hosted
 * `url`), the previous code passed its guard but returned an empty string,
 * which made the follow-up download step fail. The base64 payload is now
 * wrapped in a `data:` URL, which `fetch` can consume directly.
 *
 * @param {{ apiKey: string, model: string, prompt: string, size: string, quality: string }} params
 * @returns {Promise<{ imageUrl: string, revisedPrompt: string|undefined }>}
 * @throws {Error} on HTTP failure, API-reported errors, or empty responses.
 */
async function generateImage(params) {
    const body = {
        model: params.model,
        prompt: params.prompt,
        size: params.size,
        quality: params.quality,
        n: 1, // one image per call
    };
    const response = await fetch(`${ZAI_BASE_URL}/images/generations`, {
        method: "POST",
        headers: {
            Authorization: `Bearer ${params.apiKey}`,
            "Content-Type": "application/json",
        },
        body: JSON.stringify(body),
    });
    if (!response.ok) {
        const errorText = await response.text().catch(() => "unknown error");
        throw new Error(`Z.AI image generation failed (HTTP ${response.status}): ${errorText}`);
    }
    const result = await response.json();
    if (result.error) {
        throw new Error(`Z.AI image generation error: ${result.error.message} (${result.error.code})`);
    }
    if (!result.data?.[0]) {
        throw new Error("Z.AI image generation returned no images.");
    }
    const imageData = result.data[0];
    // Prefer the hosted URL; otherwise expose the inline base64 content as a
    // data: URL so downloadImage() can still fetch it.
    const imageUrl = imageData.url
        ?? (imageData.b64_json ? `data:image/png;base64,${imageData.b64_json}` : undefined);
    if (!imageUrl) {
        throw new Error("Z.AI image generation returned no image URL or data.");
    }
    return {
        imageUrl,
        revisedPrompt: imageData.revised_prompt,
    };
}
133
/**
 * Download an image from a URL and return its raw bytes.
 *
 * @param {string} url - location returned by generateImage().
 * @returns {Promise<Buffer>} the downloaded image bytes.
 * @throws {Error} when the HTTP response is not OK.
 */
async function downloadImage(url) {
    const response = await fetch(url);
    if (!response.ok) {
        throw new Error(`Failed to download generated image (HTTP ${response.status})`);
    }
    return Buffer.from(await response.arrayBuffer());
}
144
/**
 * Build a unique, filesystem-safe file name for a saved image.
 * Shape: generated-<model>-YYYY-MM-DDTHH-MM-SS.png
 */
function generateImageFilename(model) {
    // ISO timestamp trimmed to whole seconds; ":" is not filename-safe.
    const stamp = new Date().toISOString().slice(0, 19).replaceAll(":", "-");
    return `generated-${model}-${stamp}.png`;
}
151
/**
 * Create the image-generate tool for Z.AI CogView-4 / GLM-Image.
 *
 * Supported models:
 *   - CogView-4: $0.01/image, general purpose
 *   - CogView-4-Flash: $0.004/image, faster generation
 *   - GLM-Image: $0.015/image, highest quality
 *
 * @returns {object|null} the tool definition, or null when no agent dir or
 *   Z.AI API key is available.
 */
export function createImageGenerateTool(options) {
    const agentDir = options?.agentDir?.trim();
    if (!agentDir) {
        return null;
    }
    // Without a Z.AI key the tool is silently omitted.
    const apiKey = resolveZaiApiKey(agentDir);
    if (!apiKey) {
        return null;
    }
    // Per-image pricing, reported in the result metadata only.
    const costByModel = {
        "glm-image": "$0.015",
        "cogview-4-flash": "$0.004",
    };
    return {
        label: "Image Generate",
        name: "image_generate",
        description: [
            "Generate images from text descriptions using Z.AI (ZhipuAI) image models.",
            "Available models:",
            '- "cogview-4" (default): General-purpose, $0.01/image',
            '- "cogview-4-flash": Faster generation, $0.004/image',
            '- "glm-image": Highest quality, $0.015/image',
            "",
            "Write detailed, descriptive prompts for best results.",
            "The generated image is saved to disk and returned as MEDIA: path.",
            "Copy the MEDIA line exactly when presenting the image to the user.",
        ].join("\n"),
        parameters: ImageGenerateSchema,
        execute: async (_toolCallId, args) => {
            // Read and normalize parameters.
            const prompt = readStringParam(args, "prompt", { required: true });
            const model = normalizeImageModel(readStringParam(args, "model"));
            const size = normalizeImageSize(readStringParam(args, "size"));
            const quality = readStringParam(args, "quality")?.toLowerCase() === "hd" ? "hd" : "standard";
            const requestedPath = readStringParam(args, "save_path");
            // Generate, then pull the bytes down.
            const generation = await generateImage({ apiKey, model, prompt, size, quality });
            const imageBuffer = await downloadImage(generation.imageUrl);
            // Honor an explicit save path ("~" expands to the user home);
            // otherwise drop the file into the agent directory.
            const savePath = requestedPath
                ? requestedPath.startsWith("~")
                    ? resolveUserPath(requestedPath)
                    : requestedPath
                : path.join(agentDir, generateImageFilename(model));
            await fs.mkdir(path.dirname(savePath), { recursive: true });
            await fs.writeFile(savePath, imageBuffer);
            const extraLines = [`MEDIA:${savePath}`];
            if (generation.revisedPrompt) {
                extraLines.push(`Revised prompt: ${generation.revisedPrompt}`);
            }
            return imageResult({
                label: "image_generate",
                path: savePath,
                base64: imageBuffer.toString("base64"),
                mimeType: "image/png",
                extraText: extraLines.join("\n"),
                details: {
                    model,
                    size,
                    quality,
                    prompt: prompt.slice(0, 200),
                    revisedPrompt: generation.revisedPrompt,
                    cost: costByModel[model] ?? "$0.01",
                },
            });
        },
    };
}
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "2026.2.19",
3
- "commit": "8491aa098a0215c3b1d5e8c853c27400ec2924e8",
4
- "builtAt": "2026-02-18T00:08:38.731Z"
2
+ "version": "2026.2.20",
3
+ "commit": "d55929d079a2231e96e962d49e52c54efb661a80",
4
+ "builtAt": "2026-02-18T03:33:36.817Z"
5
5
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@poolzin/pool-bot",
3
- "version": "2026.2.19",
3
+ "version": "2026.2.20",
4
4
  "description": "🎱 Pool Bot - AI assistant with PLCODE integrations",
5
5
  "keywords": [],
6
6
  "license": "MIT",