@defai.digital/ax-cli 0.0.34 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +749 -272
- package/dist/agent/grok-agent.d.ts +7 -1
- package/dist/agent/grok-agent.js +22 -9
- package/dist/agent/grok-agent.js.map +1 -1
- package/dist/commands/mcp.js +1 -1
- package/dist/commands/mcp.js.map +1 -1
- package/dist/constants.d.ts +116 -0
- package/dist/constants.js +115 -0
- package/dist/constants.js.map +1 -0
- package/dist/grok/client.d.ts +133 -2
- package/dist/grok/client.js +173 -16
- package/dist/grok/client.js.map +1 -1
- package/dist/grok/types.d.ts +291 -0
- package/dist/grok/types.js +127 -0
- package/dist/grok/types.js.map +1 -0
- package/dist/index.js +3 -3
- package/dist/index.js.map +1 -1
- package/dist/mcp/client.d.ts +3 -8
- package/dist/mcp/client.js +17 -9
- package/dist/mcp/client.js.map +1 -1
- package/dist/mcp/config.d.ts +1 -1
- package/dist/mcp/config.js +24 -4
- package/dist/mcp/config.js.map +1 -1
- package/dist/mcp/transports.js +1 -1
- package/dist/mcp/transports.js.map +1 -1
- package/dist/schemas/api-schemas.d.ts +569 -0
- package/dist/schemas/api-schemas.js +116 -0
- package/dist/schemas/api-schemas.js.map +1 -0
- package/dist/schemas/confirmation-schemas.d.ts +60 -0
- package/dist/schemas/confirmation-schemas.js +41 -0
- package/dist/schemas/confirmation-schemas.js.map +1 -0
- package/dist/schemas/index-unified.d.ts +12 -0
- package/dist/schemas/index-unified.js +17 -0
- package/dist/schemas/index-unified.js.map +1 -0
- package/dist/schemas/index.d.ts +229 -37
- package/dist/schemas/settings-schemas.d.ts +179 -0
- package/dist/schemas/settings-schemas.js +52 -0
- package/dist/schemas/settings-schemas.js.map +1 -0
- package/dist/schemas/tool-schemas.d.ts +241 -0
- package/dist/schemas/tool-schemas.js +79 -0
- package/dist/schemas/tool-schemas.js.map +1 -0
- package/dist/tools/search.js +2 -2
- package/dist/tools/search.js.map +1 -1
- package/dist/ui/components/api-key-input.js +2 -2
- package/dist/ui/components/api-key-input.js.map +1 -1
- package/dist/ui/components/chat-history.js +2 -0
- package/dist/ui/components/chat-history.js.map +1 -1
- package/dist/ui/components/chat-interface.js +31 -1
- package/dist/ui/components/chat-interface.js.map +1 -1
- package/dist/ui/components/mcp-status.js +1 -1
- package/dist/ui/components/mcp-status.js.map +1 -1
- package/dist/ui/components/reasoning-display.d.ts +109 -0
- package/dist/ui/components/reasoning-display.js +110 -0
- package/dist/ui/components/reasoning-display.js.map +1 -0
- package/dist/ui/shared/max-sized-box.js +1 -1
- package/dist/ui/shared/max-sized-box.js.map +1 -1
- package/dist/utils/cache.d.ts +75 -0
- package/dist/utils/cache.js +137 -0
- package/dist/utils/cache.js.map +1 -0
- package/dist/utils/confirmation-service.js +2 -2
- package/dist/utils/confirmation-service.js.map +1 -1
- package/dist/utils/error-handler.d.ts +43 -0
- package/dist/utils/error-handler.js +70 -0
- package/dist/utils/error-handler.js.map +1 -0
- package/dist/utils/index.d.ts +13 -0
- package/dist/utils/index.js +23 -0
- package/dist/utils/index.js.map +1 -0
- package/dist/utils/path-validator.d.ts +30 -0
- package/dist/utils/path-validator.js +67 -0
- package/dist/utils/path-validator.js.map +1 -0
- package/dist/utils/performance.d.ts +72 -0
- package/dist/utils/performance.js +114 -0
- package/dist/utils/performance.js.map +1 -0
- package/dist/utils/settings-manager.d.ts +2 -18
- package/dist/utils/settings-manager.js +19 -6
- package/dist/utils/settings-manager.js.map +1 -1
- package/dist/utils/token-counter.d.ts +10 -1
- package/dist/utils/token-counter.js +27 -6
- package/dist/utils/token-counter.js.map +1 -1
- package/eslint.config.js +60 -0
- package/package.json +24 -6
- package/vitest.config.ts +20 -0
- package/.automatosx/agents/aerospace-scientist.yaml +0 -159
- package/.automatosx/agents/architecture.yaml +0 -244
- package/.automatosx/agents/backend.yaml +0 -172
- package/.automatosx/agents/ceo.yaml +0 -105
- package/.automatosx/agents/creative-marketer.yaml +0 -173
- package/.automatosx/agents/cto.yaml +0 -118
- package/.automatosx/agents/data-scientist.yaml +0 -200
- package/.automatosx/agents/data.yaml +0 -106
- package/.automatosx/agents/design.yaml +0 -115
- package/.automatosx/agents/devops.yaml +0 -124
- package/.automatosx/agents/frontend.yaml +0 -171
- package/.automatosx/agents/fullstack.yaml +0 -172
- package/.automatosx/agents/mobile.yaml +0 -185
- package/.automatosx/agents/product.yaml +0 -103
- package/.automatosx/agents/quality.yaml +0 -93
- package/.automatosx/agents/quantum-engineer.yaml +0 -167
- package/.automatosx/agents/researcher.yaml +0 -122
- package/.automatosx/agents/security.yaml +0 -115
- package/.automatosx/agents/standard.yaml +0 -214
- package/.automatosx/agents/writer.yaml +0 -122
- package/.automatosx/feature-flags.json +0 -13
- package/.automatosx/memory/memory.db +0 -0
- package/.automatosx/providers/README.md +0 -117
- package/.automatosx/providers/grok-zai.yaml.template +0 -61
- package/.automatosx/providers/grok.yaml.template +0 -71
- package/.automatosx/status/backend-1763517593334-85037.json +0 -9
- package/.automatosx/status/quality-1763516867087-82043.json +0 -9
- package/.automatosx/status/quality-1763516976722-84817.json +0 -9
- package/.automatosx/status/security-1763517871950-87357.json +0 -9
- package/.automatosx/teams/business.yaml +0 -56
- package/.automatosx/teams/core.yaml +0 -60
- package/.automatosx/teams/design.yaml +0 -58
- package/.automatosx/teams/engineering.yaml +0 -69
- package/.automatosx/teams/research.yaml +0 -56
- package/.automatosx/templates/analyst.yaml +0 -60
- package/.automatosx/templates/assistant.yaml +0 -48
- package/.automatosx/templates/basic-agent.yaml +0 -28
- package/.automatosx/templates/code-reviewer.yaml +0 -52
- package/.automatosx/templates/debugger.yaml +0 -63
- package/.automatosx/templates/designer.yaml +0 -69
- package/.automatosx/templates/developer.yaml +0 -60
- package/.automatosx/templates/fullstack-developer.yaml +0 -395
- package/.automatosx/templates/qa-specialist.yaml +0 -71
- package/.claude/mcp/automatosx.json +0 -244
- package/.claude/settings.local.json +0 -34
- package/.grok/settings.json +0 -37
- package/automatosx/PRD/README.md +0 -9
- package/automatosx/tmp/README.md +0 -10
- package/automatosx.config.json +0 -333
package/dist/grok/client.js
CHANGED
|
@@ -1,58 +1,210 @@
|
|
|
1
1
|
import OpenAI from "openai";
|
|
2
|
+
import { safeValidateGrokResponse } from "../schemas/api-schemas.js";
|
|
3
|
+
import { ErrorCategory, createErrorMessage } from "../utils/error-handler.js";
|
|
4
|
+
import { GLM_MODELS, DEFAULT_MODEL } from "../constants.js";
|
|
5
|
+
/**
|
|
6
|
+
* GrokClient - Enhanced client for GLM-4.6 API
|
|
7
|
+
*
|
|
8
|
+
* Supports advanced features including:
|
|
9
|
+
* - Thinking/reasoning mode
|
|
10
|
+
* - Configurable temperature (0.6-1.0 for GLM-4.6)
|
|
11
|
+
* - Extended context windows (up to 200K tokens)
|
|
12
|
+
* - Multiple model support
|
|
13
|
+
*/
|
|
2
14
|
export class GrokClient {
|
|
3
15
|
client;
|
|
4
|
-
currentModel
|
|
16
|
+
currentModel;
|
|
5
17
|
defaultMaxTokens;
|
|
18
|
+
defaultTemperature;
|
|
6
19
|
constructor(apiKey, model, baseURL) {
|
|
7
20
|
this.client = new OpenAI({
|
|
8
21
|
apiKey,
|
|
9
22
|
baseURL: baseURL || process.env.GROK_BASE_URL || "https://api.x.ai/v1",
|
|
10
23
|
timeout: 360000,
|
|
11
24
|
});
|
|
25
|
+
// Set model with validation
|
|
26
|
+
this.currentModel = this.validateModel(model || DEFAULT_MODEL);
|
|
27
|
+
// Get model configuration
|
|
28
|
+
const modelConfig = GLM_MODELS[this.currentModel];
|
|
29
|
+
// Set defaults from environment or model config
|
|
12
30
|
const envMax = Number(process.env.GROK_MAX_TOKENS);
|
|
13
|
-
this.defaultMaxTokens = Number.isFinite(envMax) && envMax > 0
|
|
14
|
-
|
|
15
|
-
|
|
31
|
+
this.defaultMaxTokens = Number.isFinite(envMax) && envMax > 0
|
|
32
|
+
? Math.min(envMax, modelConfig.maxOutputTokens)
|
|
33
|
+
: modelConfig.defaultMaxTokens;
|
|
34
|
+
const envTemp = Number(process.env.GROK_TEMPERATURE);
|
|
35
|
+
this.defaultTemperature = Number.isFinite(envTemp) &&
|
|
36
|
+
envTemp >= modelConfig.temperatureRange.min &&
|
|
37
|
+
envTemp <= modelConfig.temperatureRange.max
|
|
38
|
+
? envTemp
|
|
39
|
+
: modelConfig.defaultTemperature;
|
|
40
|
+
}
|
|
41
|
+
/**
|
|
42
|
+
* Validate and normalize model name
|
|
43
|
+
*/
|
|
44
|
+
validateModel(model) {
|
|
45
|
+
if (model in GLM_MODELS) {
|
|
46
|
+
return model;
|
|
47
|
+
}
|
|
48
|
+
console.warn(`Unknown model "${model}", using default: ${DEFAULT_MODEL}`);
|
|
49
|
+
return DEFAULT_MODEL;
|
|
50
|
+
}
|
|
51
|
+
/**
|
|
52
|
+
* Validate temperature for current model
|
|
53
|
+
*/
|
|
54
|
+
validateTemperature(temperature, model) {
|
|
55
|
+
const config = GLM_MODELS[model];
|
|
56
|
+
const { min, max } = config.temperatureRange;
|
|
57
|
+
if (temperature < min || temperature > max) {
|
|
58
|
+
throw new Error(`Temperature ${temperature} is out of range for model ${model}. ` +
|
|
59
|
+
`Valid range: ${min} - ${max}`);
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
/**
|
|
63
|
+
* Validate max tokens for current model
|
|
64
|
+
*/
|
|
65
|
+
validateMaxTokens(maxTokens, model) {
|
|
66
|
+
const config = GLM_MODELS[model];
|
|
67
|
+
if (maxTokens > config.maxOutputTokens) {
|
|
68
|
+
throw new Error(`Max tokens ${maxTokens} exceeds limit for model ${model}. ` +
|
|
69
|
+
`Maximum: ${config.maxOutputTokens}`);
|
|
70
|
+
}
|
|
71
|
+
if (maxTokens < 1) {
|
|
72
|
+
throw new Error(`Max tokens must be at least 1, got ${maxTokens}`);
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
/**
|
|
76
|
+
* Validate thinking configuration for current model
|
|
77
|
+
*/
|
|
78
|
+
validateThinking(thinking, model) {
|
|
79
|
+
if (thinking && thinking.type === "enabled") {
|
|
80
|
+
const config = GLM_MODELS[model];
|
|
81
|
+
if (!config.supportsThinking) {
|
|
82
|
+
throw new Error(`Thinking mode is not supported by model ${model}. ` +
|
|
83
|
+
`Use glm-4.6 for thinking capabilities.`);
|
|
84
|
+
}
|
|
16
85
|
}
|
|
17
86
|
}
|
|
18
87
|
setModel(model) {
|
|
19
|
-
this.currentModel = model;
|
|
88
|
+
this.currentModel = this.validateModel(model);
|
|
20
89
|
}
|
|
21
90
|
getCurrentModel() {
|
|
22
91
|
return this.currentModel;
|
|
23
92
|
}
|
|
24
|
-
|
|
93
|
+
getModelConfig() {
|
|
94
|
+
return GLM_MODELS[this.currentModel];
|
|
95
|
+
}
|
|
96
|
+
/**
|
|
97
|
+
* Chat completion with GLM-4.6 support
|
|
98
|
+
*
|
|
99
|
+
* @param messages - Conversation messages
|
|
100
|
+
* @param tools - Available tools/functions
|
|
101
|
+
* @param options - Chat options including temperature, thinking mode, etc.
|
|
102
|
+
* @returns Promise<GrokResponse>
|
|
103
|
+
*
|
|
104
|
+
* @example
|
|
105
|
+
* ```typescript
|
|
106
|
+
* const response = await client.chat(messages, tools, {
|
|
107
|
+
* model: 'glm-4.6',
|
|
108
|
+
* temperature: 0.7,
|
|
109
|
+
* thinking: { type: 'enabled' },
|
|
110
|
+
* maxTokens: 8192
|
|
111
|
+
* });
|
|
112
|
+
* ```
|
|
113
|
+
*/
|
|
114
|
+
async chat(messages, tools, options) {
|
|
25
115
|
try {
|
|
116
|
+
// Merge options with defaults
|
|
117
|
+
const model = this.validateModel(options?.model || this.currentModel);
|
|
118
|
+
const temperature = options?.temperature ?? this.defaultTemperature;
|
|
119
|
+
const maxTokens = options?.maxTokens ?? this.defaultMaxTokens;
|
|
120
|
+
const thinking = options?.thinking;
|
|
121
|
+
const searchOptions = options?.searchOptions;
|
|
122
|
+
// Validate parameters
|
|
123
|
+
this.validateTemperature(temperature, model);
|
|
124
|
+
this.validateMaxTokens(maxTokens, model);
|
|
125
|
+
this.validateThinking(thinking, model);
|
|
26
126
|
const requestPayload = {
|
|
27
|
-
model
|
|
127
|
+
model,
|
|
28
128
|
messages,
|
|
29
129
|
tools: tools || [],
|
|
30
130
|
tool_choice: tools && tools.length > 0 ? "auto" : undefined,
|
|
31
|
-
temperature
|
|
32
|
-
max_tokens:
|
|
131
|
+
temperature,
|
|
132
|
+
max_tokens: maxTokens,
|
|
33
133
|
};
|
|
134
|
+
// Add GLM-4.6 thinking parameter if specified
|
|
135
|
+
if (thinking) {
|
|
136
|
+
requestPayload.thinking = thinking;
|
|
137
|
+
}
|
|
34
138
|
// Add search parameters if specified
|
|
35
139
|
if (searchOptions?.search_parameters) {
|
|
36
140
|
requestPayload.search_parameters = searchOptions.search_parameters;
|
|
37
141
|
}
|
|
38
142
|
const response = await this.client.chat.completions.create(requestPayload);
|
|
143
|
+
// Validate response structure
|
|
144
|
+
const validationResult = safeValidateGrokResponse(response);
|
|
145
|
+
if (!validationResult.success) {
|
|
146
|
+
console.warn(createErrorMessage(ErrorCategory.VALIDATION, 'Grok API response validation', validationResult.error || 'Invalid response structure'));
|
|
147
|
+
// Return response anyway for backward compatibility, but log warning
|
|
148
|
+
}
|
|
39
149
|
return response;
|
|
40
150
|
}
|
|
41
151
|
catch (error) {
|
|
42
|
-
|
|
152
|
+
// Enhance error message with context
|
|
153
|
+
const modelInfo = options?.model || this.currentModel;
|
|
154
|
+
throw new Error(`Grok API error (model: ${modelInfo}): ${error.message}`);
|
|
43
155
|
}
|
|
44
156
|
}
|
|
45
|
-
|
|
157
|
+
/**
|
|
158
|
+
* Streaming chat completion with GLM-4.6 support
|
|
159
|
+
*
|
|
160
|
+
* Yields chunks including reasoning_content when thinking is enabled
|
|
161
|
+
*
|
|
162
|
+
* @param messages - Conversation messages
|
|
163
|
+
* @param tools - Available tools/functions
|
|
164
|
+
* @param options - Chat options including temperature, thinking mode, etc.
|
|
165
|
+
* @returns AsyncGenerator yielding GLM46StreamChunk
|
|
166
|
+
*
|
|
167
|
+
* @example
|
|
168
|
+
* ```typescript
|
|
169
|
+
* const stream = client.chatStream(messages, tools, {
|
|
170
|
+
* thinking: { type: 'enabled' }
|
|
171
|
+
* });
|
|
172
|
+
*
|
|
173
|
+
* for await (const chunk of stream) {
|
|
174
|
+
* if (chunk.choices[0]?.delta?.reasoning_content) {
|
|
175
|
+
* console.log('Reasoning:', chunk.choices[0].delta.reasoning_content);
|
|
176
|
+
* }
|
|
177
|
+
* if (chunk.choices[0]?.delta?.content) {
|
|
178
|
+
* console.log('Content:', chunk.choices[0].delta.content);
|
|
179
|
+
* }
|
|
180
|
+
* }
|
|
181
|
+
* ```
|
|
182
|
+
*/
|
|
183
|
+
async *chatStream(messages, tools, options) {
|
|
46
184
|
try {
|
|
185
|
+
// Merge options with defaults
|
|
186
|
+
const model = this.validateModel(options?.model || this.currentModel);
|
|
187
|
+
const temperature = options?.temperature ?? this.defaultTemperature;
|
|
188
|
+
const maxTokens = options?.maxTokens ?? this.defaultMaxTokens;
|
|
189
|
+
const thinking = options?.thinking;
|
|
190
|
+
const searchOptions = options?.searchOptions;
|
|
191
|
+
// Validate parameters
|
|
192
|
+
this.validateTemperature(temperature, model);
|
|
193
|
+
this.validateMaxTokens(maxTokens, model);
|
|
194
|
+
this.validateThinking(thinking, model);
|
|
47
195
|
const requestPayload = {
|
|
48
|
-
model
|
|
196
|
+
model,
|
|
49
197
|
messages,
|
|
50
198
|
tools: tools || [],
|
|
51
199
|
tool_choice: tools && tools.length > 0 ? "auto" : undefined,
|
|
52
|
-
temperature
|
|
53
|
-
max_tokens:
|
|
200
|
+
temperature,
|
|
201
|
+
max_tokens: maxTokens,
|
|
54
202
|
stream: true,
|
|
55
203
|
};
|
|
204
|
+
// Add GLM-4.6 thinking parameter if specified
|
|
205
|
+
if (thinking) {
|
|
206
|
+
requestPayload.thinking = thinking;
|
|
207
|
+
}
|
|
56
208
|
// Add search parameters if specified
|
|
57
209
|
if (searchOptions?.search_parameters) {
|
|
58
210
|
requestPayload.search_parameters = searchOptions.search_parameters;
|
|
@@ -63,9 +215,14 @@ export class GrokClient {
|
|
|
63
215
|
}
|
|
64
216
|
}
|
|
65
217
|
catch (error) {
|
|
66
|
-
|
|
218
|
+
const modelInfo = options?.model || this.currentModel;
|
|
219
|
+
throw new Error(`Grok API streaming error (model: ${modelInfo}): ${error.message}`);
|
|
67
220
|
}
|
|
68
221
|
}
|
|
222
|
+
/**
|
|
223
|
+
* Search with web context (deprecated - use chat with searchOptions)
|
|
224
|
+
* @deprecated Use chat() with searchOptions parameter instead
|
|
225
|
+
*/
|
|
69
226
|
async search(query, searchParameters) {
|
|
70
227
|
const searchMessage = {
|
|
71
228
|
role: "user",
|
|
@@ -74,7 +231,7 @@ export class GrokClient {
|
|
|
74
231
|
const searchOptions = {
|
|
75
232
|
search_parameters: searchParameters || { mode: "on" },
|
|
76
233
|
};
|
|
77
|
-
return this.chat([searchMessage], [],
|
|
234
|
+
return this.chat([searchMessage], [], { searchOptions });
|
|
78
235
|
}
|
|
79
236
|
}
|
|
80
237
|
//# sourceMappingURL=client.js.map
|
package/dist/grok/client.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"client.js","sourceRoot":"","sources":["../../src/grok/client.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;
|
|
1
|
+
{"version":3,"file":"client.js","sourceRoot":"","sources":["../../src/grok/client.ts"],"names":[],"mappings":"AAAA,OAAO,MAAM,MAAM,QAAQ,CAAC;AAE5B,OAAO,EAAE,wBAAwB,EAAE,MAAM,2BAA2B,CAAC;AACrE,OAAO,EAAE,aAAa,EAAE,kBAAkB,EAAE,MAAM,2BAA2B,CAAC;AAC9E,OAAO,EAAE,UAAU,EAAE,aAAa,EAAuB,MAAM,iBAAiB,CAAC;AA0DjF;;;;;;;;GAQG;AACH,MAAM,OAAO,UAAU;IACb,MAAM,CAAS;IACf,YAAY,CAAiB;IAC7B,gBAAgB,CAAS;IACzB,kBAAkB,CAAS;IAEnC,YAAY,MAAc,EAAE,KAAc,EAAE,OAAgB;QAC1D,IAAI,CAAC,MAAM,GAAG,IAAI,MAAM,CAAC;YACvB,MAAM;YACN,OAAO,EAAE,OAAO,IAAI,OAAO,CAAC,GAAG,CAAC,aAAa,IAAI,qBAAqB;YACtE,OAAO,EAAE,MAAM;SAChB,CAAC,CAAC;QAEH,4BAA4B;QAC5B,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,IAAI,aAAa,CAAC,CAAC;QAE/D,0BAA0B;QAC1B,MAAM,WAAW,GAAG,UAAU,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;QAElD,gDAAgD;QAChD,MAAM,MAAM,GAAG,MAAM,CAAC,OAAO,CAAC,GAAG,CAAC,eAAe,CAAC,CAAC;QACnD,IAAI,CAAC,gBAAgB,GAAG,MAAM,CAAC,QAAQ,CAAC,MAAM,CAAC,IAAI,MAAM,GAAG,CAAC;YAC3D,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,WAAW,CAAC,eAAe,CAAC;YAC/C,CAAC,CAAC,WAAW,CAAC,gBAAgB,CAAC;QAEjC,MAAM,OAAO,GAAG,MAAM,CAAC,OAAO,CAAC,GAAG,CAAC,gBAAgB,CAAC,CAAC;QACrD,IAAI,CAAC,kBAAkB,GAAG,MAAM,CAAC,QAAQ,CAAC,OAAO,CAAC;YAChD,OAAO,IAAI,WAAW,CAAC,gBAAgB,CAAC,GAAG;YAC3C,OAAO,IAAI,WAAW,CAAC,gBAAgB,CAAC,GAAG;YAC3C,CAAC,CAAC,OAAO;YACT,CAAC,CAAC,WAAW,CAAC,kBAAkB,CAAC;IACrC,CAAC;IAED;;OAEG;IACK,aAAa,CAAC,KAAa;QACjC,IAAI,KAAK,IAAI,UAAU,EAAE,CAAC;YACxB,OAAO,KAAuB,CAAC;QACjC,CAAC;QACD,OAAO,CAAC,IAAI,CAAC,kBAAkB,KAAK,qBAAqB,aAAa,EAAE,CAAC,CAAC;QAC1E,OAAO,aAAa,CAAC;IACvB,CAAC;IAED;;OAEG;IACK,mBAAmB,CAAC,WAAmB,EAAE,KAAqB;QACpE,MAAM,MAAM,GAAG,UAAU,CAAC,KAAK,CAAC,CAAC;QACjC,MAAM,EAAE,GAAG,EAAE,GAAG,EAAE,GAAG,MAAM,CAAC,gBAAgB,CAAC;QAE7C,IAAI,WAAW,GAAG,GAAG,IAAI,WAAW,GAAG,GAAG,EAAE,CAAC;YAC3C,MAAM,IAAI,KAAK,CACb,eAAe,WAAW,8BAA8B,KAAK,IAAI;gBACjE,gBAAgB,GAAG,MAAM,GAAG,EAAE,CAC/B,CAAC;QACJ,CAAC;IACH,CAAC;IAED;;OAEG;IACK,iBAAiB,CAAC,SAAiB,EAAE,KAAqB;QAChE,MAAM,MAAM,GAAG,UAAU,CAAC,KAAK,CAAC,CAAC;QAEjC,IAAI,SAAS,GAAG,MAAM,CAAC,eAAe,EAAE,CAAC;YACvC,MAAM,IAAI,KAAK,CACb,cAAc,SAAS,4BAA4B,KAAK,IAAI;gBAC
5D,YAAY,MAAM,CAAC,eAAe,EAAE,CACrC,CAAC;QACJ,CAAC;QAED,IAAI,SAAS,GAAG,CAAC,EAAE,CAAC;YAClB,MAAM,IAAI,KAAK,CAAC,sCAAsC,SAAS,EAAE,CAAC,CAAC;QACrE,CAAC;IACH,CAAC;IAED;;OAEG;IACK,gBAAgB,CAAC,QAAoC,EAAE,KAAqB;QAClF,IAAI,QAAQ,IAAI,QAAQ,CAAC,IAAI,KAAK,SAAS,EAAE,CAAC;YAC5C,MAAM,MAAM,GAAG,UAAU,CAAC,KAAK,CAAC,CAAC;YACjC,IAAI,CAAC,MAAM,CAAC,gBAAgB,EAAE,CAAC;gBAC7B,MAAM,IAAI,KAAK,CACb,2CAA2C,KAAK,IAAI;oBACpD,wCAAwC,CACzC,CAAC;YACJ,CAAC;QACH,CAAC;IACH,CAAC;IAED,QAAQ,CAAC,KAAa;QACpB,IAAI,CAAC,YAAY,GAAG,IAAI,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC;IAChD,CAAC;IAED,eAAe;QACb,OAAO,IAAI,CAAC,YAAY,CAAC;IAC3B,CAAC;IAED,cAAc;QACZ,OAAO,UAAU,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC;IACvC,CAAC;IAED;;;;;;;;;;;;;;;;;OAiBG;IACH,KAAK,CAAC,IAAI,CACR,QAAuB,EACvB,KAAkB,EAClB,OAAqB;QAErB,IAAI,CAAC;YACH,8BAA8B;YAC9B,MAAM,KAAK,GAAG,IAAI,CAAC,aAAa,CAAC,OAAO,EAAE,KAAK,IAAI,IAAI,CAAC,YAAY,CAAC,CAAC;YACtE,MAAM,WAAW,GAAG,OAAO,EAAE,WAAW,IAAI,IAAI,CAAC,kBAAkB,CAAC;YACpE,MAAM,SAAS,GAAG,OAAO,EAAE,SAAS,IAAI,IAAI,CAAC,gBAAgB,CAAC;YAC9D,MAAM,QAAQ,GAAG,OAAO,EAAE,QAAQ,CAAC;YACnC,MAAM,aAAa,GAAG,OAAO,EAAE,aAAa,CAAC;YAE7C,sBAAsB;YACtB,IAAI,CAAC,mBAAmB,CAAC,WAAW,EAAE,KAAK,CAAC,CAAC;YAC7C,IAAI,CAAC,iBAAiB,CAAC,SAAS,EAAE,KAAK,CAAC,CAAC;YACzC,IAAI,CAAC,gBAAgB,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;YAEvC,MAAM,cAAc,GAAQ;gBAC1B,KAAK;gBACL,QAAQ;gBACR,KAAK,EAAE,KAAK,IAAI,EAAE;gBAClB,WAAW,EAAE,KAAK,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS;gBAC3D,WAAW;gBACX,UAAU,EAAE,SAAS;aACtB,CAAC;YAEF,8CAA8C;YAC9C,IAAI,QAAQ,EAAE,CAAC;gBACb,cAAc,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACrC,CAAC;YAED,qCAAqC;YACrC,IAAI,aAAa,EAAE,iBAAiB,EAAE,CAAC;gBACrC,cAAc,CAAC,iBAAiB,GAAG,aAAa,CAAC,iBAAiB,CAAC;YACrE,CAAC;YAED,MAAM,QAAQ,GACZ,MAAM,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC,cAAc,CAAC,CAAC;YAE5D,8BAA8B;YAC9B,MAAM,gBAAgB,GAAG,wBAAwB,CAAC,QAAQ,CAAC,CAAC;YAC5D,IAAI,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC;gBAC9B,OAAO,CAAC,IAAI,CACV,kBAAkB,CAChB,aAAa,CAAC,UAAU,EACxB,8BAA8B,EAC9B,gBAAgB,CAAC,KAAK,IAAI,4BAA4B,CACvD,CACF,CAAC;gBACF,qEAAqE;YACvE,CAAC;YAED,OAAO,QAAwB,CAAC;
QAClC,CAAC;QAAC,OAAO,KAAU,EAAE,CAAC;YACpB,qCAAqC;YACrC,MAAM,SAAS,GAAG,OAAO,EAAE,KAAK,IAAI,IAAI,CAAC,YAAY,CAAC;YACtD,MAAM,IAAI,KAAK,CAAC,0BAA0B,SAAS,MAAM,KAAK,CAAC,OAAO,EAAE,CAAC,CAAC;QAC5E,CAAC;IACH,CAAC;IAED;;;;;;;;;;;;;;;;;;;;;;;;;OAyBG;IACH,KAAK,CAAC,CAAC,UAAU,CACf,QAAuB,EACvB,KAAkB,EAClB,OAAqB;QAErB,IAAI,CAAC;YACH,8BAA8B;YAC9B,MAAM,KAAK,GAAG,IAAI,CAAC,aAAa,CAAC,OAAO,EAAE,KAAK,IAAI,IAAI,CAAC,YAAY,CAAC,CAAC;YACtE,MAAM,WAAW,GAAG,OAAO,EAAE,WAAW,IAAI,IAAI,CAAC,kBAAkB,CAAC;YACpE,MAAM,SAAS,GAAG,OAAO,EAAE,SAAS,IAAI,IAAI,CAAC,gBAAgB,CAAC;YAC9D,MAAM,QAAQ,GAAG,OAAO,EAAE,QAAQ,CAAC;YACnC,MAAM,aAAa,GAAG,OAAO,EAAE,aAAa,CAAC;YAE7C,sBAAsB;YACtB,IAAI,CAAC,mBAAmB,CAAC,WAAW,EAAE,KAAK,CAAC,CAAC;YAC7C,IAAI,CAAC,iBAAiB,CAAC,SAAS,EAAE,KAAK,CAAC,CAAC;YACzC,IAAI,CAAC,gBAAgB,CAAC,QAAQ,EAAE,KAAK,CAAC,CAAC;YAEvC,MAAM,cAAc,GAAQ;gBAC1B,KAAK;gBACL,QAAQ;gBACR,KAAK,EAAE,KAAK,IAAI,EAAE;gBAClB,WAAW,EAAE,KAAK,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,SAAS;gBAC3D,WAAW;gBACX,UAAU,EAAE,SAAS;gBACrB,MAAM,EAAE,IAAI;aACb,CAAC;YAEF,8CAA8C;YAC9C,IAAI,QAAQ,EAAE,CAAC;gBACb,cAAc,CAAC,QAAQ,GAAG,QAAQ,CAAC;YACrC,CAAC;YAED,qCAAqC;YACrC,IAAI,aAAa,EAAE,iBAAiB,EAAE,CAAC;gBACrC,cAAc,CAAC,iBAAiB,GAAG,aAAa,CAAC,iBAAiB,CAAC;YACrE,CAAC;YAED,MAAM,MAAM,GAAG,CAAC,MAAM,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CACvD,cAAc,CACf,CAAQ,CAAC;YAEV,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,MAAM,EAAE,CAAC;gBACjC,MAAM,KAAyB,CAAC;YAClC,CAAC;QACH,CAAC;QAAC,OAAO,KAAU,EAAE,CAAC;YACpB,MAAM,SAAS,GAAG,OAAO,EAAE,KAAK,IAAI,IAAI,CAAC,YAAY,CAAC;YACtD,MAAM,IAAI,KAAK,CAAC,oCAAoC,SAAS,MAAM,KAAK,CAAC,OAAO,EAAE,CAAC,CAAC;QACtF,CAAC;IACH,CAAC;IAED;;;OAGG;IACH,KAAK,CAAC,MAAM,CACV,KAAa,EACb,gBAAmC;QAEnC,MAAM,aAAa,GAAgB;YACjC,IAAI,EAAE,MAAM;YACZ,OAAO,EAAE,KAAK;SACf,CAAC;QAEF,MAAM,aAAa,GAAkB;YACnC,iBAAiB,EAAE,gBAAgB,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE;SACtD,CAAC;QAEF,OAAO,IAAI,CAAC,IAAI,CAAC,CAAC,aAAa,CAAC,EAAE,EAAE,EAAE,EAAE,aAAa,EAAE,CAAC,CAAC;IAC3D,CAAC;CACF"}
|
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* GLM-4.6 API Type Definitions
|
|
3
|
+
*
|
|
4
|
+
* This file contains comprehensive type definitions for GLM-4.6 API features,
|
|
5
|
+
* including advanced reasoning mode, configurable parameters, and enhanced
|
|
6
|
+
* response structures.
|
|
7
|
+
*
|
|
8
|
+
* @see https://docs.z.ai/guides/llm/glm-4.6
|
|
9
|
+
*/
|
|
10
|
+
import type { GrokTool, GrokToolCall, SearchOptions } from "./client.js";
|
|
11
|
+
/**
|
|
12
|
+
* Thinking/Reasoning configuration for GLM-4.6
|
|
13
|
+
*
|
|
14
|
+
* When enabled, the model will include reasoning_content in responses,
|
|
15
|
+
* showing the step-by-step thought process before generating the final answer.
|
|
16
|
+
*
|
|
17
|
+
* @example
|
|
18
|
+
* ```typescript
|
|
19
|
+
* const thinking: ThinkingConfig = { type: "enabled" };
|
|
20
|
+
* const response = await client.chat(messages, [], { thinking });
|
|
21
|
+
* ```
|
|
22
|
+
*/
|
|
23
|
+
export interface ThinkingConfig {
|
|
24
|
+
/**
|
|
25
|
+
* Enable or disable thinking mode
|
|
26
|
+
* - "enabled": Include reasoning process in responses
|
|
27
|
+
* - "disabled": Standard response without reasoning
|
|
28
|
+
*/
|
|
29
|
+
type: "enabled" | "disabled";
|
|
30
|
+
}
|
|
31
|
+
/**
|
|
32
|
+
* Comprehensive options for GLM-4.6 chat requests
|
|
33
|
+
*
|
|
34
|
+
* Consolidates all available parameters for chat completions,
|
|
35
|
+
* providing type-safe configuration for GLM-4.6 features.
|
|
36
|
+
*/
|
|
37
|
+
export interface ChatOptions {
|
|
38
|
+
/**
|
|
39
|
+
* Model identifier
|
|
40
|
+
* @default "glm-4.6"
|
|
41
|
+
* @example "glm-4.6", "grok-code-fast-1"
|
|
42
|
+
*/
|
|
43
|
+
model?: string;
|
|
44
|
+
/**
|
|
45
|
+
* Temperature controls randomness in responses
|
|
46
|
+
*
|
|
47
|
+
* - Lower values (0.6): More focused and deterministic
|
|
48
|
+
* - Higher values (1.0): More creative and diverse
|
|
49
|
+
*
|
|
50
|
+
* @default 0.7
|
|
51
|
+
* @minimum 0.6
|
|
52
|
+
* @maximum 1.0
|
|
53
|
+
*/
|
|
54
|
+
temperature?: number;
|
|
55
|
+
/**
|
|
56
|
+
* Maximum number of tokens to generate
|
|
57
|
+
*
|
|
58
|
+
* GLM-4.6 supports up to 128,000 output tokens
|
|
59
|
+
*
|
|
60
|
+
* @default 8192
|
|
61
|
+
* @maximum 128000
|
|
62
|
+
*/
|
|
63
|
+
maxTokens?: number;
|
|
64
|
+
/**
|
|
65
|
+
* Enable/disable advanced reasoning mode
|
|
66
|
+
*
|
|
67
|
+
* When enabled, responses include reasoning_content showing
|
|
68
|
+
* the model's step-by-step thought process.
|
|
69
|
+
*/
|
|
70
|
+
thinking?: ThinkingConfig;
|
|
71
|
+
/**
|
|
72
|
+
* Search parameters for web-enabled queries
|
|
73
|
+
*/
|
|
74
|
+
searchOptions?: SearchOptions;
|
|
75
|
+
/**
|
|
76
|
+
* Tools/functions available for the model to call
|
|
77
|
+
*/
|
|
78
|
+
tools?: GrokTool[];
|
|
79
|
+
/**
|
|
80
|
+
* Enable streaming responses
|
|
81
|
+
* @default false
|
|
82
|
+
*/
|
|
83
|
+
stream?: boolean;
|
|
84
|
+
}
|
|
85
|
+
/**
|
|
86
|
+
* GLM-4.6 enhanced response structure
|
|
87
|
+
*
|
|
88
|
+
* Extends the standard response with reasoning content and
|
|
89
|
+
* enhanced usage metrics.
|
|
90
|
+
*/
|
|
91
|
+
export interface GLM46Response {
|
|
92
|
+
id: string;
|
|
93
|
+
object: string;
|
|
94
|
+
created: number;
|
|
95
|
+
model: string;
|
|
96
|
+
choices: Array<{
|
|
97
|
+
index: number;
|
|
98
|
+
message: {
|
|
99
|
+
role: string;
|
|
100
|
+
content: string | null;
|
|
101
|
+
/**
|
|
102
|
+
* Reasoning process (only present when thinking is enabled)
|
|
103
|
+
* Contains the step-by-step thought process before the final answer
|
|
104
|
+
*/
|
|
105
|
+
reasoning_content?: string;
|
|
106
|
+
tool_calls?: GrokToolCall[];
|
|
107
|
+
};
|
|
108
|
+
finish_reason: string;
|
|
109
|
+
}>;
|
|
110
|
+
/**
|
|
111
|
+
* Token usage statistics
|
|
112
|
+
*/
|
|
113
|
+
usage?: {
|
|
114
|
+
prompt_tokens: number;
|
|
115
|
+
completion_tokens: number;
|
|
116
|
+
total_tokens: number;
|
|
117
|
+
/**
|
|
118
|
+
* Tokens used for reasoning (only when thinking is enabled)
|
|
119
|
+
*/
|
|
120
|
+
reasoning_tokens?: number;
|
|
121
|
+
};
|
|
122
|
+
}
|
|
123
|
+
/**
|
|
124
|
+
* GLM-4.6 streaming response chunk
|
|
125
|
+
*
|
|
126
|
+
* Individual chunks received during streaming responses,
|
|
127
|
+
* including support for reasoning content.
|
|
128
|
+
*/
|
|
129
|
+
export interface GLM46StreamChunk {
|
|
130
|
+
id: string;
|
|
131
|
+
object: string;
|
|
132
|
+
created: number;
|
|
133
|
+
model: string;
|
|
134
|
+
choices: Array<{
|
|
135
|
+
index: number;
|
|
136
|
+
delta: {
|
|
137
|
+
role?: string;
|
|
138
|
+
/**
|
|
139
|
+
* Incremental content from the final response
|
|
140
|
+
*/
|
|
141
|
+
content?: string;
|
|
142
|
+
/**
|
|
143
|
+
* Incremental reasoning content (when thinking is enabled)
|
|
144
|
+
* Shows the model's thought process as it develops
|
|
145
|
+
*/
|
|
146
|
+
reasoning_content?: string;
|
|
147
|
+
tool_calls?: Array<{
|
|
148
|
+
index: number;
|
|
149
|
+
id?: string;
|
|
150
|
+
type?: "function";
|
|
151
|
+
function?: {
|
|
152
|
+
name?: string;
|
|
153
|
+
arguments?: string;
|
|
154
|
+
};
|
|
155
|
+
}>;
|
|
156
|
+
};
|
|
157
|
+
finish_reason?: string | null;
|
|
158
|
+
}>;
|
|
159
|
+
}
|
|
160
|
+
/**
|
|
161
|
+
* Type guard to check if a response is a GLM-4.6 response
|
|
162
|
+
*/
|
|
163
|
+
export declare function isGLM46Response(response: unknown): response is GLM46Response;
|
|
164
|
+
/**
|
|
165
|
+
* Type guard to check if a chunk has reasoning content
|
|
166
|
+
*/
|
|
167
|
+
export declare function hasReasoningContent(chunk: GLM46StreamChunk): chunk is GLM46StreamChunk & {
|
|
168
|
+
choices: Array<{
|
|
169
|
+
delta: {
|
|
170
|
+
reasoning_content: string;
|
|
171
|
+
};
|
|
172
|
+
}>;
|
|
173
|
+
};
|
|
174
|
+
/**
|
|
175
|
+
* GLM-4.6 model configurations
|
|
176
|
+
*
|
|
177
|
+
* Defines capabilities and limits for supported models
|
|
178
|
+
*/
|
|
179
|
+
export declare const GLM_MODELS: {
|
|
180
|
+
readonly "glm-4.6": {
|
|
181
|
+
readonly contextWindow: 200000;
|
|
182
|
+
readonly maxOutputTokens: 128000;
|
|
183
|
+
readonly supportsThinking: true;
|
|
184
|
+
readonly defaultTemperature: 0.7;
|
|
185
|
+
readonly temperatureRange: {
|
|
186
|
+
readonly min: 0.6;
|
|
187
|
+
readonly max: 1;
|
|
188
|
+
};
|
|
189
|
+
readonly tokenEfficiency: 1.3;
|
|
190
|
+
};
|
|
191
|
+
readonly "grok-code-fast-1": {
|
|
192
|
+
readonly contextWindow: 128000;
|
|
193
|
+
readonly maxOutputTokens: 4096;
|
|
194
|
+
readonly supportsThinking: false;
|
|
195
|
+
readonly defaultTemperature: 0.7;
|
|
196
|
+
readonly temperatureRange: {
|
|
197
|
+
readonly min: 0;
|
|
198
|
+
readonly max: 2;
|
|
199
|
+
};
|
|
200
|
+
readonly tokenEfficiency: 1;
|
|
201
|
+
};
|
|
202
|
+
readonly "glm-4-air": {
|
|
203
|
+
readonly contextWindow: 128000;
|
|
204
|
+
readonly maxOutputTokens: 8192;
|
|
205
|
+
readonly supportsThinking: false;
|
|
206
|
+
readonly defaultTemperature: 0.7;
|
|
207
|
+
readonly temperatureRange: {
|
|
208
|
+
readonly min: 0.6;
|
|
209
|
+
readonly max: 1;
|
|
210
|
+
};
|
|
211
|
+
readonly tokenEfficiency: 1.15;
|
|
212
|
+
};
|
|
213
|
+
readonly "glm-4-airx": {
|
|
214
|
+
readonly contextWindow: 8192;
|
|
215
|
+
readonly maxOutputTokens: 8192;
|
|
216
|
+
readonly supportsThinking: false;
|
|
217
|
+
readonly defaultTemperature: 0.7;
|
|
218
|
+
readonly temperatureRange: {
|
|
219
|
+
readonly min: 0.6;
|
|
220
|
+
readonly max: 1;
|
|
221
|
+
};
|
|
222
|
+
readonly tokenEfficiency: 1.1;
|
|
223
|
+
};
|
|
224
|
+
};
|
|
225
|
+
export type SupportedModel = keyof typeof GLM_MODELS;
|
|
226
|
+
/**
|
|
227
|
+
* Get model configuration by name
|
|
228
|
+
*/
|
|
229
|
+
export declare function getModelConfig(model: string): {
|
|
230
|
+
readonly contextWindow: 200000;
|
|
231
|
+
readonly maxOutputTokens: 128000;
|
|
232
|
+
readonly supportsThinking: true;
|
|
233
|
+
readonly defaultTemperature: 0.7;
|
|
234
|
+
readonly temperatureRange: {
|
|
235
|
+
readonly min: 0.6;
|
|
236
|
+
readonly max: 1;
|
|
237
|
+
};
|
|
238
|
+
readonly tokenEfficiency: 1.3;
|
|
239
|
+
} | {
|
|
240
|
+
readonly contextWindow: 128000;
|
|
241
|
+
readonly maxOutputTokens: 4096;
|
|
242
|
+
readonly supportsThinking: false;
|
|
243
|
+
readonly defaultTemperature: 0.7;
|
|
244
|
+
readonly temperatureRange: {
|
|
245
|
+
readonly min: 0;
|
|
246
|
+
readonly max: 2;
|
|
247
|
+
};
|
|
248
|
+
readonly tokenEfficiency: 1;
|
|
249
|
+
} | {
|
|
250
|
+
readonly contextWindow: 128000;
|
|
251
|
+
readonly maxOutputTokens: 8192;
|
|
252
|
+
readonly supportsThinking: false;
|
|
253
|
+
readonly defaultTemperature: 0.7;
|
|
254
|
+
readonly temperatureRange: {
|
|
255
|
+
readonly min: 0.6;
|
|
256
|
+
readonly max: 1;
|
|
257
|
+
};
|
|
258
|
+
readonly tokenEfficiency: 1.15;
|
|
259
|
+
} | {
|
|
260
|
+
readonly contextWindow: 8192;
|
|
261
|
+
readonly maxOutputTokens: 8192;
|
|
262
|
+
readonly supportsThinking: false;
|
|
263
|
+
readonly defaultTemperature: 0.7;
|
|
264
|
+
readonly temperatureRange: {
|
|
265
|
+
readonly min: 0.6;
|
|
266
|
+
readonly max: 1;
|
|
267
|
+
};
|
|
268
|
+
readonly tokenEfficiency: 1.1;
|
|
269
|
+
};
|
|
270
|
+
/**
 * Validate temperature for a given model
 *
 * @param temperature - Sampling temperature to check against the model's range
 * @param model - Model identifier (unknown names use the glm-4.6 limits)
 * @throws Error if temperature is out of valid range
 */
export declare function validateTemperature(temperature: number, model: string): void;
|
|
276
|
+
/**
 * Validate max tokens for a given model
 *
 * @param maxTokens - Requested output-token budget (must be at least 1)
 * @param model - Model identifier (unknown names use the glm-4.6 limits)
 * @throws Error if maxTokens exceeds model limit
 */
export declare function validateMaxTokens(maxTokens: number, model: string): void;
|
|
282
|
+
/**
 * Validate thinking configuration for a given model
 *
 * No-op when thinking is undefined or not of type "enabled".
 * @param thinking - Thinking configuration, if any
 * @param model - Model identifier
 * @throws Error if thinking is not supported by the model
 */
export declare function validateThinking(thinking: ThinkingConfig | undefined, model: string): void;
|
|
288
|
+
/**
 * Create default chat options with sensible defaults
 *
 * @param model - Model identifier; defaults to "glm-4.6"
 * @returns Options carrying the model name, its default temperature, a
 *          conservative maxTokens (capped at 8192), and stream: false
 */
export declare function createDefaultChatOptions(model?: string): Required<Omit<ChatOptions, 'thinking' | 'searchOptions' | 'tools'>>;
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* GLM-4.6 API Type Definitions
|
|
3
|
+
*
|
|
4
|
+
* This file contains comprehensive type definitions for GLM-4.6 API features,
|
|
5
|
+
* including advanced reasoning mode, configurable parameters, and enhanced
|
|
6
|
+
* response structures.
|
|
7
|
+
*
|
|
8
|
+
* @see https://docs.z.ai/guides/llm/glm-4.6
|
|
9
|
+
*/
|
|
10
|
+
/**
 * Type guard: does `response` look like a GLM-4.6 chat completion response?
 *
 * Checks only the structural minimum relied upon elsewhere: a non-null
 * object carrying an array-valued `choices` property.
 *
 * @param {unknown} response - Candidate value to inspect
 * @returns {boolean} true when the value has the GLM-4.6 response shape
 */
export function isGLM46Response(response) {
    if (typeof response !== 'object' || response === null) {
        return false;
    }
    return 'choices' in response && Array.isArray(response.choices);
}
|
|
19
|
+
/**
 * Type guard to check if a streaming chunk carries reasoning content.
 *
 * @param {object} chunk - Streaming completion chunk to inspect
 * @returns {boolean} true when choices[0].delta.reasoning_content is a
 *          non-empty string
 */
export function hasReasoningContent(chunk) {
    // A guard must never throw: the original `chunk.choices.length` access
    // raised a TypeError on chunks without a choices array, so walk the
    // whole path with optional chaining instead.
    const reasoning = chunk?.choices?.[0]?.delta?.reasoning_content;
    return typeof reasoning === 'string' && reasoning.length > 0;
}
|
|
27
|
+
/**
 * GLM-4.6 model configurations
 *
 * Defines capabilities and limits for supported models. Each record is read
 * by getModelConfig() and the validate*/createDefault* helpers below:
 *  - contextWindow / maxOutputTokens: token limits (enforced by validateMaxTokens)
 *  - supportsThinking: gate checked by validateThinking
 *  - defaultTemperature / temperatureRange: used by createDefaultChatOptions
 *    and validateTemperature
 *  - tokenEfficiency: relative efficiency multiplier (1.0 = baseline)
 */
export const GLM_MODELS = {
    "glm-4.6": {
        contextWindow: 200000, // 200K tokens
        maxOutputTokens: 128000, // 128K max output
        supportsThinking: true, // only model in this table with thinking mode
        defaultTemperature: 0.7,
        temperatureRange: { min: 0.6, max: 1.0 },
        tokenEfficiency: 1.3, // 30% more efficient
    },
    // NOTE(review): an xAI model id in a GLM table — presumably kept for
    // backward compatibility with earlier grok-based releases; confirm.
    "grok-code-fast-1": {
        contextWindow: 128000, // 128K tokens
        maxOutputTokens: 4096,
        supportsThinking: false,
        defaultTemperature: 0.7,
        temperatureRange: { min: 0.0, max: 2.0 }, // wider range than the GLM entries
        tokenEfficiency: 1.0,
    },
    "glm-4-air": {
        contextWindow: 128000,
        maxOutputTokens: 8192,
        supportsThinking: false,
        defaultTemperature: 0.7,
        temperatureRange: { min: 0.6, max: 1.0 },
        tokenEfficiency: 1.15,
    },
    "glm-4-airx": {
        contextWindow: 8192, // much smaller context than the other entries
        maxOutputTokens: 8192,
        supportsThinking: false,
        defaultTemperature: 0.7,
        temperatureRange: { min: 0.6, max: 1.0 },
        tokenEfficiency: 1.1,
    },
};
|
|
66
|
+
/**
 * Get model configuration by name.
 *
 * @param {string} model - Model identifier, e.g. "glm-4.6" or "glm-4-air"
 * @returns The model's capability record; unknown names fall back to the
 *          "glm-4.6" configuration
 */
export function getModelConfig(model) {
    // Object.hasOwn avoids matching inherited prototype keys: the previous
    // bare `GLM_MODELS[model] || ...` lookup returned Object.prototype
    // members (e.g. model === "toString") instead of the fallback config.
    return Object.hasOwn(GLM_MODELS, model)
        ? GLM_MODELS[model]
        : GLM_MODELS["glm-4.6"];
}
|
|
72
|
+
/**
 * Validate temperature for a given model.
 *
 * @param {number} temperature - Sampling temperature to check
 * @param {string} model - Model identifier (unknown names use glm-4.6 limits)
 * @throws {Error} if temperature is not a finite number or is out of the
 *         model's valid range
 */
export function validateTemperature(temperature, model) {
    const config = getModelConfig(model);
    const { min, max } = config.temperatureRange;
    // NaN slips through plain range comparisons (NaN < min and NaN > max are
    // both false), so a validator must reject non-finite values explicitly.
    if (!Number.isFinite(temperature) || temperature < min || temperature > max) {
        throw new Error(`Temperature ${temperature} is out of range for model ${model}. ` +
            `Valid range: ${min} - ${max}`);
    }
}
|
|
85
|
+
/**
 * Validate max tokens for a given model.
 *
 * @param {number} maxTokens - Requested output-token budget
 * @param {string} model - Model identifier (unknown names use glm-4.6 limits)
 * @throws {Error} if maxTokens is not a positive integer within the model's
 *         output limit
 */
export function validateMaxTokens(maxTokens, model) {
    const config = getModelConfig(model);
    // A token count must be a whole number; without this guard NaN and
    // fractional values pass both comparisons below and are accepted silently.
    if (!Number.isInteger(maxTokens)) {
        throw new Error(`Max tokens must be a positive integer, got ${maxTokens}`);
    }
    if (maxTokens > config.maxOutputTokens) {
        throw new Error(`Max tokens ${maxTokens} exceeds model limit for ${model}. ` +
            `Maximum: ${config.maxOutputTokens}`);
    }
    if (maxTokens < 1) {
        throw new Error(`Max tokens must be at least 1, got ${maxTokens}`);
    }
}
|
|
100
|
+
/**
 * Validate thinking configuration for a given model.
 *
 * No-op unless thinking is explicitly enabled; only then is the model's
 * capability table consulted.
 *
 * @param {{type?: string} | undefined} thinking - Thinking config, if any
 * @param {string} model - Model identifier
 * @throws {Error} if thinking is enabled but the model does not support it
 */
export function validateThinking(thinking, model) {
    if (thinking?.type !== "enabled") {
        return;
    }
    const modelConfig = getModelConfig(model);
    if (modelConfig.supportsThinking) {
        return;
    }
    throw new Error(`Thinking mode is not supported by model ${model}. ` +
        `Use glm-4.6 for thinking capabilities.`);
}
|
|
114
|
+
/**
 * Create default chat options with sensible defaults.
 *
 * @param {string} [model] - Model identifier; defaults to "glm-4.6"
 * @returns Options carrying the model name, its default temperature, a
 *          conservative maxTokens (capped at 8192), and streaming disabled
 */
export function createDefaultChatOptions(model) {
    const chosenModel = model || "glm-4.6";
    const { defaultTemperature, maxOutputTokens } = getModelConfig(chosenModel);
    // Conservative default: never request more than 8K output tokens, even
    // when the model's limit is higher.
    const cappedMaxTokens = maxOutputTokens < 8192 ? maxOutputTokens : 8192;
    return {
        model: chosenModel,
        temperature: defaultTemperature,
        maxTokens: cappedMaxTokens,
        stream: false,
    };
}
|
|
127
|
+
//# sourceMappingURL=types.js.map
|