@ebowwa/ai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +106 -0
- package/dist/client.d.ts +179 -0
- package/dist/client.js +492 -0
- package/dist/index.d.ts +20 -0
- package/dist/index.js +18 -0
- package/dist/prompts.d.ts +222 -0
- package/dist/prompts.js +462 -0
- package/dist/schemas/ai.d.ts +1335 -0
- package/dist/schemas/ai.js +416 -0
- package/dist/schemas/glm.d.ts +16 -0
- package/dist/schemas/glm.js +25 -0
- package/dist/schemas/index.d.ts +5 -0
- package/dist/schemas/index.js +5 -0
- package/dist/types.d.ts +13 -0
- package/dist/types.js +13 -0
- package/package.json +78 -0
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Zod schemas for AI module runtime validation
|
|
3
|
+
*
|
|
4
|
+
* Provides:
|
|
5
|
+
* - Runtime type validation for all AI operations
|
|
6
|
+
* - Type inference for TypeScript types
|
|
7
|
+
* - Safe parsing with error handling
|
|
8
|
+
* - Streaming support for Server-Sent Events
|
|
9
|
+
* - Reusable schemas across multiple AI providers
|
|
10
|
+
*/
|
|
11
|
+
import { z } from "zod";
|
|
12
|
+
// ============================================================================
|
|
13
|
+
// BASE SCHEMAS
|
|
14
|
+
// ============================================================================
|
|
15
|
+
/**
 * Chat message role (OpenAI-compatible)
 *
 * Only the three core roles are modeled; "tool"/"function" roles are not
 * accepted by this schema.
 */
export const ChatMessageRoleSchema = z.enum(["system", "user", "assistant"]);
/**
 * Base chat message (OpenAI-compatible)
 *
 * Minimal shape: a role plus plain-text content. Multi-part content
 * (arrays, images) is not modeled here.
 */
export const ChatMessageSchema = z.object({
    role: ChatMessageRoleSchema,
    content: z.string(),
});
|
|
26
|
+
/**
 * Token usage information (camelCase)
 *
 * Internal representation; produced from RawUsageSchema data by convertUsage.
 * All counts are non-negative integers.
 */
export const TokenUsageSchema = z.object({
    promptTokens: z.number().int().nonnegative(),
    completionTokens: z.number().int().nonnegative(),
    totalTokens: z.number().int().nonnegative(),
});
/**
 * Raw API usage (snake_case from most APIs)
 *
 * Wire format as returned by OpenAI-compatible endpoints; convert to the
 * camelCase TokenUsageSchema shape via convertUsage before internal use.
 */
export const RawUsageSchema = z.object({
    prompt_tokens: z.number().int().nonnegative(),
    completion_tokens: z.number().int().nonnegative(),
    total_tokens: z.number().int().nonnegative(),
});
|
|
42
|
+
// ============================================================================
|
|
43
|
+
// REQUEST SCHEMAS
|
|
44
|
+
// ============================================================================
|
|
45
|
+
/**
 * Base AI request options shared across all AI requests
 *
 * All fields optional: omitted values are left to the provider/client defaults.
 */
export const BaseAIRequestOptionsSchema = z.object({
    model: z.string().optional(),
    // Sampling temperature, constrained to the 0–2 range.
    temperature: z.number().min(0).max(2).optional(),
    maxTokens: z.number().int().positive().optional(),
});
/**
 * Chat completion options (extends base with stream/timeout/retry)
 *
 * Parsing APPLIES defaults (zod .default): stream=false, timeout=30000
 * (presumably milliseconds — confirm against the client), maxRetries=3.
 * So the parsed output always contains these three fields.
 */
export const ChatCompletionOptionsSchema = BaseAIRequestOptionsSchema.extend({
    stream: z.boolean().optional().default(false),
    timeout: z.number().int().positive().optional().default(30000),
    maxRetries: z.number().int().nonnegative().optional().default(3),
});
/**
 * Simple prompt-based AI request
 *
 * Single-turn convenience shape: one user prompt plus an optional system prompt.
 */
export const AIRequestSchema = BaseAIRequestOptionsSchema.extend({
    prompt: z.string(),
    systemPrompt: z.string().optional(),
});
/**
 * Multi-turn chat request
 *
 * Requires at least one message.
 */
export const AIChatRequestSchema = BaseAIRequestOptionsSchema.extend({
    messages: z.array(ChatMessageSchema).min(1),
});
|
|
74
|
+
// ============================================================================
|
|
75
|
+
// RESPONSE SCHEMAS
|
|
76
|
+
// ============================================================================
|
|
77
|
+
/**
 * Latency information
 *
 * totalMs is the numeric duration; `formatted` is a human-readable rendering
 * of the same value (produced by the client — see caller).
 */
export const LatencyInfoSchema = z.object({
    totalMs: z.number().nonnegative(),
    formatted: z.string(),
});
/**
 * Raw API response choice (snake_case from API)
 *
 * One element of the `choices` array in an OpenAI-compatible completion
 * response. `role` is a free-form string here (not ChatMessageRoleSchema)
 * so unexpected provider roles do not fail validation.
 */
export const RawChoiceSchema = z.object({
    index: z.number().int().nonnegative(),
    message: z.object({
        role: z.string(),
        content: z.string(),
    }),
    finish_reason: z.string(),
});
|
|
95
|
+
/**
 * Raw chat completion response from API (snake_case)
 *
 * Wire format of a full (non-streaming) completion. At least one choice is
 * required; `usage` may be absent depending on the provider.
 */
export const RawChatCompletionResponseSchema = z.object({
    id: z.string(),
    object: z.string(),
    // Unix timestamp (seconds) per the OpenAI wire format — TODO confirm.
    created: z.number().int().nonnegative(),
    model: z.string(),
    choices: z.array(RawChoiceSchema).min(1),
    usage: RawUsageSchema.optional(),
});
|
|
106
|
+
/**
 * Chat completion response (internal camelCase)
 *
 * Same envelope as the raw response plus optional converted usage
 * (TokenUsageSchema, camelCase) and optional latency info.
 *
 * NOTE(review): despite the "camelCase" label, the choice objects keep the
 * wire-format `finish_reason` key; renaming it would break consumers, so the
 * raw choice schema is reused directly — the inline duplicate it replaces was
 * structurally identical (index/message/finish_reason), so validation
 * behavior is unchanged.
 */
export const ChatCompletionResponseSchema = z.object({
    id: z.string(),
    object: z.string(),
    created: z.number().int().nonnegative(),
    model: z.string(),
    choices: z.array(RawChoiceSchema).min(1),
    usage: TokenUsageSchema.optional(),
    latency: LatencyInfoSchema.optional(),
});
|
|
127
|
+
/**
 * Simple AI response
 *
 * Result envelope for single-prompt requests. `content` is expected when
 * success is true and `error` when it is false, but the schema does not
 * enforce that pairing — both are optional.
 */
export const AIResponseSchema = z.object({
    success: z.boolean(),
    content: z.string().optional(),
    error: z.string().optional(),
    usage: TokenUsageSchema.optional(),
});
/**
 * Chat response
 *
 * Result envelope for multi-turn chat requests; same success/error pattern
 * as AIResponseSchema but with `message` instead of `content`.
 */
export const AIChatResponseSchema = z.object({
    success: z.boolean(),
    message: z.string().optional(),
    usage: TokenUsageSchema.optional(),
    error: z.string().optional(),
});
|
|
145
|
+
// ============================================================================
|
|
146
|
+
// STREAMING SCHEMAS
|
|
147
|
+
// ============================================================================
|
|
148
|
+
/**
 * Stream delta content (incremental content from streaming responses)
 *
 * Both fields optional: providers typically send `role` only in the first
 * chunk and `content` in subsequent ones — confirm against the API.
 */
export const StreamDeltaSchema = z.object({
    role: z.string().optional(),
    content: z.string().optional(),
});
/**
 * Stream chunk type
 *
 * Discriminator for StreamChunkSchema: incremental text, end-of-stream, or error.
 */
export const StreamChunkTypeSchema = z.enum(["text", "done", "error"]);
|
|
159
|
+
/**
 * Raw stream chunk from API (snake_case)
 *
 * One parsed SSE `data:` payload from a streaming completion. Validated in
 * parseSSEStream before conversion to StreamChunkSchema.
 */
export const RawStreamChunkSchema = z.object({
    id: z.string(),
    object: z.string(),
    created: z.number().int().nonnegative(),
    model: z.string(),
    choices: z
        .array(z.object({
        index: z.number().int().nonnegative(),
        delta: StreamDeltaSchema,
        // null while streaming, a reason string on the final chunk;
        // some providers omit it entirely, hence nullable AND optional.
        finish_reason: z.string().nullable().optional(),
    }))
        .min(1),
    // Usage generally appears only on the final chunk, if at all.
    usage: RawUsageSchema.optional(),
});
|
|
176
|
+
/**
 * Processed stream chunk (internal camelCase)
 *
 * Tagged-union-by-convention on `type` (see StreamChunkTypeSchema):
 * "text" chunks carry id/content (and possibly finishReason/usage),
 * "error" chunks carry `error`, "done" chunks carry nothing extra.
 * The schema keeps all fields optional rather than discriminating strictly.
 */
export const StreamChunkSchema = z.object({
    type: StreamChunkTypeSchema,
    id: z.string().optional(),
    content: z.string().optional(),
    finishReason: z.string().optional(),
    usage: TokenUsageSchema.optional(),
    error: z.string().optional(),
});
|
|
187
|
+
// ============================================================================
|
|
188
|
+
// ERROR SCHEMAS
|
|
189
|
+
// ============================================================================
|
|
190
|
+
/**
 * Generic API error response (most providers use this format)
 *
 * The `error` envelope itself is optional, so safeParse succeeds on many
 * non-error objects too — callers (see categorizeError) must check that
 * `.data.error` is actually present.
 */
export const APIErrorResponseSchema = z.object({
    error: z
        .object({
        message: z.string(),
        type: z.string().optional(),
        code: z.string().optional(),
    })
        .optional(),
});
/**
 * Error types categorization
 *
 * Buckets produced by categorizeError.
 */
export const ErrorTypeSchema = z.enum([
    "timeout",
    "auth",
    "rate_limit",
    "network",
    "validation",
    "unknown",
]);
|
|
213
|
+
// ============================================================================
|
|
214
|
+
// VALIDATION HELPERS
|
|
215
|
+
// ============================================================================
|
|
216
|
+
/**
 * Validate chat message
 * @param {unknown} data - candidate message object
 * @returns zod SafeParseReturnType — check `.success` before reading `.data`
 */
export function validateChatMessage(data) {
    return ChatMessageSchema.safeParse(data);
}
/**
 * Validate chat messages array
 * @param {unknown} data - candidate array; must contain at least one message
 * @returns zod SafeParseReturnType
 */
export function validateChatMessages(data) {
    return z.array(ChatMessageSchema).min(1).safeParse(data);
}
/**
 * Validate chat completion options
 *
 * Note: on success `.data` has stream/timeout/maxRetries defaults applied.
 * @param {unknown} data - candidate options object
 * @returns zod SafeParseReturnType
 */
export function validateChatCompletionOptions(data) {
    return ChatCompletionOptionsSchema.safeParse(data);
}
/**
 * Validate raw API response
 * @param {unknown} data - candidate raw (snake_case) completion response
 * @returns zod SafeParseReturnType
 */
export function validateRawResponse(data) {
    return RawChatCompletionResponseSchema.safeParse(data);
}
|
|
240
|
+
/**
 * Validate and convert raw usage to internal format.
 *
 * @param {unknown} raw - candidate snake_case usage object from the API
 * @returns {{promptTokens: number, completionTokens: number, totalTokens: number}|null}
 *   camelCase usage, or null when `raw` does not match RawUsageSchema
 */
export function convertUsage(raw) {
    const parsed = RawUsageSchema.safeParse(raw);
    if (!parsed.success) {
        return null;
    }
    const { prompt_tokens, completion_tokens, total_tokens } = parsed.data;
    return {
        promptTokens: prompt_tokens,
        completionTokens: completion_tokens,
        totalTokens: total_tokens,
    };
}
|
|
253
|
+
/**
 * Categorize error from unknown data.
 * Generic error categorization that works across providers.
 *
 * Checks native Error instances first (keyword match on the message, then
 * TypeError/"fetch" for network failures), then provider error payloads
 * matching APIErrorResponseSchema. Anything unrecognized is "unknown".
 *
 * Fix: the Error branch previously matched case-SENSITIVELY (so
 * "Request Timeout" fell through to "unknown") while the payload branch
 * lowercased first, and it also lacked the "invalid api key" check the
 * payload branch had. Both branches now lowercase and share keyword sets.
 *
 * @param {unknown} data - Error instance, provider error payload, or anything else
 * @returns {"timeout"|"auth"|"rate_limit"|"network"|"unknown"} error bucket
 */
export function categorizeError(data) {
    if (data instanceof Error) {
        // Lowercase once so matching is case-insensitive, consistent with
        // the API-payload branch below.
        const msg = data.message.toLowerCase();
        if (msg.includes("timeout")) {
            return "timeout";
        }
        if (msg.includes("unauthorized") ||
            msg.includes("invalid api key") ||
            msg.includes("401")) {
            return "auth";
        }
        if (msg.includes("rate limit") || msg.includes("429")) {
            return "rate_limit";
        }
        if (data instanceof TypeError || msg.includes("fetch")) {
            return "network";
        }
        // An Error never matches the payload shape below (no `.error` key),
        // so return directly instead of falling through to safeParse.
        return "unknown";
    }
    const errorResult = APIErrorResponseSchema.safeParse(data);
    if (errorResult.success && errorResult.data.error) {
        const errorMsg = errorResult.data.error.message.toLowerCase();
        if (errorMsg.includes("timeout"))
            return "timeout";
        if (errorMsg.includes("unauthorized") ||
            errorMsg.includes("invalid api key") ||
            errorMsg.includes("401"))
            return "auth";
        if (errorMsg.includes("rate limit") || errorMsg.includes("429"))
            return "rate_limit";
    }
    return "unknown";
}
|
|
286
|
+
// ============================================================================
|
|
287
|
+
// STREAMING HELPERS
|
|
288
|
+
// ============================================================================
|
|
289
|
+
/**
 * Parse a single SSE line.
 *
 * Returns the payload of a `data:` field with the optional single leading
 * space stripped — per the SSE spec, "data: x" and "data:x" both carry
 * payload "x" (the original only accepted the spaced form). Returns null
 * for blank lines and non-data fields ("event:", "id:", ":" comments).
 * The "[DONE]" sentinel is returned verbatim for the caller to detect.
 *
 * Also removes the redundant empty-string double-check and the dead
 * "[DONE]" branch that returned the same value as the fall-through.
 *
 * @param {string} line - one line of an SSE stream
 * @returns {string|null} payload string, or null when the line carries no data
 */
export function parseSSERow(line) {
    const trimmed = line.trim();
    // Blank lines and non-data fields carry no payload for us.
    if (!trimmed.startsWith("data:")) {
        return null;
    }
    // Drop the "data:" field name plus the optional single space after it.
    let data = trimmed.slice(5);
    if (data.startsWith(" ")) {
        data = data.slice(1);
    }
    return data;
}
|
|
304
|
+
/**
 * Parse SSE response body and yield stream chunks.
 * Converts raw SSE lines to processed StreamChunk objects.
 *
 * Yields, in order: {type:"text", ...} for each valid data chunk,
 * {type:"error", error} for null bodies / invalid chunks / JSON failures
 * (parsing continues after per-chunk errors), and {type:"done"} when the
 * "[DONE]" sentinel arrives (generator then returns).
 *
 * NOTE(review): if the stream ends without a trailing newline, the final
 * buffered line is dropped — most SSE servers terminate lines, but confirm.
 *
 * @param {Response} response - fetch Response with a readable SSE body
 * @yields {object} StreamChunkSchema-shaped chunk
 */
export async function* parseSSEStream(response) {
    if (!response.body) {
        yield {
            type: "error",
            error: "Response body is null",
        };
        return;
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    try {
        while (true) {
            const { done, value } = await reader.read();
            if (done)
                break;
            // stream:true carries split multi-byte UTF-8 sequences across reads.
            buffer += decoder.decode(value, { stream: true });
            const lines = buffer.split("\n");
            buffer = lines.pop() || ""; // Keep the last incomplete line in buffer
            for (const line of lines) {
                const data = parseSSERow(line);
                // Skips non-data lines; also skips empty-string payloads.
                if (!data)
                    continue;
                if (data === "[DONE]") {
                    yield {
                        type: "done",
                    };
                    return;
                }
                try {
                    const raw = JSON.parse(data);
                    const rawChunkResult = RawStreamChunkSchema.safeParse(raw);
                    if (!rawChunkResult.success) {
                        // Report the bad chunk but keep consuming the stream.
                        yield {
                            type: "error",
                            error: `Invalid stream chunk: ${rawChunkResult.error.message}`,
                        };
                        continue;
                    }
                    const rawChunk = rawChunkResult.data;
                    // Schema guarantees at least one choice; only the first is used.
                    const delta = rawChunk.choices[0].delta;
                    // Convert usage if present (usually only in final chunk)
                    const usage = rawChunk.usage
                        ? {
                            promptTokens: rawChunk.usage.prompt_tokens,
                            completionTokens: rawChunk.usage.completion_tokens,
                            totalTokens: rawChunk.usage.total_tokens,
                        }
                        : undefined;
                    yield {
                        type: "text",
                        id: rawChunk.id,
                        // delta.content is optional in the schema; default to "".
                        content: delta.content || "",
                        // Normalize null (mid-stream) to undefined.
                        finishReason: rawChunk.choices[0].finish_reason || undefined,
                        usage,
                    };
                }
                catch (error) {
                    // JSON.parse failure on a data line; report and continue.
                    yield {
                        type: "error",
                        error: `Failed to parse stream chunk: ${error instanceof Error ? error.message : String(error)}`,
                    };
                }
            }
        }
    }
    finally {
        // Always release the reader, even on early return or throw.
        reader.releaseLock();
    }
}
|
|
378
|
+
// ============================================================================
|
|
379
|
+
// EXPORTS
|
|
380
|
+
// ============================================================================
|
|
381
|
+
/**
 * Aggregate default export mirroring the named exports above, so consumers
 * can import the whole schema set as one object.
 */
export default {
    // Messages
    ChatMessageSchema,
    ChatMessageRoleSchema,
    // Usage
    TokenUsageSchema,
    RawUsageSchema,
    // Requests
    BaseAIRequestOptionsSchema,
    ChatCompletionOptionsSchema,
    AIRequestSchema,
    AIChatRequestSchema,
    // Responses
    LatencyInfoSchema,
    RawChatCompletionResponseSchema,
    ChatCompletionResponseSchema,
    AIResponseSchema,
    AIChatResponseSchema,
    // Streaming
    StreamDeltaSchema,
    StreamChunkTypeSchema,
    RawStreamChunkSchema,
    StreamChunkSchema,
    // Errors
    APIErrorResponseSchema,
    ErrorTypeSchema,
    // Helpers
    validateChatMessage,
    validateChatMessages,
    validateChatCompletionOptions,
    validateRawResponse,
    convertUsage,
    categorizeError,
    parseSSERow,
    parseSSEStream,
};
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
 * Zod schemas for Z.AI GLM API
 *
 * Provider-specific schemas for Z.AI (GLM) models.
 * Generic AI schemas (chat messages, streaming, etc.) are in ./ai.ts
 */
import { z } from "zod";
/**
 * Available GLM models from Z.AI API
 */
export declare const GLMModelSchema: z.ZodEnum<["GLM-4.7", "GLM-4.6", "GLM-4.5", "GLM-4.5-air"]>;
/** Union of the accepted GLM model identifier strings. */
export type GLMModel = z.infer<typeof GLMModelSchema>;
/** Shape of the aggregate default export (mirrors the named export). */
declare const _default: {
    GLMModelSchema: z.ZodEnum<["GLM-4.7", "GLM-4.6", "GLM-4.5", "GLM-4.5-air"]>;
};
export default _default;
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
 * Zod schemas for Z.AI GLM API
 *
 * Provider-specific schemas for Z.AI (GLM) models.
 * Generic AI schemas (chat messages, streaming, etc.) are in ./ai.ts
 */
import { z } from "zod";
// ============================================================================
// Z.AI GLM MODELS
// ============================================================================
/**
 * Available GLM models from Z.AI API
 *
 * NOTE(review): keep this list in sync with the identifiers the Z.AI
 * endpoint actually accepts — adding a value here does not make the API
 * accept it.
 */
export const GLMModelSchema = z.enum([
    "GLM-4.7",
    "GLM-4.6",
    "GLM-4.5",
    "GLM-4.5-air",
]);
// ============================================================================
// EXPORTS
// ============================================================================
/** Aggregate default export mirroring the named export above. */
export default {
    GLMModelSchema,
};
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/**
 * OpenAI protocol related types for the application
 *
 * This file now re-exports types from schema.ts for backward compatibility.
 * New code should import directly from schema.ts.
 *
 * Schema provides:
 * - Runtime validation with Zod
 * - Type inference for TypeScript
 * - Helper functions for validation
 */
// Type-only re-export of everything declared in the schema module.
export type * from "./schemas/ai.js";
// Runtime validation helpers re-exported for backward compatibility.
export { validateChatMessage, validateChatMessages, validateChatCompletionOptions, validateRawResponse, convertUsage, categorizeError, } from "./schemas/ai.js";
|
package/dist/types.js
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/**
 * OpenAI protocol related types for the application
 *
 * This file now re-exports types from schema.ts for backward compatibility.
 * New code should import directly from schema.ts.
 *
 * Schema provides:
 * - Runtime validation with Zod
 * - Type inference for TypeScript
 * - Helper functions for validation
 */
// Re-export validation helpers for convenience
// (runtime-only mirror of types.d.ts; keep the two lists in sync).
export { validateChatMessage, validateChatMessages, validateChatCompletionOptions, validateRawResponse, convertUsage, categorizeError, } from "./schemas/ai.js";
|
package/package.json
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@ebowwa/ai",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "AI/LLM client utilities and GLM API integration",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.js",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
|
|
11
|
+
"import": "./dist/index.js"
|
|
12
|
+
},
|
|
13
|
+
"./types": {
|
|
14
|
+
"types": "./dist/types.d.ts",
|
|
15
|
+
"import": "./dist/types.js"
|
|
16
|
+
},
|
|
17
|
+
"./prompts": {
|
|
18
|
+
"types": "./dist/prompts.d.ts",
|
|
19
|
+
"import": "./dist/prompts.js"
|
|
20
|
+
},
|
|
21
|
+
"./client": {
|
|
22
|
+
"types": "./dist/client.d.ts",
|
|
23
|
+
"import": "./dist/client.js"
|
|
24
|
+
},
|
|
25
|
+
"./schemas": {
|
|
26
|
+
"types": "./dist/schemas/index.d.ts",
|
|
27
|
+
"import": "./dist/schemas/index.js"
|
|
28
|
+
},
|
|
29
|
+
"./schemas/ai": {
|
|
30
|
+
"types": "./dist/schemas/ai.d.ts",
|
|
31
|
+
"import": "./dist/schemas/ai.js"
|
|
32
|
+
},
|
|
33
|
+
"./schemas/glm": {
|
|
34
|
+
"types": "./dist/schemas/glm.d.ts",
|
|
35
|
+
"import": "./dist/schemas/glm.js"
|
|
36
|
+
}
|
|
37
|
+
},
|
|
38
|
+
"files": [
|
|
39
|
+
"dist",
|
|
40
|
+
"README.md",
|
|
41
|
+
"LICENSE"
|
|
42
|
+
],
|
|
43
|
+
"scripts": {
|
|
44
|
+
"build": "tsc",
|
|
45
|
+
"typecheck": "tsc --noEmit",
|
|
46
|
+
"prepublishOnly": "npm run build"
|
|
47
|
+
},
|
|
48
|
+
"keywords": [
|
|
49
|
+
"ai",
|
|
50
|
+
"llm",
|
|
51
|
+
"glm",
|
|
52
|
+
"openai",
|
|
53
|
+
"chat",
|
|
54
|
+
"streaming",
|
|
55
|
+
"zod",
|
|
56
|
+
"typescript"
|
|
57
|
+
],
|
|
58
|
+
"author": "Ebowwa Labs <labs@ebowwa.com>",
|
|
59
|
+
"license": "MIT",
|
|
60
|
+
"homepage": "https://github.com/ebowwa/ai#readme",
|
|
61
|
+
"repository": {
|
|
62
|
+
"type": "git",
|
|
63
|
+
"url": "git+https://github.com/ebowwa/ai.git"
|
|
64
|
+
},
|
|
65
|
+
"bugs": {
|
|
66
|
+
"url": "https://github.com/ebowwa/ai/issues"
|
|
67
|
+
},
|
|
68
|
+
"dependencies": {
|
|
69
|
+
"zod": "^3.24.1"
|
|
70
|
+
},
|
|
71
|
+
"devDependencies": {
|
|
72
|
+
"@types/node": "^22.10.2",
|
|
73
|
+
"typescript": "^5.7.2"
|
|
74
|
+
},
|
|
75
|
+
"engines": {
|
|
76
|
+
"node": ">=18.0.0"
|
|
77
|
+
}
|
|
78
|
+
}
|