@qianxude/ai 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +599 -0
  3. package/dist/client/client.d.ts +56 -0
  4. package/dist/client/client.d.ts.map +1 -0
  5. package/dist/client/client.js +285 -0
  6. package/dist/client/client.js.map +1 -0
  7. package/dist/client/config.d.ts +34 -0
  8. package/dist/client/config.d.ts.map +1 -0
  9. package/dist/client/config.js +141 -0
  10. package/dist/client/config.js.map +1 -0
  11. package/dist/client/index.d.ts +9 -0
  12. package/dist/client/index.d.ts.map +1 -0
  13. package/dist/client/index.js +13 -0
  14. package/dist/client/index.js.map +1 -0
  15. package/dist/client/providers.d.ts +27 -0
  16. package/dist/client/providers.d.ts.map +1 -0
  17. package/dist/client/providers.js +235 -0
  18. package/dist/client/providers.js.map +1 -0
  19. package/dist/client/task.d.ts +59 -0
  20. package/dist/client/task.d.ts.map +1 -0
  21. package/dist/client/task.js +179 -0
  22. package/dist/client/task.js.map +1 -0
  23. package/dist/index.d.ts +3 -0
  24. package/dist/index.d.ts.map +1 -0
  25. package/dist/index.js +5 -0
  26. package/dist/index.js.map +1 -0
  27. package/dist/types/base.d.ts +38 -0
  28. package/dist/types/base.d.ts.map +1 -0
  29. package/dist/types/base.js +6 -0
  30. package/dist/types/base.js.map +1 -0
  31. package/dist/types/client.d.ts +47 -0
  32. package/dist/types/client.d.ts.map +1 -0
  33. package/dist/types/client.js +2 -0
  34. package/dist/types/client.js.map +1 -0
  35. package/dist/types/common.d.ts +19 -0
  36. package/dist/types/common.d.ts.map +1 -0
  37. package/dist/types/common.js +31 -0
  38. package/dist/types/common.js.map +1 -0
  39. package/dist/types/config.d.ts +26 -0
  40. package/dist/types/config.d.ts.map +1 -0
  41. package/dist/types/config.js +2 -0
  42. package/dist/types/config.js.map +1 -0
  43. package/dist/types/index.d.ts +14 -0
  44. package/dist/types/index.d.ts.map +1 -0
  45. package/dist/types/index.js +7 -0
  46. package/dist/types/index.js.map +1 -0
  47. package/dist/types/message.d.ts +25 -0
  48. package/dist/types/message.d.ts.map +1 -0
  49. package/dist/types/message.js +2 -0
  50. package/dist/types/message.js.map +1 -0
  51. package/dist/types/mod.d.ts +2 -0
  52. package/dist/types/mod.d.ts.map +1 -0
  53. package/dist/types/mod.js +2 -0
  54. package/dist/types/mod.js.map +1 -0
  55. package/dist/types/options.d.ts +31 -0
  56. package/dist/types/options.d.ts.map +1 -0
  57. package/dist/types/options.js +5 -0
  58. package/dist/types/options.js.map +1 -0
  59. package/dist/types/provider.d.ts +22 -0
  60. package/dist/types/provider.d.ts.map +1 -0
  61. package/dist/types/provider.js +2 -0
  62. package/dist/types/provider.js.map +1 -0
  63. package/dist/types/response.d.ts +19 -0
  64. package/dist/types/response.d.ts.map +1 -0
  65. package/dist/types/response.js +5 -0
  66. package/dist/types/response.js.map +1 -0
  67. package/dist/types/task.d.ts +28 -0
  68. package/dist/types/task.d.ts.map +1 -0
  69. package/dist/types/task.js +2 -0
  70. package/dist/types/task.js.map +1 -0
  71. package/package.json +42 -0
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 qianxude
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,599 @@
1
+ # @qianxude/ai
2
+
3
+ A TypeScript LLM client with provider-model-task architecture, designed for the Cechat knowledge base toolset.
4
+
5
+ ## Overview
6
+
7
+ This package provides a modular architecture for making LLM calls. It features:
8
+
9
+ 1. **Provider-Model-Task Architecture** - Flexible configuration for multiple LLM providers
10
+ 2. **OpenAI-compatible API** - Works with CloudEdge, SiliconFlow, OpenAI, and other compatible services
11
+
12
+ ## Installation
13
+
14
+ ```bash
15
+ bun add @qianxude/ai
16
+ ```
17
+
18
+ ## Architecture
19
+
20
+ ### Package Structure
21
+
22
+ ```
23
+ ┌─────────────────────────────────────────────────────────────────────────┐
24
+ │ @qianxude/ai │
25
+ ├──────────────────────────┬──────────────────────────────────────────────┤
26
+ │ Client │ Types │
27
+ │ (@qianxude/ai/client) │ (@qianxude/ai) │
28
+ ├──────────────────────────┼──────────────────────────────────────────────┤
29
+ │ LLMClient │ │
30
+ │ ConfigLoader │ CompletionOptions │
31
+ │ OpenAICompatibleProvider │ │
32
+ │ Provider-Model-Task │ LLMConfig │
33
+ └──────────────────────────┴──────────────────────────────────────────────┘
34
+ ```
35
+
36
+ ### Provider-Model-Task Architecture
37
+
38
+ The new LLM client uses a three-layer configuration architecture that separates concerns and enables flexible model management:
39
+
40
+ ```
41
+ ┌────────────────────────────────────────────────────────────────┐
42
+ │ Task Layer │
43
+ │ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
44
+ │ │classification│ │ reasoning │ │ generation │ │
45
+ │ └──────┬───────┘ └──────┬───────┘ └──────┬───────┘ │
46
+ └─────────┼────────────────┼────────────────┼────────────────────┘
47
+ │ │ │
48
+ ▼ ▼ ▼
49
+ ┌────────────────────────────────────────────────────────────────┐
50
+ │ Model Layer │
51
+ │ ┌──────────────────┐ ┌──────────────────┐ │
52
+ │ │ deepseek-chat │ │ deepseek-reasoner│ │
53
+ │ │ temperature: 0.3│ │ temperature: 0.1 │ │
54
+ │ │ maxTokens: 2000 │ │ maxTokens: 4096 │ │
55
+ │ └──────┬───────────┘ └──────┬───────────┘ │
56
+ └─────────┼───────────────────────────┼──────────────────────────┘
57
+ │ │
58
+ └───────────┬───────────────┘
59
+
60
+
61
+ ┌────────────────────────────────────────────────────────────────┐
62
+ │ Provider Layer │
63
+ │ ┌──────────────────────────────────────────────────────┐ │
64
+ │ │ ce │ │
65
+ │ │ apiKeyEnv: CE_API_KEY │ │
66
+ │ │ baseUrlEnv: CE_BASE_URL │ │
67
+ │ └──────────────────────────────────────────────────────┘ │
68
+ └────────────────────────────────────────────────────────────────┘
69
+ ```
70
+
71
+ #### How It Works
72
+
73
+ 1. **Task Layer** - Semantic abstraction for model selection
74
+ - Tasks represent high-level use cases (e.g., `classification`, `reasoning`)
75
+ - Tasks map to specific models in the configuration
76
+ - Allows changing models without modifying code
77
+
78
+ 2. **Model Layer** - Model-specific configuration
79
+ - Each model defines its parameters (temperature, maxTokens, etc.)
80
+ - Models reference a provider for API connectivity
81
+ - Multiple models can use the same provider
82
+
83
+ 3. **Provider Layer** - API endpoint configuration
84
+ - Defines environment variable names for API keys and URLs
85
+ - All providers use OpenAI-compatible API protocol
86
+ - Easy to switch between different LLM services
87
+
88
+ ### Core Components
89
+
90
+ 1. **LLM Client** (`@qianxude/ai/client`) - Provider-model-task based client with multi-provider support
91
+ 2. **Type Definitions** (`@qianxude/ai`) - Shared TypeScript types and types
92
+
93
+ ## Configuration
94
+
95
+ ### Using llm.manifest.json (Required)
96
+
97
+ The LLM client requires an `llm.manifest.json` configuration file. You must set the `LLM_MANIFEST` environment variable to point to this file.
98
+
99
+ #### 1. Create llm.manifest.json
100
+
101
+ ```json
102
+ {
103
+ "version": "1.0",
104
+ "providers": {
105
+ "ce": {
106
+ "name": "CloudEdge",
107
+ "apiKeyEnv": "CE_API_KEY",
108
+ "baseUrlEnv": "CE_BASE_URL"
109
+ }
110
+ },
111
+ "models": {
112
+ "deepseek-chat": {
113
+ "provider": "ce",
114
+ "model": "DeepSeek-V3.2-671B",
115
+ "temperature": 0.3,
116
+ "maxTokens": 2000
117
+ },
118
+ "deepseek-reasoner": {
119
+ "provider": "ce",
120
+ "model": "DeepSeek-R1-671B",
121
+ "temperature": 0.1,
122
+ "maxTokens": 4096
123
+ }
124
+ },
125
+ "tasks": {
126
+ "classification": "deepseek-chat",
127
+ "generation": "deepseek-chat",
128
+ "reasoning": "deepseek-reasoner",
129
+ "summarization": "deepseek-chat"
130
+ }
131
+ }
132
+ ```
133
+
134
+ #### 2. Set Environment Variables
135
+
136
+ | Variable | Required | Description |
137
+ | --------------------- | -------- | -------------------------------------------------- |
138
+ | `LLM_MANIFEST` | **Yes** | Path to your `llm.manifest.json` file |
139
+ | `CE_API_KEY` | Yes\* | API authentication key (required by ce provider) |
140
+ | `CE_BASE_URL` | Yes\* | Base URL for the LLM API (required by ce provider) |
141
+
142
+ \* Required only if using the `ce` provider. Each provider defines its own required environment variables in `llm.manifest.json` via `apiKeyEnv` and `baseUrlEnv`.
143
+
144
+ #### Example
145
+
146
+ ```bash
147
+ export LLM_MANIFEST=./llm.manifest.json
148
+ export CE_API_KEY=your_api_key
149
+ export CE_BASE_URL=https://api.ce.example.com
150
+ ```
151
+
152
+ ### Configuration Schema
153
+
154
+ #### `providers`
155
+
156
+ Providers define API endpoints and credential sources:
157
+
158
+ ```json
159
+ {
160
+ "providers": {
161
+ "ce": {
162
+ "name": "CloudEdge",
163
+ "apiKeyEnv": "CE_API_KEY",
164
+ "baseUrlEnv": "CE_BASE_URL"
165
+ },
166
+ "sf": {
167
+ "name": "SiliconFlow",
168
+ "apiKeyEnv": "SF_API_KEY",
169
+ "baseUrlEnv": "SF_BASE_URL"
170
+ }
171
+ }
172
+ }
173
+ ```
174
+
175
+ Each provider defines:
176
+
177
+ - `name`: Human-readable name for logging
178
+ - `apiKeyEnv`: Environment variable name containing the API key
179
+ - `baseUrlEnv`: Environment variable name containing the base URL
180
+
181
+ #### `models`
182
+
183
+ Models define LLM configurations and reference providers:
184
+
185
+ ```json
186
+ {
187
+ "models": {
188
+ "deepseek-chat": {
189
+ "provider": "ce",
190
+ "model": "DeepSeek-V3.2-671B",
191
+ "temperature": 0.3,
192
+ "maxTokens": 2000,
193
+ "topP": 0.9
194
+ },
195
+ "deepseek-reasoner": {
196
+ "provider": "ce",
197
+ "model": "DeepSeek-R1-671B",
198
+ "temperature": 0.1,
199
+ "maxTokens": 4096
200
+ }
201
+ }
202
+ }
203
+ ```
204
+
205
+ Each model defines:
206
+
207
+ - `provider`: Reference to provider key (must exist in `providers`)
208
+ - `model`: Actual model name/id for the API
209
+ - `temperature`: Default sampling temperature (0-2)
210
+ - `maxTokens`: Default maximum tokens per request
211
+ - `topP`: Optional nucleus sampling parameter (0-1)
212
+
213
+ #### `tasks`
214
+
215
+ Tasks provide semantic model selection:
216
+
217
+ ```json
218
+ {
219
+ "tasks": {
220
+ "classification": "deepseek-chat",
221
+ "generation": "deepseek-chat",
222
+ "reasoning": "deepseek-reasoner",
223
+ "summarization": "deepseek-chat",
224
+ "extraction": "deepseek-chat",
225
+ "translation": "deepseek-chat"
226
+ }
227
+ }
228
+ ```
229
+
230
+ Task-to-model mappings:
231
+
232
+ - Key: Task name representing a use case
233
+ - Value: Model ID from `models` section
234
+
235
+ This abstraction allows:
236
+
237
+ 1. **Model switching without code changes** - Update the task mapping to use a different model
238
+ 2. **A/B testing** - Route specific tasks to different models for comparison
239
+ 3. **Cost optimization** - Use cheaper models for simple tasks, expensive ones for complex tasks
240
+ 4. **Fallback strategies** - Define multiple tasks for the same use case with different model tiers
241
+
242
+ ### Advanced Configuration Examples
243
+
244
+ #### Multi-Provider Setup
245
+
246
+ ```json
247
+ {
248
+ "version": "1.0",
249
+ "providers": {
250
+ "ce": {
251
+ "name": "CloudEdge",
252
+ "apiKeyEnv": "CE_API_KEY",
253
+ "baseUrlEnv": "CE_BASE_URL"
254
+ },
255
+ "sf": {
256
+ "name": "SiliconFlow",
257
+ "apiKeyEnv": "SF_API_KEY",
258
+ "baseUrlEnv": "SF_BASE_URL"
259
+ }
260
+ },
261
+ "models": {
262
+ "deepseek-chat": {
263
+ "provider": "ce",
264
+ "model": "DeepSeek-V3.2-671B",
265
+ "temperature": 0.3,
266
+ "maxTokens": 2000
267
+ },
268
+ "qwen-max": {
269
+ "provider": "sf",
270
+ "model": "qwen-max",
271
+ "temperature": 0.3,
272
+ "maxTokens": 2000
273
+ }
274
+ },
275
+ "tasks": {
276
+ "classification": "deepseek-chat",
277
+ "generation": "qwen-max"
278
+ }
279
+ }
280
+ ```
281
+
282
+ #### Task-Based Model Selection by Complexity
283
+
284
+ ```json
285
+ {
286
+ "version": "1.0",
287
+ "providers": {
288
+ "ce": {
289
+ "name": "CloudEdge",
290
+ "apiKeyEnv": "CE_API_KEY",
291
+ "baseUrlEnv": "CE_BASE_URL"
292
+ }
293
+ },
294
+ "models": {
295
+ "deepseek-chat": {
296
+ "provider": "ce",
297
+ "model": "DeepSeek-V3.2-671B",
298
+ "temperature": 0.3,
299
+ "maxTokens": 1000
300
+ },
301
+ "deepseek-reasoner": {
302
+ "provider": "ce",
303
+ "model": "DeepSeek-R1-671B",
304
+ "temperature": 0.1,
305
+ "maxTokens": 4000
306
+ }
307
+ },
308
+ "tasks": {
309
+ "simple_classification": "deepseek-chat",
310
+ "complex_reasoning": "deepseek-reasoner",
311
+ "code_generation": "deepseek-reasoner",
312
+ "text_summarization": "deepseek-chat"
313
+ }
314
+ }
315
+ ```
316
+
317
+ ## Usage
318
+
319
+ ### Basic LLM Client
320
+
321
+ ```typescript
322
+ import { createLLMClient } from '@qianxude/ai/client';
323
+
324
+ // Create client (uses LLM_MANIFEST env var)
325
+ const llm = createLLMClient();
326
+ await llm.initialize();
327
+
328
+ // Simple completion (uses first available model)
329
+ const response = await llm.complete('Explain TypeScript generics');
330
+ console.log(response.content);
331
+
332
+ // Task-based model selection
333
+ const result = await llm.complete('Classify this text', { task: 'classification' });
334
+
335
+ // Direct model selection
336
+ const result2 = await llm.complete('Write a function', {
337
+ model: 'deepseek-reasoner',
338
+ temperature: 0.7,
339
+ maxTokens: 1000,
340
+ });
341
+ ```
342
+
343
+ ### With Custom Config Path (Overrides Env Var)
344
+
345
+ ```typescript
346
+ const llm = createLLMClient({ manifestPath: './config/llm.manifest.json' });
347
+ await llm.initialize();
348
+ ```
349
+
350
+ ### With Logger
351
+
352
+ ```typescript
353
+ const llm = createLLMClient({ logger: consoleLogger });
354
+ await llm.initialize();
355
+ ```
356
+
357
+ ### Complete with Messages
358
+
359
+ ```typescript
360
+ import { LLMMessage } from '@qianxude/ai/client';
361
+
362
+ const messages: LLMMessage[] = [
363
+ { role: 'system', content: 'You are a helpful assistant' },
364
+ { role: 'user', content: 'Hello!' },
365
+ ];
366
+
367
+ const response = await llm.complete(messages, { task: 'generation' });
368
+ ```
369
+
370
+ ### Accessing Configuration
371
+
372
+ ```typescript
373
+ // Get model for a task
374
+ const modelId = llm.getModelForTask('reasoning'); // 'deepseek-reasoner'
375
+
376
+ // Get full config
377
+ const config = llm.getConfig();
378
+
379
+ // Get tasks using a specific model
380
+ const tasks = configLoader.getTasksForModel('deepseek-chat');
381
+ // Returns: ['classification', 'generation', 'summarization']
382
+ ```
383
+
384
+ ### Runtime Model Resolution
385
+
386
+ When you call `complete()`, the client resolves the model using this priority:
387
+
388
+ 1. **Explicit model ID** - If `options.model` is provided, use it directly
389
+ 2. **Task-based selection** - If `options.task` is provided, look up the model in config
390
+ 3. **First available model** - Use the first model defined in `llm.manifest.json`
391
+
392
+ ```typescript
393
+ // Priority 1: Direct model selection
394
+ await llm.complete('Hello', { model: 'deepseek-reasoner' });
395
+
396
+ // Priority 2: Task-based selection
397
+ await llm.complete('Hello', { task: 'classification' });
398
+
399
+ // Priority 3: First available model
400
+ await llm.complete('Hello');
401
+ ```
402
+
403
+ This resolution happens at runtime, allowing dynamic model selection based on context.
404
+
405
+ ### Migrating from Legacy to New Architecture
406
+
407
+ If you were previously using the legacy client, here are the key changes:
408
+
409
+ **Legacy approach (removed):**
410
+
411
+ ```typescript
412
+ // This code no longer works - legacy client has been removed
413
+ const llm = createLegacyLLMClient(console);
414
+ const response = await llm.complete('Hello'); // Returns string
415
+ ```
416
+
417
+ **New approach:**
418
+
419
+ ```typescript
420
+ import { createLLMClient } from '@qianxude/ai/client';
421
+
422
+ const llm = createLLMClient({ logger: console });
423
+ await llm.initialize();
424
+ const response = await llm.complete('Hello'); // Returns LLMResponse
425
+ console.log(response.content);
426
+ ```
427
+
428
+ **Key differences:**
429
+
430
+ 1. New client requires `initialize()` call before use
431
+ 2. New client returns `LLMResponse` objects with `content` and `usage`
432
+ 3. New client supports task-based model selection via `llm.manifest.json`
433
+ 4. New client uses options object instead of logger as first parameter
434
+ 5. New client requires an `llm.manifest.json` configuration file
435
+
436
+ ## API Reference
437
+
438
+ ### TaskBasedLLMClient (New Interface)
439
+
440
+ The primary interface for the new provider-model-task architecture:
441
+
442
+ ```typescript
443
+ interface TaskBasedLLMClient {
444
+ initialize(): Promise<void>;
445
+ complete(input: string | LLMMessage[], options?: CompletionOptions): Promise<LLMResponse>;
446
+ stream(input: string | LLMMessage[], options?: CompletionOptions): AsyncGenerator<LLMStreamChunk>;
447
+ getModelForTask(task: string): string;
448
+ getConfig(): LLMConfig;
449
+ }
450
+ ```
451
+
452
+ **Methods:**
453
+
454
+ - `initialize()` - Load and validate configuration from `llm.manifest.json`
455
+ - `complete()` - Single prompt completion or multi-turn conversation completion with task-based model selection. Accepts either a string prompt or an array of messages.
456
+ - `stream()` - Stream completion with task-based model selection. Accepts either a string prompt or an array of messages.
457
+ - `getModelForTask()` - Get model ID for a configured task
458
+ - `getConfig()` - Get full configuration object
459
+
460
+ ### LLMClient
461
+
462
+ Implementation of `TaskBasedLLMClient`:
463
+
464
+ ```typescript
465
+ class LLMClient implements TaskBasedLLMClient {
466
+ constructor(options?: LLMClientOptions);
467
+
468
+ initialize(): Promise<void>;
469
+ complete(input: string | LLMMessage[], options?: CompletionOptions): Promise<LLMResponse>;
470
+ stream(input: string | LLMMessage[], options?: CompletionOptions): AsyncGenerator<LLMStreamChunk>;
471
+ getModelForTask(task: string): string;
472
+ getConfig(): LLMConfig;
473
+ }
474
+ ```
475
+
476
+ ### ConfigLoader
477
+
478
+ ```typescript
479
+ class ConfigLoader {
480
+ constructor(configPath: string);
481
+
482
+ load(): Promise<LLMConfig>;
483
+ resolveModel(modelId: string): ResolvedModelConfig;
484
+ getModelForTask(task: string): string;
485
+ getTasksForModel(modelId: string): string[];
486
+ getConfig(): LLMConfig;
487
+ }
488
+ ```
489
+
490
+ ### OpenAICompatibleProvider
491
+
492
+ ```typescript
493
+ class OpenAICompatibleProvider implements ProviderClient {
494
+ complete(config: ResolvedModelConfig, call: LLMRequest): Promise<LLMResponse>;
495
+ stream(config: ResolvedModelConfig, call: LLMRequest): AsyncGenerator<LLMStreamChunk>;
496
+ }
497
+ ```
498
+
499
+ ## Types
500
+
501
+ ### Core Types
502
+
503
+ ```typescript
504
+ interface CompletionOptions {
505
+ task?: string; // Task-based model selection (e.g., 'classification')
506
+ model?: string; // Direct model selection (e.g., 'deepseek-chat')
507
+ systemPrompt?: string; // System prompt for the conversation
508
+ temperature?: number; // Sampling temperature (0-2)
509
+ topP?: number; // Nucleus sampling (0-1)
510
+ maxTokens?: number; // Maximum tokens to generate
511
+ }
512
+
513
+ interface LLMResponse {
514
+ content: string;
515
+ usage?: {
516
+ promptTokens: number;
517
+ completionTokens: number;
518
+ totalTokens: number;
519
+ };
520
+ model: string;
521
+ }
522
+
523
+ interface LLMConfig {
524
+ version: string;
525
+ providers: Record<string, LLMProvider>;
526
+ models: Record<string, LLMModel>;
527
+ tasks: Record<string, string>;
528
+ }
529
+
530
+ interface LLMProvider {
531
+ name: string;
532
+ apiKeyEnv: string;
533
+ baseUrlEnv: string;
534
+ }
535
+
536
+ interface LLMModel {
537
+ provider: string;
538
+ model: string;
539
+ temperature: number;
540
+ maxTokens: number;
541
+ topP?: number;
542
+ }
543
+
544
+ interface TaskBasedLLMClient {
545
+ initialize(): Promise<void>;
546
+ complete(input: string | LLMMessage[], options?: CompletionOptions): Promise<LLMResponse>;
547
+ stream(input: string | LLMMessage[], options?: CompletionOptions): AsyncGenerator<LLMStreamChunk>;
548
+ getModelForTask(task: string): string;
549
+ getConfig(): LLMConfig;
550
+ }
551
+
552
+ interface LLMMessage {
553
+ role: 'system' | 'user' | 'assistant';
554
+ content: string;
555
+ }
556
+ ```
557
+
558
+ ### Error Types
559
+
560
+ ```typescript
561
+ class LLMError extends Error {
562
+ readonly code: string;
563
+ readonly cause?: unknown;
564
+ }
565
+
566
+ class ConfigurationError extends LLMError {
567
+ // Configuration-related errors
568
+ }
569
+
570
+ class ProviderError extends LLMError {
571
+ readonly provider: string;
572
+ // Provider API errors
573
+ }
574
+ ```
575
+
576
+ ## Error Handling
577
+
578
+ ```typescript
579
+ import { createLLMClient, ConfigurationError, ProviderError } from '@qianxude/ai/client';
580
+
581
+ const llm = createLLMClient();
582
+
583
+ try {
584
+ await llm.initialize();
585
+ const response = await llm.complete('Hello');
586
+ } catch (error) {
587
+ if (error instanceof ConfigurationError) {
588
+ console.error('Config error:', error.message);
589
+ } else if (error instanceof ProviderError) {
590
+ console.error('Provider error:', error.message, 'Provider:', error.provider);
591
+ } else {
592
+ console.error('Unexpected error:', error);
593
+ }
594
+ }
595
+ ```
596
+
597
+ ## License
598
+
599
+ MIT License. See the [LICENSE](./LICENSE) file for details. Maintained for Cechat internal use.
@@ -0,0 +1,56 @@
1
+ import { t } from '../types/mod.js';
2
+ export declare class LLMClient implements t.TaskBasedLLMClient {
3
+ private configLoader?;
4
+ private initialized;
5
+ private logger;
6
+ private manifestPath?;
7
+ private provider?;
8
+ constructor(options?: t.LLMClientOptions);
9
+ /**
10
+ * Get or create the provider instance (lazy initialization)
11
+ */
12
+ private getProvider;
13
+ /**
14
+ * Initialize the client by loading configuration
15
+ */
16
+ initialize(): Promise<void>;
17
+ /**
18
+ * Ensure the client is initialized
19
+ */
20
+ private ensureInitialized;
21
+ /**
22
+ * Get the model ID for a specific task
23
+ */
24
+ getModelForTask(task: string): string;
25
+ /**
26
+ * Get the raw configuration
27
+ */
28
+ getConfig(): t.LLMConfig;
29
+ /**
30
+ * Complete a request with either a prompt string or messages array
31
+ * @param input - Either a prompt string or an array of LLMMessage objects
32
+ * @param options - Optional completion options
33
+ */
34
+ complete(input: string | t.LLMMessage[], options?: t.CompletionOptions): Promise<t.LLMResponse>;
35
+ /**
36
+ * Stream a request with either a prompt string or messages array
37
+ * @param input - Either a prompt string or an array of LLMMessage objects
38
+ * @param options - Optional completion options
39
+ */
40
+ stream(input: string | t.LLMMessage[], options?: t.CompletionOptions): AsyncGenerator<t.LLMStreamChunk>;
41
+ /**
42
+ * Resolve model ID from options or task defaults
43
+ */
44
+ private resolveModelId;
45
+ /**
46
+ * Create a task with resolved configuration
47
+ * @param taskName - The task name to resolve configuration for
48
+ * @param options - Optional task-level options
49
+ */
50
+ createTask(taskName: string, options?: t.LLMTaskOptions): t.LLMTask;
51
+ }
52
+ /**
53
+ * Create a new LLM client instance
54
+ */
55
+ export declare function createLLMClient(options?: t.LLMClientOptions): t.TaskBasedLLMClient;
56
+ //# sourceMappingURL=client.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../../src/client/client.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,CAAC,EAAE,MAAM,iBAAiB,CAAC;AAqBpC,qBAAa,SAAU,YAAW,CAAC,CAAC,kBAAkB;IACpD,OAAO,CAAC,YAAY,CAAC,CAAe;IACpC,OAAO,CAAC,WAAW,CAAS;IAC5B,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,YAAY,CAAC,CAAS;IAC9B,OAAO,CAAC,QAAQ,CAAC,CAAmB;gBAExB,OAAO,GAAE,CAAC,CAAC,gBAAqB;IAK5C;;OAEG;IACH,OAAO,CAAC,WAAW;IAOnB;;OAEG;IACG,UAAU,IAAI,OAAO,CAAC,IAAI,CAAC;IAWjC;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAMzB;;OAEG;IACH,eAAe,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM;IAKrC;;OAEG;IACH,SAAS,IAAI,CAAC,CAAC,SAAS;IAKxB;;;;OAIG;IACG,QAAQ,CAAC,KAAK,EAAE,MAAM,GAAG,CAAC,CAAC,UAAU,EAAE,EAAE,OAAO,GAAE,CAAC,CAAC,iBAAsB,GAAG,OAAO,CAAC,CAAC,CAAC,WAAW,CAAC;IAuFzG;;;;OAIG;IACI,MAAM,CAAC,KAAK,EAAE,MAAM,GAAG,CAAC,CAAC,UAAU,EAAE,EAAE,OAAO,GAAE,CAAC,CAAC,iBAAsB,GAAG,cAAc,CAAC,CAAC,CAAC,cAAc,CAAC;IAqFlH;;OAEG;IACH,OAAO,CAAC,cAAc;IAkBtB;;;;OAIG;IACH,UAAU,CAAC,QAAQ,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,CAAC,CAAC,cAAc,GAAG,CAAC,CAAC,OAAO;CAgBpE;AAED;;GAEG;AACH,wBAAgB,eAAe,CAAC,OAAO,CAAC,EAAE,CAAC,CAAC,gBAAgB,GAAG,CAAC,CAAC,kBAAkB,CAElF"}