@altsafe/aidirector 1.0.0

/**
 * AI Director Client Types
 * Type definitions for the SDK
 */
/**
 * Configuration for AI Director client
 */
interface AIDirectorConfig {
    /**
     * Your AI Director secret key
     * Get this from your dashboard at https://aidirector.dev/dashboard/keys
     * Format: aid_sk_<random>
     */
    secretKey: string;
    /**
     * API base URL
     * @default 'http://localhost:3000' in development
     * @example 'https://api.aidirector.dev'
     */
    baseUrl?: string;
    /**
     * Request timeout in milliseconds
     * @default 600000 (10 minutes)
     */
    timeout?: number;
    /**
     * Maximum number of retries for failed requests
     * Set to 0 to disable retries
     * @default 3
     */
    maxRetries?: number;
    /**
     * Enable debug logging to console
     * @default false
     */
    debug?: boolean;
}
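/*
 * Illustrative only: a fully specified configuration, using the import path shown in the
 * package's own example further below ('@aidirector/client'). All values are placeholders.
 *
 * ```typescript
 * import { AIDirector } from '@aidirector/client';
 *
 * const client = new AIDirector({
 *   secretKey: process.env.AIDIRECTOR_SECRET_KEY!, // aid_sk_<random>
 *   baseUrl: 'https://api.aidirector.dev',
 *   timeout: 120_000,  // override the 10-minute default
 *   maxRetries: 0,     // disable automatic retries
 *   debug: true,       // log request details to the console
 * });
 * ```
 */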
/**
 * Options for the generate method
 */
interface GenerateOptions {
    /**
     * ID of the fallback chain to use
     * Get chain IDs from your dashboard
     */
    chainId: string;
    /**
     * The prompt to send to the AI
     */
    prompt: string;
    /**
     * Optional JSON schema for response validation
     * The AI will be instructed to return data matching this schema
     * @example { name: 'string', age: 'number', tags: 'string[]' }
     */
    schema?: Record<string, unknown>;
    /**
     * Override the client timeout for this request
     */
    timeout?: number;
    /**
     * Generation parameters
     */
    options?: GenerationParameters;
}
/**
 * AI generation parameters
 */
interface GenerationParameters {
    /**
     * Controls randomness (0 = deterministic, 2 = very random)
     * @default 0.7
     */
    temperature?: number;
    /**
     * Maximum tokens to generate
     */
    maxTokens?: number;
    /**
     * Nucleus sampling parameter
     * @default 1.0
     */
    topP?: number;
    /**
     * Top-K sampling parameter
     */
    topK?: number;
    /**
     * System prompt to prepend
     */
    systemPrompt?: string;
}
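/*
 * Sketch of a generate() call combining the options above: a per-request timeout, a
 * response schema, and tuned generation parameters. The chain ID and prompt are
 * placeholders; `client` is an AIDirector instance as constructed above.
 *
 * ```typescript
 * const options: GenerateOptions = {
 *   chainId: 'my-chain',
 *   prompt: 'Generate 3 example products for a demo store',
 *   schema: { name: 'string', price: 'number', tags: 'string[]' },
 *   timeout: 60_000, // override the client-level timeout for this request only
 *   options: {
 *     temperature: 0.2, // keep output close to deterministic
 *     maxTokens: 1024,
 *     systemPrompt: 'Respond with JSON only.',
 *   },
 * };
 * const result = await client.generate(options);
 * ```
 */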
/**
 * Result from the generate method
 */
interface GenerateResult {
    /**
     * Whether the request was successful
     */
    success: boolean;
    /**
     * The generated data
     */
    data: GenerateData;
    /**
     * Request metadata
     */
    meta: GenerateMeta;
    /**
     * Error information (only present if success is false)
     */
    error?: GenerateError;
}
/**
 * Generated data
 */
interface GenerateData {
    /**
     * Schema-compliant objects (or all parsed objects if no schema)
     */
    valid: unknown[];
    /**
     * Objects that failed schema validation but were successfully parsed
     */
    invalid: unknown[];
    /**
     * Raw AI response text (useful for debugging)
     */
    rawContent?: string;
}
/**
 * Generation metadata
 */
interface GenerateMeta {
    /**
     * Whether the response was served from cache
     */
    cached: boolean;
    /**
     * The model that generated the response
     */
    modelUsed: string;
    /**
     * Token usage
     */
    tokensUsed: TokenUsage;
    /**
     * Request latency in milliseconds
     */
    latencyMs: number;
    /**
     * Models attempted before success (includes failed models)
     */
    attemptedModels: string[];
    /**
     * Why the generation stopped
     */
    finishReason?: 'stop' | 'length' | 'content_filter' | 'error';
    /**
     * Whether JSON recovery was needed
     */
    recovered?: boolean;
}
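/*
 * Sketch of consuming a GenerateResult: schema-compliant objects land in data.valid,
 * parseable-but-non-conforming objects in data.invalid, and meta describes how the
 * response was produced. `result` is the value returned by client.generate() above.
 *
 * ```typescript
 * if (result.success) {
 *   console.log(`valid: ${result.data.valid.length}, invalid: ${result.data.invalid.length}`);
 *   console.log(`model: ${result.meta.modelUsed}, cached: ${result.meta.cached}`);
 *   if (result.meta.attemptedModels.length > 1) {
 *     console.log('fallbacks were used:', result.meta.attemptedModels);
 *   }
 * } else {
 *   console.error(result.error?.code, result.error?.message);
 * }
 * ```
 */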
/**
 * Token usage information
 */
interface TokenUsage {
    input: number;
    output: number;
    total?: number;
}
/**
 * Error information
 */
interface GenerateError {
    /**
     * Error code for programmatic handling
     */
    code: string;
    /**
     * Human-readable error message
     */
    message: string;
    /**
     * Whether the error can be retried
     */
    retryable?: boolean;
    /**
     * Additional error details
     */
    details?: Record<string, unknown>;
}
/**
 * Callbacks for streaming generation
 */
interface StreamCallbacks {
    /**
     * Called for each text chunk received
     */
    onChunk?: (chunk: string) => void;
    /**
     * Called when streaming is complete
     */
    onComplete?: (result: GenerateResult) => void;
    /**
     * Called if an error occurs
     */
    onError?: (error: Error) => void;
    /**
     * Called with progress updates
     */
    onProgress?: (progress: StreamProgress) => void;
}
/**
 * Streaming progress information
 */
interface StreamProgress {
    /**
     * Total characters received so far
     */
    charactersReceived: number;
    /**
     * Elapsed time in milliseconds
     */
    elapsedMs: number;
    /**
     * Current model being used
     */
    model: string;
}
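/*
 * Sketch of streaming with progress reporting, using the callbacks above. The chain ID
 * and prompt are placeholders; `client` is an AIDirector instance.
 *
 * ```typescript
 * await client.generateStream(
 *   { chainId: 'my-chain', prompt: 'Write a long product description' },
 *   {
 *     onChunk: (chunk) => process.stdout.write(chunk),
 *     onProgress: ({ charactersReceived, elapsedMs, model }) =>
 *       console.error(`[${model}] ${charactersReceived} chars in ${elapsedMs}ms`),
 *     onComplete: (result) => console.error('\ntokens:', result.meta.tokensUsed),
 *     onError: (error) => console.error('stream failed:', error),
 *   },
 * );
 * ```
 */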
/**
 * Information about a fallback chain
 */
interface ChainInfo {
    /**
     * Unique chain identifier
     */
    id: string;
    /**
     * Human-readable chain name
     */
    name: string;
    /**
     * Chain description
     */
    description?: string;
    /**
     * Whether this is the default chain
     */
    isDefault: boolean;
    /**
     * Chain steps (models to try in order)
     */
    steps: ChainStep[];
    /**
     * Number of requests using this chain
     */
    requestCount?: number;
    /**
     * Date chain was created
     */
    createdAt?: string;
}
/**
 * A step in a fallback chain
 */
interface ChainStep {
    /**
     * Model identifier
     */
    modelId: string;
    /**
     * Human-readable model name
     */
    modelName: string;
    /**
     * Provider (GEMINI, OPENROUTER)
     */
    provider: 'GEMINI' | 'OPENROUTER';
    /**
     * Step priority (lower = tried first)
     */
    priority: number;
    /**
     * Maximum retries for this step
     */
    maxRetries?: number;
    /**
     * Timeout for this step in milliseconds
     */
    timeoutMs?: number;
}
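/*
 * Sketch of inspecting fallback chains via listChains() (note the session requirement
 * documented on that method below). Chain and step fields come from the interfaces above.
 *
 * ```typescript
 * const chains = await client.listChains();
 * for (const chain of chains) {
 *   const order = [...chain.steps]
 *     .sort((a, b) => a.priority - b.priority)
 *     .map((step) => `${step.provider}:${step.modelName}`)
 *     .join(' -> ');
 *   console.log(`${chain.name}${chain.isDefault ? ' (default)' : ''}: ${order}`);
 * }
 * ```
 */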
/**
 * Information about an AI model
 */
interface ModelInfo {
    /**
     * Model identifier (use this in chains)
     */
    id: string;
    /**
     * Provider name
     */
    provider: 'GEMINI' | 'OPENROUTER';
    /**
     * Human-readable model name
     */
    displayName: string;
    /**
     * Model description
     */
    description?: string;
    /**
     * Maximum input tokens
     */
    inputTokenLimit?: number;
    /**
     * Maximum output tokens
     */
    outputTokenLimit?: number;
    /**
     * Whether model supports JSON mode
     */
    supportsJsonMode: boolean;
    /**
     * Whether model supports streaming
     */
    supportsStreaming?: boolean;
    /**
     * Whether model supports thinking/reasoning
     */
    supportsThinking?: boolean;
    /**
     * Whether model is free to use
     */
    isFree: boolean;
    /**
     * Pricing per 1M tokens (if available)
     */
    pricing?: {
        input: number;
        output: number;
        currency: string;
    };
}
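/*
 * Sketch of filtering the model catalog with listModels(): here, free models that support
 * JSON mode, sorted by output token limit. All fields used are declared above.
 *
 * ```typescript
 * const models = await client.listModels();
 * const candidates = models
 *   .filter((m) => m.isFree && m.supportsJsonMode)
 *   .sort((a, b) => (b.outputTokenLimit ?? 0) - (a.outputTokenLimit ?? 0));
 * console.table(candidates.map(({ id, provider, displayName }) => ({ id, provider, displayName })));
 * ```
 */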
/**
 * Usage statistics
 */
interface UsageStats {
    /**
     * Total requests in period
     */
    totalRequests: number;
    /**
     * Successful requests
     */
    successfulRequests: number;
    /**
     * Failed requests
     */
    failedRequests: number;
    /**
     * Cached responses served
     */
    cachedResponses: number;
    /**
     * Total tokens used
     */
    totalTokens: TokenUsage;
    /**
     * Usage by model
     */
    byModel: Record<string, {
        requests: number;
        tokens: TokenUsage;
    }>;
    /**
     * Average latency in milliseconds
     */
    avgLatencyMs: number;
    /**
     * Period start date
     */
    periodStart: string;
    /**
     * Period end date
     */
    periodEnd: string;
}
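/*
 * Sketch of pulling usage for the last 7 days with getUsage() and deriving a success rate;
 * the date math and the derived metric are illustrative, not part of the API.
 *
 * ```typescript
 * const usage = await client.getUsage({
 *   startDate: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000),
 *   endDate: new Date(),
 * });
 * const successRate = usage.totalRequests > 0
 *   ? (usage.successfulRequests / usage.totalRequests) * 100
 *   : 0;
 * console.log(`${usage.totalRequests} requests, ${successRate.toFixed(1)}% ok, avg ${usage.avgLatencyMs}ms`);
 * ```
 */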
/**
 * Health check result
 */
interface HealthResult {
    /**
     * Whether the API is healthy
     */
    ok: boolean;
    /**
     * Response latency in milliseconds
     */
    latencyMs: number;
    /**
     * API version
     */
    version?: string;
    /**
     * Current time on server
     */
    timestamp?: string;
}
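/*
 * Sketch of a startup readiness check with health(); treating a failed check as fatal is
 * an application choice, not something the SDK requires.
 *
 * ```typescript
 * const { ok, latencyMs, version } = await client.health();
 * if (!ok) {
 *   throw new Error('AI Director API is not reachable');
 * }
 * console.log(`AI Director ${version ?? 'unknown version'} reachable in ${latencyMs}ms`);
 * ```
 */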

/**
 * AI Director Client SDK
 *
 * Production-grade client for the AI Director API gateway.
 * Use in Next.js API routes, Server Actions, or any Node.js/Edge environment.
 *
 * Features:
 * - 🔐 HMAC Authentication with browser support
 * - ⚡ Configurable timeout (default 10 minutes)
 * - 🔄 Automatic retries with exponential backoff
 * - 📦 Structured error handling
 * - 🎯 Full TypeScript support
 *
 * @example
 * ```typescript
 * import { AIDirector } from '@aidirector/client';
 *
 * const client = new AIDirector({
 *   secretKey: process.env.AIDIRECTOR_SECRET_KEY!,
 *   baseUrl: 'https://api.aidirector.dev',
 * });
 *
 * const result = await client.generate({
 *   chainId: 'my-chain',
 *   prompt: 'Generate 5 user profiles',
 *   schema: { name: 'string', age: 'number' },
 * });
 *
 * if (result.success) {
 *   console.log(result.data.valid);
 * }
 * ```
 */
declare class AIDirector {
    private readonly secretKey;
    private readonly baseUrl;
    private readonly timeout;
    private readonly maxRetries;
    private readonly keyPrefix;
    private readonly debug;
    constructor(config: AIDirectorConfig);
    /**
     * Generate content using your fallback chain
     *
     * @param options - Generation options
     * @returns Promise resolving to GenerateResult
     * @throws AIDirectorError subclasses on failure
     *
     * @example
     * ```typescript
     * const result = await client.generate({
     *   chainId: 'production-chain',
     *   prompt: 'List 3 programming languages',
     *   options: { temperature: 0.7 },
     * });
     * ```
     */
    generate(options: GenerateOptions): Promise<GenerateResult>;
    /**
     * Generate content with streaming (for long responses)
     *
     * @param options - Generation options
     * @param callbacks - Streaming callbacks
     *
     * @example
     * ```typescript
     * await client.generateStream(
     *   { chainId: 'my-chain', prompt: 'Write a long story' },
     *   {
     *     onChunk: (chunk) => process.stdout.write(chunk),
     *     onComplete: (result) => console.log('\nDone!', result.meta),
     *     onError: (error) => console.error('Error:', error),
     *   }
     * );
     * ```
     */
    generateStream(options: GenerateOptions, callbacks: StreamCallbacks): Promise<void>;
    /**
     * List available AI models
     *
     * @returns Array of available models
     */
    listModels(): Promise<ModelInfo[]>;
    /**
     * Get your fallback chains
     * Note: Requires cookies/session from authenticated context
     *
     * @returns Array of chain configurations
     */
    listChains(): Promise<ChainInfo[]>;
    /**
     * Get usage statistics for your account
     *
     * @param options - Date range options
     * @returns Usage statistics
     */
    getUsage(options?: {
        startDate?: Date;
        endDate?: Date;
    }): Promise<UsageStats>;
    /**
     * Health check - verify API connection
     *
     * @returns Health status with latency
     */
    health(): Promise<{
        ok: boolean;
        latencyMs: number;
        version?: string;
    }>;
    /**
     * Create a new client with different configuration
     * Useful for testing or switching environments
     *
     * @param overrides - Configuration overrides
     * @returns New AIDirector instance
     */
    withConfig(overrides: Partial<AIDirectorConfig>): AIDirector;
    /**
     * Make authenticated API request
     */
    private makeAuthenticatedRequest;
    /**
     * Fetch with timeout
     */
    private fetchWithTimeout;
    /**
     * Parse API error response
     */
    private parseError;
    /**
     * Check if error is retryable
     */
    private isRetryable;
    /**
     * Sleep utility
     */
    private sleep;
    /**
     * Debug logging
     */
    private log;
}
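/*
 * Sketch of withConfig(): deriving a second client that shares the secret key but points
 * at a local gateway with debug logging, e.g. for tests. The local URL is a placeholder.
 *
 * ```typescript
 * const devClient = client.withConfig({
 *   baseUrl: 'http://localhost:3000',
 *   debug: true,
 * });
 * const smoke = await devClient.generate({ chainId: 'my-chain', prompt: 'ping' });
 * ```
 */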

/**
 * AI Director Error Classes
 * Structured error types for better error handling
 */
/**
 * Base error class for AI Director errors
 */
declare class AIDirectorError extends Error {
    readonly code: string;
    readonly retryable: boolean;
    readonly statusCode?: number;
    readonly originalError?: Error;
    constructor(message: string, code: string, options?: {
        retryable?: boolean;
        statusCode?: number;
        cause?: Error;
    });
}
/**
 * Configuration error - invalid client setup
 */
declare class ConfigurationError extends AIDirectorError {
    constructor(message: string);
}
/**
 * Authentication error - invalid or expired credentials
 */
declare class AuthenticationError extends AIDirectorError {
    constructor(message: string, statusCode?: number);
}
/**
 * Rate limit error - too many requests
 */
declare class RateLimitError extends AIDirectorError {
    readonly retryAfterMs: number;
    constructor(message: string, retryAfterMs?: number);
}
/**
 * Timeout error - request took too long
 */
declare class TimeoutError extends AIDirectorError {
    readonly timeoutMs: number;
    constructor(timeoutMs: number);
}
/**
 * Network error - connection failed
 */
declare class NetworkError extends AIDirectorError {
    constructor(message: string, cause?: Error);
}
/**
 * Chain error - fallback chain execution failed
 */
declare class ChainExecutionError extends AIDirectorError {
    readonly attemptedModels: string[];
    constructor(message: string, attemptedModels?: string[]);
}
/**
 * Validation error - schema validation failed
 */
declare class ValidationError extends AIDirectorError {
    readonly validationErrors: unknown[];
    constructor(message: string, validationErrors?: unknown[]);
}
/**
 * Server error - internal server error
 */
declare class ServerError extends AIDirectorError {
    constructor(message: string, statusCode?: number);
}
/**
 * Check if error is an AI Director error
 */
declare function isAIDirectorError(error: unknown): error is AIDirectorError;
/**
 * Check if error is retryable
 */
declare function isRetryableError(error: unknown): boolean;
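/*
 * Sketch of error handling around generate(): narrow with isAIDirectorError, honor
 * RateLimitError.retryAfterMs, and fall back to isRetryableError for everything else.
 * The single manual retry is illustrative; the client also retries internally per maxRetries.
 *
 * ```typescript
 * async function generateWithRetry(prompt: string): Promise<GenerateResult> {
 *   try {
 *     return await client.generate({ chainId: 'my-chain', prompt });
 *   } catch (err) {
 *     if (err instanceof RateLimitError) {
 *       // wait the server-suggested interval, then retry once
 *       await new Promise((resolve) => setTimeout(resolve, err.retryAfterMs));
 *       return client.generate({ chainId: 'my-chain', prompt });
 *     }
 *     if (isAIDirectorError(err)) {
 *       console.error(`[${err.code}] ${err.message} (retryable: ${err.retryable})`);
 *     }
 *     if (isRetryableError(err)) {
 *       return client.generate({ chainId: 'my-chain', prompt });
 *     }
 *     throw err;
 *   }
 * }
 * ```
 */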

/**
 * HMAC Signature Generation
 *
 * Supports both Node.js (crypto) and browser (Web Crypto API) environments.
 * The signature algorithm matches the server-side verification.
 */
/**
 * Generate HMAC signature for request authentication
 *
 * Automatically detects environment and uses appropriate implementation.
 *
 * IMPORTANT: We hash the secret key first, then use the hash as HMAC key.
 * This allows the server to verify using the stored hash (it never sees the full key).
 */
declare function generateSignature(secretKey: string, method: string, path: string, body: string, timestamp: number): Promise<string>;
/**
 * Get the key prefix for identification (sent in headers)
 */
declare function getKeyPrefix(secretKey: string): string;
/**
 * Validate secret key format
 */
declare function isValidSecretKey(secretKey: string): boolean;
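/*
 * Sketch of the signing helpers: validate the key format, derive the identifying prefix,
 * and compute a signature over a request. The method and path are placeholders, and which
 * headers carry the prefix, timestamp, and signature is not specified in these declarations,
 * so that part is left out.
 *
 * ```typescript
 * const secretKey = process.env.AIDIRECTOR_SECRET_KEY!;
 * if (!isValidSecretKey(secretKey)) {
 *   throw new Error('Expected a key of the form aid_sk_<random>');
 * }
 * const body = JSON.stringify({ chainId: 'my-chain', prompt: 'ping' });
 * const timestamp = Date.now();
 * const signature = await generateSignature(secretKey, 'POST', '/generate', body, timestamp);
 * const keyPrefix = getKeyPrefix(secretKey);
 * // keyPrefix, timestamp, and signature would then be attached to the request
 * // in whatever headers the server expects.
 * ```
 */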

export { AIDirector, type AIDirectorConfig, AIDirectorError, AuthenticationError, ChainExecutionError, type ChainInfo, type ChainStep, ConfigurationError, type GenerateData, type GenerateError, type GenerateMeta, type GenerateOptions, type GenerateResult, type GenerationParameters, type HealthResult, type ModelInfo, NetworkError, RateLimitError, ServerError, type StreamCallbacks, type StreamProgress, TimeoutError, type TokenUsage, type UsageStats, ValidationError, generateSignature, getKeyPrefix, isAIDirectorError, isRetryableError, isValidSecretKey };