@juspay/neurolink 7.41.0 → 7.41.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/CHANGELOG.md +4 -0
  2. package/dist/cli/loop/optionsSchema.d.ts +1 -1
  3. package/dist/cli/loop/optionsSchema.js +1 -1
  4. package/dist/index.d.ts +1 -1
  5. package/dist/lib/index.d.ts +1 -1
  6. package/dist/lib/models/modelRegistry.d.ts +1 -1
  7. package/dist/lib/models/modelRegistry.js +1 -1
  8. package/dist/lib/providers/amazonBedrock.js +1 -0
  9. package/dist/lib/providers/azureOpenai.d.ts +1 -1
  10. package/dist/lib/providers/azureOpenai.js +1 -1
  11. package/dist/lib/providers/googleAiStudio.js +1 -0
  12. package/dist/lib/providers/index.d.ts +0 -25
  13. package/dist/lib/providers/index.js +0 -21
  14. package/dist/lib/providers/openaiCompatible.js +1 -0
  15. package/dist/lib/types/cli.d.ts +53 -40
  16. package/dist/lib/types/providers.d.ts +183 -1
  17. package/dist/lib/types/providers.js +42 -0
  18. package/dist/lib/types/streamTypes.d.ts +1 -1
  19. package/dist/lib/utils/conversationMemory.d.ts +1 -1
  20. package/dist/lib/utils/providerHealth.d.ts +1 -1
  21. package/dist/lib/utils/providerHealth.js +1 -1
  22. package/dist/lib/utils/providerSetupMessages.js +1 -1
  23. package/dist/models/modelRegistry.d.ts +1 -1
  24. package/dist/models/modelRegistry.js +1 -1
  25. package/dist/providers/amazonBedrock.js +1 -0
  26. package/dist/providers/azureOpenai.d.ts +1 -1
  27. package/dist/providers/azureOpenai.js +1 -1
  28. package/dist/providers/googleAiStudio.js +1 -0
  29. package/dist/providers/index.d.ts +0 -25
  30. package/dist/providers/index.js +0 -21
  31. package/dist/providers/openaiCompatible.js +1 -0
  32. package/dist/types/cli.d.ts +53 -40
  33. package/dist/types/modelTypes.d.ts +20 -20
  34. package/dist/types/providers.d.ts +183 -1
  35. package/dist/types/providers.js +42 -0
  36. package/dist/types/streamTypes.d.ts +1 -1
  37. package/dist/utils/conversationMemory.d.ts +1 -1
  38. package/dist/utils/providerHealth.d.ts +1 -1
  39. package/dist/utils/providerHealth.js +1 -1
  40. package/dist/utils/providerSetupMessages.js +1 -1
  41. package/package.json +1 -1
  42. package/dist/core/types.d.ts +0 -271
  43. package/dist/core/types.js +0 -153
  44. package/dist/lib/core/types.d.ts +0 -271
  45. package/dist/lib/core/types.js +0 -153
package/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## [7.41.2](https://github.com/juspay/neurolink/compare/v7.41.1...v7.41.2) (2025-09-20)
+
+ ## [7.41.1](https://github.com/juspay/neurolink/compare/v7.41.0...v7.41.1) (2025-09-20)
+
  ## [7.41.0](https://github.com/juspay/neurolink/compare/v7.40.1...v7.41.0) (2025-09-20)

  ### Features
package/dist/cli/loop/optionsSchema.d.ts CHANGED
@@ -1,4 +1,4 @@
- import { type TextGenerationOptions } from "../../lib/core/types.js";
+ import type { TextGenerationOptions } from "../../lib/types/generateTypes.js";
  /**
  * Defines the schema for a session variable or a generation option.
  */
package/dist/cli/loop/optionsSchema.js CHANGED
@@ -1,4 +1,4 @@
- import { AIProviderName, } from "../../lib/core/types.js";
+ import { AIProviderName } from "../../lib/types/providers.js";
  /**
  * Master schema for all text generation options.
  * This object provides metadata for validation and help text in the CLI loop.
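Both hunks above belong to the same refactor: types that used to live in lib/core/types.js are now served by the split modules under lib/types/. For external code that deep-imported the old module, a minimal migration sketch follows; the deep-import specifiers and the consumer function are assumptions for illustration, not something the diff shows, and most applications import from the package root instead.

```ts
// Hypothetical consumer that previously deep-imported the removed core/types module:
//   import { AIProviderName, type TextGenerationOptions } from "@juspay/neurolink/dist/lib/core/types.js";
// After 7.41.2 the equivalent symbols live under dist/lib/types/:
import { AIProviderName } from "@juspay/neurolink/dist/lib/types/providers.js";
import type { TextGenerationOptions } from "@juspay/neurolink/dist/lib/types/generateTypes.js";

// Illustrative only: no specific option fields are asserted here.
export function describeRequest(options: TextGenerationOptions, provider: AIProviderName): string {
  return `provider=${String(provider)} options=${JSON.stringify(options)}`;
}
```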
package/dist/index.d.ts CHANGED
@@ -103,4 +103,4 @@ export type { TextGenerationOptions, TextGenerationResult, AnalyticsData, Evalua
  * console.log(result.content);
  * ```
  */
- export declare function generateText(options: import("./core/types.js").TextGenerationOptions): Promise<import("./core/types.js").TextGenerationResult>;
+ export declare function generateText(options: import("./types/index.js").TextGenerationOptions): Promise<import("./types/index.js").TextGenerationResult>;
package/dist/lib/index.d.ts CHANGED
@@ -103,4 +103,4 @@ export type { TextGenerationOptions, TextGenerationResult, AnalyticsData, Evalua
  * console.log(result.content);
  * ```
  */
- export declare function generateText(options: import("./core/types.js").TextGenerationOptions): Promise<import("./core/types.js").TextGenerationResult>;
+ export declare function generateText(options: import("./types/index.js").TextGenerationOptions): Promise<import("./types/index.js").TextGenerationResult>;
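The only change to the public generateText declaration is where its option and result types are imported from; the call shape itself is unchanged. A hedged usage sketch against the root export, assuming the package root re-exports what dist/index.d.ts declares:

```ts
import { generateText, type TextGenerationOptions } from "@juspay/neurolink";

// Only `content` is read below, matching the doc example in the hunk above;
// the exact option fields are left to the package's own TextGenerationOptions.
export async function run(options: TextGenerationOptions): Promise<void> {
  const result = await generateText(options);
  console.log(result.content);
}
```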
package/dist/lib/models/modelRegistry.d.ts CHANGED
@@ -3,7 +3,7 @@
  * Provides centralized model data for models command system
  * Part of Phase 4.1 - Models Command System
  */
- import { AIProviderName } from "../core/types.js";
+ import { AIProviderName } from "../types/providers.js";
  import type { JsonValue } from "../types/common.js";
  /**
  * Model capabilities interface
package/dist/lib/models/modelRegistry.js CHANGED
@@ -3,7 +3,7 @@
  * Provides centralized model data for models command system
  * Part of Phase 4.1 - Models Command System
  */
- import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, DEFAULT_MODEL_ALIASES, } from "../core/types.js";
+ import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, DEFAULT_MODEL_ALIASES, } from "../types/providers.js";
  /**
  * Comprehensive model registry
  */
package/dist/lib/providers/amazonBedrock.js CHANGED
@@ -3,6 +3,7 @@ import { BedrockClient, ListFoundationModelsCommand, } from "@aws-sdk/client-bed
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { convertZodToJsonSchema } from "../utils/schemaConversion.js";
+ // Bedrock-specific types now imported from ../types/providerSpecific.js
  export class AmazonBedrockProvider extends BaseProvider {
  bedrockClient;
  conversationHistory = [];
package/dist/lib/providers/azureOpenai.d.ts CHANGED
@@ -1,6 +1,6 @@
  import { type LanguageModelV1 } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
- import type { AIProviderName } from "../core/types.js";
+ import type { AIProviderName } from "../types/providers.js";
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
  export declare class AzureOpenAIProvider extends BaseProvider {
  private apiKey;
package/dist/lib/providers/azureOpenai.js CHANGED
@@ -1,7 +1,7 @@
  import { createAzure } from "@ai-sdk/azure";
  import { streamText } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
- import { APIVersions } from "../core/types.js";
+ import { APIVersions } from "../types/providers.js";
  import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
  import { logger } from "../utils/logger.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
package/dist/lib/providers/googleAiStudio.js CHANGED
@@ -8,6 +8,7 @@ import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } fro
  import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
+ // Google AI Live API types now imported from ../types/providerSpecific.js
  // Create Google GenAI client
  async function createGoogleGenAIClient(apiKey) {
  const mod = await import("@google/genai");
package/dist/lib/providers/index.d.ts CHANGED
@@ -15,28 +15,3 @@ export { OllamaProvider as Ollama } from "./ollama.js";
  export { MistralProvider as MistralAI } from "./mistral.js";
  export { LiteLLMProvider as LiteLLM } from "./litellm.js";
  export type { AIProvider } from "../types/index.js";
- /**
- * Provider registry for dynamic provider instantiation
- */
- export declare const PROVIDERS: {
- readonly vertex: "GoogleVertexAI";
- readonly bedrock: "AmazonBedrock";
- readonly sagemaker: "AmazonSageMaker";
- readonly openai: "OpenAI";
- readonly "openai-compatible": "OpenAICompatible";
- readonly anthropic: "AnthropicProvider";
- readonly azure: "AzureOpenAIProvider";
- readonly "google-ai": "GoogleAIStudio";
- readonly huggingface: "HuggingFace";
- readonly ollama: "Ollama";
- readonly mistral: "MistralAI";
- readonly litellm: "LiteLLM";
- };
- /**
- * Type for valid provider names
- */
- export type ProviderName = keyof typeof PROVIDERS;
- /**
- * List of all available provider names
- */
- export declare const AVAILABLE_PROVIDERS: ProviderName[];
package/dist/lib/providers/index.js CHANGED
@@ -14,24 +14,3 @@ export { HuggingFaceProvider as HuggingFace } from "./huggingFace.js";
  export { OllamaProvider as Ollama } from "./ollama.js";
  export { MistralProvider as MistralAI } from "./mistral.js";
  export { LiteLLMProvider as LiteLLM } from "./litellm.js";
- /**
- * Provider registry for dynamic provider instantiation
- */
- export const PROVIDERS = {
- vertex: "GoogleVertexAI",
- bedrock: "AmazonBedrock",
- sagemaker: "AmazonSageMaker",
- openai: "OpenAI",
- "openai-compatible": "OpenAICompatible",
- anthropic: "AnthropicProvider",
- azure: "AzureOpenAIProvider",
- "google-ai": "GoogleAIStudio",
- huggingface: "HuggingFace",
- ollama: "Ollama",
- mistral: "MistralAI",
- litellm: "LiteLLM",
- };
- /**
- * List of all available provider names
- */
- export const AVAILABLE_PROVIDERS = Object.keys(PROVIDERS);
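Dropping PROVIDERS, ProviderName, and AVAILABLE_PROVIDERS removes the registry that some callers used for dynamic provider enumeration. A hedged sketch of a local replacement, built only from the named exports still visible in the hunks above; the deep-import specifier and the short keys (reused from the removed map) are assumptions:

```ts
// Hypothetical replacement for the removed PROVIDERS / AVAILABLE_PROVIDERS exports,
// limited to aliases visible in this diff; other provider classes would follow the same pattern.
import {
  HuggingFace,
  Ollama,
  MistralAI,
  LiteLLM,
} from "@juspay/neurolink/dist/lib/providers/index.js";

// Local registry keyed by the same short names the old PROVIDERS map used.
const LOCAL_PROVIDERS = {
  huggingface: HuggingFace,
  ollama: Ollama,
  mistral: MistralAI,
  litellm: LiteLLM,
} as const;

export type LocalProviderName = keyof typeof LOCAL_PROVIDERS;
export const AVAILABLE_LOCAL_PROVIDERS = Object.keys(LOCAL_PROVIDERS) as LocalProviderName[];
```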
package/dist/lib/providers/openaiCompatible.js CHANGED
@@ -33,6 +33,7 @@ const getOpenAICompatibleConfig = () => {
  const getDefaultOpenAICompatibleModel = () => {
  return process.env.OPENAI_COMPATIBLE_MODEL || undefined;
  };
+ // ModelsResponse type now imported from ../types/providerSpecific.js
  /**
  * OpenAI Compatible Provider - BaseProvider Implementation
  * Provides access to one of the OpenAI-compatible endpoint (OpenRouter, vLLM, LiteLLM, etc.)
package/dist/lib/types/cli.d.ts CHANGED
@@ -6,9 +6,9 @@ import type { AnalyticsData, TokenUsage } from "./analytics.js";
  import type { EvaluationData } from "../index.js";
  import type { ToolCall, ToolResult } from "./tools.js";
  /**
- * Base command arguments interface
+ * Base command arguments type
  */
- export interface BaseCommandArgs {
+ export type BaseCommandArgs = {
  /** Enable debug output */
  debug?: boolean;
  /** Output format */
@@ -19,11 +19,11 @@ export interface BaseCommandArgs {
  quiet?: boolean;
  /** Index signature to allow additional properties */
  [key: string]: unknown;
- }
+ };
  /**
  * Generate command arguments
  */
- export interface GenerateCommandArgs extends BaseCommandArgs {
+ export type GenerateCommandArgs = BaseCommandArgs & {
  /** Input text or prompt */
  input?: string;
  /** AI provider to use */
@@ -48,11 +48,11 @@ export interface GenerateCommandArgs extends BaseCommandArgs {
  maxSteps?: number;
  /** Output file */
  output?: string;
- }
+ };
  /**
  * Stream command arguments
  */
- export interface StreamCommandArgs extends BaseCommandArgs {
+ export type StreamCommandArgs = BaseCommandArgs & {
  /** Input text or prompt */
  input?: string;
  /** AI provider to use */
@@ -67,11 +67,11 @@ export interface StreamCommandArgs extends BaseCommandArgs {
  maxTokens?: number;
  /** Disable tools */
  disableTools?: boolean;
- }
+ };
  /**
  * Batch command arguments
  */
- export interface BatchCommandArgs extends BaseCommandArgs {
+ export type BatchCommandArgs = BaseCommandArgs & {
  /** Input file path */
  file?: string;
  /** AI provider to use */
@@ -90,11 +90,11 @@ export interface BatchCommandArgs extends BaseCommandArgs {
  output?: string;
  /** Disable tools */
  disableTools?: boolean;
- }
+ };
  /**
  * MCP command arguments - Enhanced with transport and server management
  */
- export interface MCPCommandArgs extends BaseCommandArgs {
+ export type MCPCommandArgs = BaseCommandArgs & {
  /** MCP server name */
  server?: string;
  /** MCP server name (alias for server) */
@@ -135,11 +135,11 @@ export interface MCPCommandArgs extends BaseCommandArgs {
  source?: string;
  /** Connection timeout */
  timeout?: number;
- }
+ };
  /**
  * Models command arguments - Enhanced for model management
  */
- export interface ModelsCommandArgs extends Omit<BaseCommandArgs, "format"> {
+ export type ModelsCommandArgs = Omit<BaseCommandArgs, "format"> & {
  /** AI provider to query (single or array) */
  provider?: string | string[];
  /** Model category filter */
@@ -208,11 +208,11 @@ export interface ModelsCommandArgs extends Omit<BaseCommandArgs, "format"> {
  resolve?: boolean;
  /** Maximum tokens filter */
  maxTokens?: number;
- }
+ };
  /**
  * Ollama command arguments
  */
- export interface OllamaCommandArgs extends BaseCommandArgs {
+ export type OllamaCommandArgs = BaseCommandArgs & {
  /** Ollama model name */
  model?: string;
  /** List available models */
@@ -223,11 +223,11 @@ export interface OllamaCommandArgs extends BaseCommandArgs {
  remove?: boolean;
  /** Show model information */
  show?: boolean;
- }
+ };
  /**
  * SageMaker command arguments
  */
- export interface SageMakerCommandArgs extends BaseCommandArgs {
+ export type SageMakerCommandArgs = BaseCommandArgs & {
  /** SageMaker endpoint name */
  endpoint?: string;
  /** Model name for the endpoint */
@@ -258,20 +258,33 @@ export interface SageMakerCommandArgs extends BaseCommandArgs {
  region?: string;
  /** Force operation without confirmation */
  force?: boolean;
- }
+ };
+ /**
+ * Secure configuration container that avoids process.env exposure
+ */
+ export type SecureConfiguration = {
+ accessKeyId: string;
+ secretAccessKey: string;
+ region: string;
+ endpointName: string;
+ timeout: number;
+ maxRetries: number;
+ sessionId: string;
+ createdAt: number;
+ };
  /**
  * Provider status command arguments
  */
- export interface ProviderStatusArgs extends BaseCommandArgs {
+ export type ProviderStatusArgs = BaseCommandArgs & {
  /** Specific provider to check */
  provider?: string;
  /** Check all providers */
  all?: boolean;
- }
+ };
  /**
  * CLI command result
  */
- export interface CommandResult {
+ export type CommandResult = {
  /** Command success status */
  success: boolean;
  /** Result data */
@@ -286,11 +299,11 @@ export interface CommandResult {
  timestamp?: number;
  command?: string;
  };
- }
+ };
  /**
  * Generate command result
  */
- export interface GenerateResult extends CommandResult {
+ export type GenerateResult = CommandResult & {
  content: string;
  provider?: string;
  model?: string;
@@ -312,25 +325,25 @@ export interface GenerateResult extends CommandResult {
  name: string;
  description: string;
  }>;
- }
+ };
  /**
  * Stream result chunk
  */
- export interface StreamChunk {
+ export type StreamChunk = {
  content?: string;
  delta?: string;
  done?: boolean;
  metadata?: UnknownRecord;
- }
+ };
  /**
  * CLI output formatting options
  */
- export interface OutputOptions {
- format: "text" | "json" | "table";
+ export type OutputOptions = {
+ format: "text" | "json" | "table" | "yaml";
  pretty?: boolean;
  color?: boolean;
  compact?: boolean;
- }
+ };
  /**
  * Command handler function type
  */
@@ -338,7 +351,7 @@ export type CommandHandler<TArgs = BaseCommandArgs, TResult = CommandResult> = (
  /**
  * Command definition
  */
- export interface CommandDefinition<TArgs = BaseCommandArgs> {
+ export type CommandDefinition<TArgs = BaseCommandArgs> = {
  name: string;
  description: string;
  aliases?: string[];
@@ -351,38 +364,38 @@ export interface CommandDefinition<TArgs = BaseCommandArgs> {
  };
  };
  handler: CommandHandler<TArgs>;
- }
+ };
  /**
  * CLI context
  */
- export interface CLIContext {
+ export type CLIContext = {
  cwd: string;
  args: string[];
  env: NodeJS.ProcessEnv;
  exitCode?: number;
- }
+ };
  /**
  * Color mapping for CLI output
  */
- export interface ColorMap {
+ export type ColorMap = {
  [severity: string]: {
  color: string;
  symbol?: string;
  };
- }
+ };
  /**
  * Display severity colors (for evaluation display)
  */
- export interface SeverityColors {
+ export type SeverityColors = {
  [key: string]: {
  color: string;
  symbol: string;
  };
- }
+ };
  /**
  * JSON output structure
  */
- export interface JSONOutput {
+ export type JSONOutput = {
  success: boolean;
  data?: JsonValue;
  error?: string;
@@ -391,13 +404,13 @@ export interface JSONOutput {
  command: string;
  version?: string;
  };
- }
+ };
  /**
  * Console override for quiet mode
  */
- export interface ConsoleOverride {
+ export type ConsoleOverride = {
  [method: string]: (() => void) | undefined;
- }
+ };
  /**
  * Type guard for generate result
  */
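One practical effect of converting these CLI argument shapes from interfaces to type aliases is that downstream code can no longer extend them through declaration merging and has to use intersections instead; the same file also widens OutputOptions.format to accept "yaml". A hedged sketch, where the deep-import specifier and the extra field are assumptions:

```ts
// Type-only imports; the deep specifier is an assumption for illustration.
import type { GenerateCommandArgs, OutputOptions } from "@juspay/neurolink/dist/lib/types/cli.js";

// Interfaces supported declaration merging; type aliases do not, so extra
// fields are now attached by intersection instead.
type MyGenerateArgs = GenerateCommandArgs & {
  dryRun?: boolean; // hypothetical extra flag, not part of the package
};

// OutputOptions gains "yaml" as an accepted format value in this release.
const output: OutputOptions = { format: "yaml", pretty: true };
const args: MyGenerateArgs = { input: "Summarize the changelog", dryRun: true };

console.log(args, output);
```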
package/dist/lib/types/providers.d.ts CHANGED
@@ -48,7 +48,9 @@ export declare enum OpenAIModels {
  GPT_4_TURBO = "gpt-4-turbo",
  GPT_4O = "gpt-4o",
  GPT_4O_MINI = "gpt-4o-mini",
- GPT_3_5_TURBO = "gpt-3.5-turbo"
+ GPT_3_5_TURBO = "gpt-3.5-turbo",
+ O1_PREVIEW = "o1-preview",
+ O1_MINI = "o1-mini"
  }
  /**
  * Supported Models for Google Vertex AI
@@ -90,6 +92,19 @@ export declare enum AnthropicModels {
  CLAUDE_3_OPUS = "claude-3-opus-20240229",
  CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
  }
+ /**
+ * API Versions for various providers
+ */
+ export declare enum APIVersions {
+ AZURE_LATEST = "2025-04-01-preview",
+ AZURE_STABLE = "2024-10-21",
+ AZURE_LEGACY = "2023-12-01-preview",
+ OPENAI_CURRENT = "v1",
+ OPENAI_BETA = "v1-beta",
+ GOOGLE_AI_CURRENT = "v1",
+ GOOGLE_AI_BETA = "v1beta",
+ ANTHROPIC_CURRENT = "2023-06-01"
+ }
  /**
  * Union type of all supported model names
  */
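APIVersions centralizes the version strings the providers previously handled individually; the azureOpenai.js import change earlier in this diff now resolves it from here. A small sketch of how a caller might pick a version; the deep-import specifier and the URL helper are illustrative assumptions, not part of the package:

```ts
import { APIVersions } from "@juspay/neurolink/dist/lib/types/providers.js";

// Hypothetical helper: build an Azure OpenAI request URL for a deployment,
// defaulting to the stable API version from the new enum.
function azureChatUrl(
  resource: string,
  deployment: string,
  version: APIVersions = APIVersions.AZURE_STABLE,
): string {
  return `https://${resource}.openai.azure.com/openai/deployments/${deployment}/chat/completions?api-version=${version}`;
}

console.log(azureChatUrl("my-resource", "gpt-4o")); // ...api-version=2024-10-21
```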
@@ -403,6 +418,173 @@ export type AISDKGenerateResult = GenerateResult & {
  }>;
  [key: string]: unknown;
  };
+ /**
+ * Bedrock tool usage structure
+ */
+ export type BedrockToolUse = {
+ toolUseId: string;
+ name: string;
+ input: Record<string, unknown>;
+ };
+ /**
+ * Bedrock tool result structure
+ */
+ export type BedrockToolResult = {
+ toolUseId: string;
+ content: Array<{
+ text: string;
+ }>;
+ status: string;
+ };
+ /**
+ * Bedrock content block structure
+ */
+ export type BedrockContentBlock = {
+ text?: string;
+ toolUse?: BedrockToolUse;
+ toolResult?: BedrockToolResult;
+ };
+ /**
+ * Bedrock message structure
+ */
+ export type BedrockMessage = {
+ role: "user" | "assistant";
+ content: BedrockContentBlock[];
+ };
+ /**
+ * Google AI Live media configuration
+ */
+ export type GenAILiveMedia = {
+ data: string;
+ mimeType: string;
+ };
+ /**
+ * Live server message inline data
+ */
+ export type LiveServerMessagePartInlineData = {
+ data?: string;
+ };
+ /**
+ * Live server message model turn
+ */
+ export type LiveServerMessageModelTurn = {
+ parts?: Array<{
+ inlineData?: LiveServerMessagePartInlineData;
+ }>;
+ };
+ /**
+ * Live server content structure
+ */
+ export type LiveServerContent = {
+ modelTurn?: LiveServerMessageModelTurn;
+ interrupted?: boolean;
+ };
+ /**
+ * Live server message structure
+ */
+ export type LiveServerMessage = {
+ serverContent?: LiveServerContent;
+ };
+ /**
+ * Live connection callbacks
+ */
+ export type LiveConnectCallbacks = {
+ onopen?: () => void;
+ onmessage?: (message: LiveServerMessage) => void;
+ onerror?: (e: {
+ message?: string;
+ }) => void;
+ onclose?: (e: {
+ code?: number;
+ reason?: string;
+ }) => void;
+ };
+ /**
+ * Live connection configuration
+ */
+ export type LiveConnectConfig = {
+ model: string;
+ callbacks: LiveConnectCallbacks;
+ config: {
+ responseModalities: string[];
+ speechConfig: {
+ voiceConfig: {
+ prebuiltVoiceConfig: {
+ voiceName: string;
+ };
+ };
+ };
+ };
+ };
+ /**
+ * Google AI Live session interface
+ */
+ export type GenAILiveSession = {
+ sendRealtimeInput?: (payload: {
+ media?: GenAILiveMedia;
+ event?: string;
+ }) => Promise<void> | void;
+ sendInput?: (payload: {
+ event?: string;
+ media?: GenAILiveMedia;
+ }) => Promise<void> | void;
+ close?: (code?: number, reason?: string) => Promise<void> | void;
+ };
+ /**
+ * Google AI client interface
+ */
+ export type GenAIClient = {
+ live: {
+ connect: (config: LiveConnectConfig) => Promise<GenAILiveSession>;
+ };
+ };
+ /**
+ * Google GenAI constructor type
+ */
+ export type GoogleGenAIClass = new (cfg: {
+ apiKey: string;
+ }) => GenAIClient;
+ /**
+ * OpenAI-compatible models endpoint response structure
+ */
+ export type ModelsResponse = {
+ data: Array<{
+ id: string;
+ object: string;
+ created?: number;
+ owned_by?: string;
+ }>;
+ };
+ /**
+ * Default model aliases for easy reference
+ */
+ export declare const DEFAULT_MODEL_ALIASES: {
+ readonly LATEST_OPENAI: OpenAIModels.GPT_4O;
+ readonly FASTEST_OPENAI: OpenAIModels.GPT_4O_MINI;
+ readonly LATEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_SONNET;
+ readonly FASTEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_HAIKU;
+ readonly LATEST_GOOGLE: GoogleAIModels.GEMINI_2_5_PRO;
+ readonly FASTEST_GOOGLE: GoogleAIModels.GEMINI_2_5_FLASH;
+ readonly BEST_CODING: AnthropicModels.CLAUDE_3_5_SONNET;
+ readonly BEST_ANALYSIS: GoogleAIModels.GEMINI_2_5_PRO;
+ readonly BEST_CREATIVE: AnthropicModels.CLAUDE_3_5_SONNET;
+ readonly BEST_VALUE: GoogleAIModels.GEMINI_2_5_FLASH;
+ };
+ /**
+ * @deprecated Use DEFAULT_MODEL_ALIASES instead. Will be removed in future version.
+ */
+ export declare const ModelAliases: {
+ readonly LATEST_OPENAI: OpenAIModels.GPT_4O;
+ readonly FASTEST_OPENAI: OpenAIModels.GPT_4O_MINI;
+ readonly LATEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_SONNET;
+ readonly FASTEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_HAIKU;
+ readonly LATEST_GOOGLE: GoogleAIModels.GEMINI_2_5_PRO;
+ readonly FASTEST_GOOGLE: GoogleAIModels.GEMINI_2_5_FLASH;
+ readonly BEST_CODING: AnthropicModels.CLAUDE_3_5_SONNET;
+ readonly BEST_ANALYSIS: GoogleAIModels.GEMINI_2_5_PRO;
+ readonly BEST_CREATIVE: AnthropicModels.CLAUDE_3_5_SONNET;
+ readonly BEST_VALUE: GoogleAIModels.GEMINI_2_5_FLASH;
+ };
  /**
  * Default provider configurations
  */
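The Bedrock and Google AI Live shapes added here were previously private to the provider files; the comments added to amazonBedrock.js and googleAiStudio.js earlier in this diff point at the same relocation (via types/providerSpecific.js). A short sketch constructing a conversation turn with the exported Bedrock types; the import specifier is an assumption and the field values are illustrative:

```ts
import type { BedrockMessage, BedrockToolResult } from "@juspay/neurolink/dist/lib/types/providers.js";

// Illustrative tool-result turn following the exported shapes.
const toolResult: BedrockToolResult = {
  toolUseId: "tool-123",
  content: [{ text: '{"temperature": 21}' }],
  status: "success",
};

const turn: BedrockMessage = {
  role: "user",
  content: [{ text: "Here is the tool output:" }, { toolResult }],
};

console.log(JSON.stringify(turn, null, 2));
```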
package/dist/lib/types/providers.js CHANGED
@@ -40,6 +40,8 @@ export var OpenAIModels;
  OpenAIModels["GPT_4O"] = "gpt-4o";
  OpenAIModels["GPT_4O_MINI"] = "gpt-4o-mini";
  OpenAIModels["GPT_3_5_TURBO"] = "gpt-3.5-turbo";
+ OpenAIModels["O1_PREVIEW"] = "o1-preview";
+ OpenAIModels["O1_MINI"] = "o1-mini";
  })(OpenAIModels || (OpenAIModels = {}));
  /**
  * Supported Models for Google Vertex AI
@@ -95,6 +97,46 @@ export var AnthropicModels;
  AnthropicModels["CLAUDE_3_OPUS"] = "claude-3-opus-20240229";
  AnthropicModels["CLAUDE_3_HAIKU"] = "claude-3-haiku-20240307";
  })(AnthropicModels || (AnthropicModels = {}));
+ /**
+ * API Versions for various providers
+ */
+ export var APIVersions;
+ (function (APIVersions) {
+ // Azure OpenAI API versions
+ APIVersions["AZURE_LATEST"] = "2025-04-01-preview";
+ APIVersions["AZURE_STABLE"] = "2024-10-21";
+ APIVersions["AZURE_LEGACY"] = "2023-12-01-preview";
+ // OpenAI API versions
+ APIVersions["OPENAI_CURRENT"] = "v1";
+ APIVersions["OPENAI_BETA"] = "v1-beta";
+ // Google AI API versions
+ APIVersions["GOOGLE_AI_CURRENT"] = "v1";
+ APIVersions["GOOGLE_AI_BETA"] = "v1beta";
+ // Anthropic API versions
+ APIVersions["ANTHROPIC_CURRENT"] = "2023-06-01";
+ // Other provider versions can be added here
+ })(APIVersions || (APIVersions = {}));
+ /**
+ * Default model aliases for easy reference
+ */
+ export const DEFAULT_MODEL_ALIASES = {
+ // Latest recommended models per provider
+ LATEST_OPENAI: OpenAIModels.GPT_4O,
+ FASTEST_OPENAI: OpenAIModels.GPT_4O_MINI,
+ LATEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_SONNET,
+ FASTEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_HAIKU,
+ LATEST_GOOGLE: GoogleAIModels.GEMINI_2_5_PRO,
+ FASTEST_GOOGLE: GoogleAIModels.GEMINI_2_5_FLASH,
+ // Best models by use case
+ BEST_CODING: AnthropicModels.CLAUDE_3_5_SONNET,
+ BEST_ANALYSIS: GoogleAIModels.GEMINI_2_5_PRO,
+ BEST_CREATIVE: AnthropicModels.CLAUDE_3_5_SONNET,
+ BEST_VALUE: GoogleAIModels.GEMINI_2_5_FLASH,
+ };
+ /**
+ * @deprecated Use DEFAULT_MODEL_ALIASES instead. Will be removed in future version.
+ */
+ export const ModelAliases = DEFAULT_MODEL_ALIASES;
  /**
  * Default provider configurations
  */
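DEFAULT_MODEL_ALIASES (with ModelAliases kept as a deprecated alias for it) gives callers symbolic names for the recommended model per provider and per use case. A hedged usage sketch; the deep-import specifier and the way the resolved value is passed along are assumptions:

```ts
import { DEFAULT_MODEL_ALIASES } from "@juspay/neurolink/dist/lib/types/providers.js";

// BEST_VALUE maps to GoogleAIModels.GEMINI_2_5_FLASH per the alias table above;
// string enum members are assignable to string, so the value can be forwarded
// wherever a plain model identifier is expected.
const model: string = DEFAULT_MODEL_ALIASES.BEST_VALUE;
console.log(`best-value alias resolves to: ${model}`);
```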