@juspay/neurolink 8.28.0 → 8.30.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83) hide show
  1. package/CHANGELOG.md +12 -0
  2. package/README.md +23 -2
  3. package/dist/adapters/video/vertexVideoHandler.d.ts +12 -2
  4. package/dist/adapters/video/vertexVideoHandler.js +12 -2
  5. package/dist/core/baseProvider.d.ts +19 -0
  6. package/dist/core/baseProvider.js +174 -0
  7. package/dist/index.d.ts +3 -3
  8. package/dist/index.js +7 -1
  9. package/dist/lib/adapters/video/vertexVideoHandler.d.ts +12 -2
  10. package/dist/lib/adapters/video/vertexVideoHandler.js +12 -2
  11. package/dist/lib/core/baseProvider.d.ts +19 -0
  12. package/dist/lib/core/baseProvider.js +174 -0
  13. package/dist/lib/index.d.ts +3 -3
  14. package/dist/lib/index.js +7 -1
  15. package/dist/lib/mcp/auth/index.d.ts +6 -0
  16. package/dist/lib/mcp/auth/index.js +12 -0
  17. package/dist/lib/mcp/auth/oauthClientProvider.d.ts +93 -0
  18. package/dist/lib/mcp/auth/oauthClientProvider.js +326 -0
  19. package/dist/lib/mcp/auth/tokenStorage.d.ts +56 -0
  20. package/dist/lib/mcp/auth/tokenStorage.js +135 -0
  21. package/dist/lib/mcp/externalServerManager.d.ts +5 -1
  22. package/dist/lib/mcp/externalServerManager.js +84 -22
  23. package/dist/lib/mcp/httpRateLimiter.d.ts +152 -0
  24. package/dist/lib/mcp/httpRateLimiter.js +365 -0
  25. package/dist/lib/mcp/httpRetryHandler.d.ts +62 -0
  26. package/dist/lib/mcp/httpRetryHandler.js +154 -0
  27. package/dist/lib/mcp/index.d.ts +5 -0
  28. package/dist/lib/mcp/index.js +8 -0
  29. package/dist/lib/mcp/mcpClientFactory.d.ts +25 -2
  30. package/dist/lib/mcp/mcpClientFactory.js +206 -10
  31. package/dist/lib/mcp/toolRegistry.d.ts +1 -2
  32. package/dist/lib/mcp/toolRegistry.js +1 -5
  33. package/dist/lib/neurolink.js +3 -0
  34. package/dist/lib/providers/amazonBedrock.js +4 -1
  35. package/dist/lib/providers/ollama.js +4 -1
  36. package/dist/lib/sdk/toolRegistration.d.ts +3 -25
  37. package/dist/lib/types/cli.d.ts +42 -42
  38. package/dist/lib/types/externalMcp.d.ts +55 -3
  39. package/dist/lib/types/externalMcp.js +0 -1
  40. package/dist/lib/types/generateTypes.d.ts +37 -0
  41. package/dist/lib/types/hitlTypes.d.ts +38 -0
  42. package/dist/lib/types/index.d.ts +6 -8
  43. package/dist/lib/types/index.js +4 -4
  44. package/dist/lib/types/mcpTypes.d.ts +235 -27
  45. package/dist/lib/types/providers.d.ts +16 -16
  46. package/dist/lib/types/sdkTypes.d.ts +2 -2
  47. package/dist/lib/types/tools.d.ts +42 -3
  48. package/dist/lib/types/utilities.d.ts +19 -0
  49. package/dist/mcp/auth/index.d.ts +6 -0
  50. package/dist/mcp/auth/index.js +11 -0
  51. package/dist/mcp/auth/oauthClientProvider.d.ts +93 -0
  52. package/dist/mcp/auth/oauthClientProvider.js +325 -0
  53. package/dist/mcp/auth/tokenStorage.d.ts +56 -0
  54. package/dist/mcp/auth/tokenStorage.js +134 -0
  55. package/dist/mcp/externalServerManager.d.ts +5 -1
  56. package/dist/mcp/externalServerManager.js +84 -22
  57. package/dist/mcp/httpRateLimiter.d.ts +152 -0
  58. package/dist/mcp/httpRateLimiter.js +364 -0
  59. package/dist/mcp/httpRetryHandler.d.ts +62 -0
  60. package/dist/mcp/httpRetryHandler.js +153 -0
  61. package/dist/mcp/index.d.ts +5 -0
  62. package/dist/mcp/index.js +8 -0
  63. package/dist/mcp/mcpClientFactory.d.ts +25 -2
  64. package/dist/mcp/mcpClientFactory.js +206 -10
  65. package/dist/mcp/toolRegistry.d.ts +1 -2
  66. package/dist/mcp/toolRegistry.js +1 -5
  67. package/dist/neurolink.js +3 -0
  68. package/dist/providers/amazonBedrock.js +4 -1
  69. package/dist/providers/ollama.js +4 -1
  70. package/dist/sdk/toolRegistration.d.ts +3 -25
  71. package/dist/types/cli.d.ts +42 -42
  72. package/dist/types/externalMcp.d.ts +55 -3
  73. package/dist/types/externalMcp.js +0 -1
  74. package/dist/types/generateTypes.d.ts +37 -0
  75. package/dist/types/hitlTypes.d.ts +38 -0
  76. package/dist/types/index.d.ts +6 -8
  77. package/dist/types/index.js +4 -4
  78. package/dist/types/mcpTypes.d.ts +235 -27
  79. package/dist/types/providers.d.ts +16 -16
  80. package/dist/types/sdkTypes.d.ts +2 -2
  81. package/dist/types/tools.d.ts +42 -3
  82. package/dist/types/utilities.d.ts +19 -0
  83. package/package.json +2 -1
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
1
+ ## [8.30.0](https://github.com/juspay/neurolink/compare/v8.29.0...v8.30.0) (2026-01-03)
2
+
3
+ ### Features
4
+
5
+ - **(video):** add video generation support to NeuroLink SDK with Vertex AI ([6b490a1](https://github.com/juspay/neurolink/commit/6b490a1f436a823ff6bad41fc77f98d62be08c68))
6
+
7
+ ## [8.29.0](https://github.com/juspay/neurolink/compare/v8.28.0...v8.29.0) (2026-01-02)
8
+
9
+ ### Features
10
+
11
+ - **(mcp):** add HTTP/Streamable HTTP transport support for MCP servers ([67f1c23](https://github.com/juspay/neurolink/commit/67f1c23ac2d5e687b7455c627da952a820af773b))
12
+
1
13
  ## [8.28.0](https://github.com/juspay/neurolink/compare/v8.27.0...v8.28.0) (2026-01-02)
2
14
 
3
15
  ### Features
package/README.md CHANGED
@@ -25,6 +25,7 @@ Extracted from production systems at Juspay and battle-tested at enterprise scal
25
25
 
26
26
  ## What's New (Q4 2025)
27
27
 
28
+ - **HTTP/Streamable HTTP Transport for MCP** – Connect to remote MCP servers via HTTP with authentication headers, retry logic, and rate limiting. → [HTTP Transport Guide](docs/MCP-HTTP-TRANSPORT.md)
28
29
  - 🧠 **Gemini 3 Preview Support** - Full support for gemini-3-flash-preview and gemini-3-pro-preview with extended thinking capabilities
29
30
  - **Structured Output with Zod Schemas** – Type-safe JSON generation with automatic validation using `schema` + `output.format: "json"` in `generate()`. → [Structured Output Guide](docs/features/structured-output.md)
30
31
  - **CSV File Support** – Attach CSV files to prompts for AI-powered data analysis with auto-detection. → [CSV Guide](docs/features/multimodal-chat.md#csv-file-support)
@@ -96,7 +97,7 @@ NeuroLink is a comprehensive AI development platform. Every feature below is pro
96
97
  **58+ External MCP Servers** supported (GitHub, PostgreSQL, Google Drive, Slack, and more):
97
98
 
98
99
  ```typescript
99
- // Add any MCP server dynamically
100
+ // stdio transport - local MCP servers via command execution
100
101
  await neurolink.addExternalMCPServer("github", {
101
102
  command: "npx",
102
103
  args: ["-y", "@modelcontextprotocol/server-github"],
@@ -104,13 +105,32 @@ await neurolink.addExternalMCPServer("github", {
104
105
  env: { GITHUB_TOKEN: process.env.GITHUB_TOKEN },
105
106
  });
106
107
 
108
+ // HTTP transport - remote MCP servers via URL
109
+ await neurolink.addExternalMCPServer("github-copilot", {
110
+ transport: "http",
111
+ url: "https://api.githubcopilot.com/mcp",
112
+ headers: { Authorization: "Bearer YOUR_COPILOT_TOKEN" },
113
+ timeout: 15000,
114
+ retries: 5,
115
+ });
116
+
107
117
  // Tools automatically available to AI
108
118
  const result = await neurolink.generate({
109
119
  input: { text: 'Create a GitHub issue titled "Bug in auth flow"' },
110
120
  });
111
121
  ```
112
122
 
123
+ **MCP Transport Options:**
124
+
125
+ | Transport | Use Case | Key Features |
126
+ | ----------- | -------------- | ----------------------------------------------- |
127
+ | `stdio` | Local servers | Command execution, environment variables |
128
+ | `http` | Remote servers | URL-based, auth headers, retries, rate limiting |
129
+ | `sse` | Event streams | Server-Sent Events, real-time updates |
130
+ | `websocket` | Bi-directional | Full-duplex communication |
131
+
113
132
  **[📖 MCP Integration Guide](docs/advanced/mcp-integration.md)** - Setup external servers
133
+ **[📖 HTTP Transport Guide](docs/MCP-HTTP-TRANSPORT.md)** - Remote MCP server configuration
114
134
 
115
135
  ---
116
136
 
@@ -327,7 +347,7 @@ Full command and API breakdown lives in [`docs/cli/commands.md`](docs/cli/comman
327
347
  | **Memory & context** | Conversation memory, Mem0 integration, Redis history export (Q4), context summarization (Q4). |
328
348
  | **CLI tooling** | Loop sessions (Q3), setup wizard, config validation, Redis auto-detect, JSON output. |
329
349
  | **Enterprise ops** | Proxy support, regional routing (Q3), telemetry hooks, configuration management. |
330
- | **Tool ecosystem** | MCP auto discovery, LiteLLM hub access, SageMaker custom deployment, web search. |
350
+ | **Tool ecosystem** | MCP auto discovery, HTTP/stdio/SSE/WebSocket transports, LiteLLM hub access, SageMaker custom deployment, web search. |
331
351
 
332
352
  ## Documentation Map
333
353
 
@@ -349,6 +369,7 @@ Full command and API breakdown lives in [`docs/cli/commands.md`](docs/cli/comman
349
369
  - **Enterprise proxy & security** – Configure outbound policies and compliance posture. → [`docs/ENTERPRISE-PROXY-SETUP.md`](docs/ENTERPRISE-PROXY-SETUP.md)
350
370
  - **Configuration automation** – Manage environments, regions, and credentials safely. → [`docs/CONFIGURATION-MANAGEMENT.md`](docs/CONFIGURATION-MANAGEMENT.md)
351
371
  - **MCP tool ecosystem** – Auto-discover Model Context Protocol tools and extend workflows. → [`docs/advanced/mcp-integration.md`](docs/advanced/mcp-integration.md)
372
+ - **Remote MCP via HTTP** – Connect to HTTP-based MCP servers with authentication, retries, and rate limiting. → [`docs/MCP-HTTP-TRANSPORT.md`](docs/MCP-HTTP-TRANSPORT.md)
352
373
 
353
374
  ## Contributing & Support
354
375
 
@@ -16,8 +16,12 @@ import { NeuroLinkError } from "../../utils/errorHandling.js";
16
16
  * Video generation runtime error codes
17
17
  *
18
18
  * These are for runtime/execution errors during video generation.
19
- * Input validation errors (missing image, invalid options, etc.) are handled
20
- * by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
19
+ * Pure option/shape validation (missing image option, invalid config values, etc.)
20
+ * is handled by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
21
+ *
22
+ * Error categorization:
23
+ * - INVALID_INPUT → ErrorCategory.execution (runtime I/O failures)
24
+ * - parameterValidation errors → ErrorCategory.validation (schema/option issues)
21
25
  *
22
26
  * Following TTS pattern (TTS_ERROR_CODES + TTSError in ttsProcessor.ts)
23
27
  */
@@ -28,6 +32,12 @@ export declare const VIDEO_ERROR_CODES: {
28
32
  readonly PROVIDER_NOT_CONFIGURED: "VIDEO_PROVIDER_NOT_CONFIGURED";
29
33
  /** Polling for video completion timed out */
30
34
  readonly POLL_TIMEOUT: "VIDEO_POLL_TIMEOUT";
35
+ /**
36
+ * Runtime I/O error during input processing.
37
+ * Used for: failed URL fetch, failed file read, corrupt/unreadable buffer.
38
+ * NOT for: missing options or invalid config shapes (use parameterValidation).
39
+ */
40
+ readonly INVALID_INPUT: "VIDEO_INVALID_INPUT";
31
41
  };
32
42
  /**
33
43
  * Video generation error class
@@ -21,8 +21,12 @@ import { logger } from "../../utils/logger.js";
21
21
  * Video generation runtime error codes
22
22
  *
23
23
  * These are for runtime/execution errors during video generation.
24
- * Input validation errors (missing image, invalid options, etc.) are handled
25
- * by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
24
+ * Pure option/shape validation (missing image option, invalid config values, etc.)
25
+ * is handled by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
26
+ *
27
+ * Error categorization:
28
+ * - INVALID_INPUT → ErrorCategory.execution (runtime I/O failures)
29
+ * - parameterValidation errors → ErrorCategory.validation (schema/option issues)
26
30
  *
27
31
  * Following TTS pattern (TTS_ERROR_CODES + TTSError in ttsProcessor.ts)
28
32
  */
@@ -33,6 +37,12 @@ export const VIDEO_ERROR_CODES = {
33
37
  PROVIDER_NOT_CONFIGURED: "VIDEO_PROVIDER_NOT_CONFIGURED",
34
38
  /** Polling for video completion timed out */
35
39
  POLL_TIMEOUT: "VIDEO_POLL_TIMEOUT",
40
+ /**
41
+ * Runtime I/O error during input processing.
42
+ * Used for: failed URL fetch, failed file read, corrupt/unreadable buffer.
43
+ * NOT for: missing options or invalid config shapes (use parameterValidation).
44
+ */
45
+ INVALID_INPUT: "VIDEO_INVALID_INPUT",
36
46
  };
37
47
  /**
38
48
  * Video generation error class
@@ -226,6 +226,25 @@ export declare abstract class BaseProvider implements AIProvider {
226
226
  */
227
227
  protected normalizeStreamOptions(optionsOrPrompt: StreamOptions | string): StreamOptions;
228
228
  protected enhanceResult(result: EnhancedGenerateResult, options: TextGenerationOptions, startTime: number): Promise<EnhancedGenerateResult>;
229
+ /**
230
+ * Handle video generation mode
231
+ *
232
+ * Generates video from input image + text prompt using Vertex AI Veo 3.1.
233
+ *
234
+ * @param options - Text generation options with video configuration
235
+ * @param startTime - Generation start timestamp for metrics
236
+ * @returns Enhanced result with video data
237
+ *
238
+ * @example
239
+ * ```typescript
240
+ * const result = await provider.generate({
241
+ * input: { text: "Product showcase", images: [imageBuffer] },
242
+ * output: { mode: "video", video: { resolution: "1080p" } }
243
+ * });
244
+ * // result.video contains the generated video
245
+ * ```
246
+ */
247
+ private handleVideoGeneration;
229
248
  /**
230
249
  * Create analytics - delegated to TelemetryHandler
231
250
  */
@@ -323,6 +323,11 @@ export class BaseProvider {
323
323
  this.validateOptions(options);
324
324
  const startTime = Date.now();
325
325
  try {
326
+ // ===== VIDEO GENERATION MODE =====
327
+ // Generate video from image + prompt using Veo 3.1
328
+ if (options.output?.mode === "video") {
329
+ return await this.handleVideoGeneration(options, startTime);
330
+ }
326
331
  // ===== TTS MODE 1: Direct Input Synthesis (useAiResponse=false) =====
327
332
  // Synthesize input text directly without AI generation
328
333
  // This is optimal for simple read-aloud scenarios
@@ -659,6 +664,175 @@ export class BaseProvider {
659
664
  }
660
665
  return enhancedResult;
661
666
  }
667
+ /**
668
+ * Handle video generation mode
669
+ *
670
+ * Generates video from input image + text prompt using Vertex AI Veo 3.1.
671
+ *
672
+ * @param options - Text generation options with video configuration
673
+ * @param startTime - Generation start timestamp for metrics
674
+ * @returns Enhanced result with video data
675
+ *
676
+ * @example
677
+ * ```typescript
678
+ * const result = await provider.generate({
679
+ * input: { text: "Product showcase", images: [imageBuffer] },
680
+ * output: { mode: "video", video: { resolution: "1080p" } }
681
+ * });
682
+ * // result.video contains the generated video
683
+ * ```
684
+ */
685
+ async handleVideoGeneration(options, startTime) {
686
+ // Dynamic imports to avoid loading video dependencies unless needed
687
+ const { generateVideoWithVertex, VideoError, VIDEO_ERROR_CODES } = await import("../adapters/video/vertexVideoHandler.js");
688
+ const { validateVideoGenerationInput, validateImageForVideo } = await import("../utils/parameterValidation.js");
689
+ const { ErrorFactory } = await import("../utils/errorHandling.js");
690
+ // Build GenerateOptions for validation
691
+ const generateOptions = {
692
+ input: options.input || { text: options.prompt || "" },
693
+ output: options.output,
694
+ provider: options.provider,
695
+ model: options.model,
696
+ };
697
+ // Validate video generation input
698
+ const validation = validateVideoGenerationInput(generateOptions);
699
+ if (!validation.isValid) {
700
+ throw ErrorFactory.invalidParameters("video-generation", new Error(validation.errors.map((e) => e.message).join("; ")), { errors: validation.errors });
701
+ }
702
+ // Log warnings if any
703
+ if (validation.warnings.length > 0) {
704
+ for (const warning of validation.warnings) {
705
+ logger.warn(`Video generation warning: ${warning}`);
706
+ }
707
+ }
708
+ // Extract image from input
709
+ const imageInput = options.input?.images?.[0];
710
+ if (!imageInput) {
711
+ throw new VideoError({
712
+ code: VIDEO_ERROR_CODES.INVALID_INPUT,
713
+ message: "Video generation requires an input image. Provide via input.images array.",
714
+ retriable: false,
715
+ context: { field: "input.images" },
716
+ });
717
+ }
718
+ // Timeout for image IO operations (15 seconds)
719
+ const IMAGE_IO_TIMEOUT_MS = 15000;
720
+ // Load image buffer if path/URL
721
+ let imageBuffer;
722
+ if (typeof imageInput === "string") {
723
+ if (imageInput.startsWith("http://") ||
724
+ imageInput.startsWith("https://")) {
725
+ // URL - fetch the image with timeout
726
+ logger.debug("Fetching image from URL for video generation", {
727
+ url: imageInput.substring(0, 100),
728
+ });
729
+ let response;
730
+ try {
731
+ response = await this.executeWithTimeout(() => fetch(imageInput), {
732
+ timeout: IMAGE_IO_TIMEOUT_MS,
733
+ operationType: "generate", // Part of video generation flow
734
+ });
735
+ }
736
+ catch (error) {
737
+ throw new VideoError({
738
+ code: VIDEO_ERROR_CODES.INVALID_INPUT,
739
+ message: `Failed to fetch image from URL: ${error instanceof Error ? error.message : "Request timed out"}`,
740
+ retriable: true,
741
+ context: { url: imageInput, timeout: IMAGE_IO_TIMEOUT_MS },
742
+ originalError: error instanceof Error ? error : undefined,
743
+ });
744
+ }
745
+ if (!response.ok) {
746
+ throw new VideoError({
747
+ code: VIDEO_ERROR_CODES.INVALID_INPUT,
748
+ message: `Failed to fetch image from URL: ${response.status} ${response.statusText}`,
749
+ retriable: response.status >= 500,
750
+ context: { url: imageInput, status: response.status },
751
+ });
752
+ }
753
+ imageBuffer = Buffer.from(await response.arrayBuffer());
754
+ }
755
+ else {
756
+ // File path - read from disk with timeout
757
+ logger.debug("Reading image from path for video generation", {
758
+ path: imageInput,
759
+ });
760
+ const fs = await import("node:fs/promises");
761
+ try {
762
+ imageBuffer = await this.executeWithTimeout(() => fs.readFile(imageInput), { timeout: IMAGE_IO_TIMEOUT_MS, operationType: "generate" });
763
+ }
764
+ catch (error) {
765
+ throw new VideoError({
766
+ code: VIDEO_ERROR_CODES.INVALID_INPUT,
767
+ message: `Failed to read image file: ${error instanceof Error ? error.message : String(error)}`,
768
+ retriable: false,
769
+ context: { path: imageInput, timeout: IMAGE_IO_TIMEOUT_MS },
770
+ originalError: error instanceof Error ? error : undefined,
771
+ });
772
+ }
773
+ }
774
+ }
775
+ else if (Buffer.isBuffer(imageInput)) {
776
+ imageBuffer = imageInput;
777
+ }
778
+ else if (typeof imageInput === "object" && "data" in imageInput) {
779
+ // ImageWithAltText type
780
+ const imgData = imageInput.data;
781
+ if (typeof imgData === "string") {
782
+ imageBuffer = Buffer.from(imgData, "base64");
783
+ }
784
+ else if (Buffer.isBuffer(imgData)) {
785
+ imageBuffer = imgData;
786
+ }
787
+ else {
788
+ throw new VideoError({
789
+ code: VIDEO_ERROR_CODES.INVALID_INPUT,
790
+ message: "ImageWithAltText.data must be a base64 string or Buffer.",
791
+ retriable: false,
792
+ context: { field: "input.images[0].data", type: typeof imgData },
793
+ });
794
+ }
795
+ }
796
+ else {
797
+ throw new VideoError({
798
+ code: VIDEO_ERROR_CODES.INVALID_INPUT,
799
+ message: "Invalid image input type. Provide Buffer, path string, URL, or ImageWithAltText.",
800
+ retriable: false,
801
+ context: { field: "input.images[0]", type: typeof imageInput },
802
+ });
803
+ }
804
+ // Validate image format and size (for Buffer inputs)
805
+ const imageValidation = validateImageForVideo(imageBuffer);
806
+ if (imageValidation) {
807
+ throw ErrorFactory.invalidParameters("video-generation", new Error(imageValidation.message), { field: "input.images[0]", validation: imageValidation });
808
+ }
809
+ // Get prompt text
810
+ const prompt = options.prompt || options.input?.text || "";
811
+ logger.info("Starting video generation", {
812
+ provider: "vertex",
813
+ model: options.model || "veo-3.1-generate-001",
814
+ promptLength: prompt.length,
815
+ imageSize: imageBuffer.length,
816
+ resolution: options.output?.video?.resolution || "720p",
817
+ duration: options.output?.video?.length || 6,
818
+ });
819
+ // Generate video using Vertex handler (no processor abstraction)
820
+ const videoResult = await generateVideoWithVertex(imageBuffer, prompt, options.output?.video);
821
+ logger.info("Video generation complete", {
822
+ videoSize: videoResult.data.length,
823
+ duration: videoResult.metadata?.duration,
824
+ processingTime: videoResult.metadata?.processingTime,
825
+ });
826
+ // Build result
827
+ const baseResult = {
828
+ content: prompt, // Echo the prompt as content
829
+ provider: "vertex",
830
+ model: options.model || "veo-3.1-generate-001",
831
+ usage: { input: 0, output: 0, total: 0 },
832
+ video: videoResult,
833
+ };
834
+ return await this.enhanceResult(baseResult, options, startTime);
835
+ }
662
836
  /**
663
837
  * Create analytics - delegated to TelemetryHandler
664
838
  */
package/dist/index.d.ts CHANGED
@@ -10,7 +10,7 @@ import { AIProviderFactory } from "./core/factory.js";
10
10
  export { AIProviderFactory };
11
11
  export type { AIProvider, AIModelProviderConfig, StreamingOptions, ProviderAttempt, SupportedModelName, } from "./types/index.js";
12
12
  export type { GenerateOptions, GenerateResult, EnhancedProvider, } from "./types/generateTypes.js";
13
- export type { ToolContext } from "./sdk/toolRegistration.js";
13
+ export type { ToolContext } from "./types/tools.js";
14
14
  export { validateTool } from "./sdk/toolRegistration.js";
15
15
  export type { ToolResult, ToolDefinition } from "./types/tools.js";
16
16
  export { DEFAULT_PROVIDER_CONFIGS } from "./types/index.js";
@@ -83,8 +83,8 @@ export declare function createBestAIProvider(requestedProvider?: string, modelNa
83
83
  * await writeFile('output.txt', 'Hello from MCP!');
84
84
  * ```
85
85
  */
86
- export { initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger, } from "./mcp/index.js";
87
- export type { McpMetadata, DiscoveredMcp } from "./types/mcpTypes.js";
86
+ export { initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger, HTTPRateLimiter, RateLimiterManager, globalRateLimiterManager, DEFAULT_RATE_LIMIT_CONFIG, DEFAULT_HTTP_RETRY_CONFIG, isRetryableStatusCode, isRetryableHTTPError, withHTTPRetry, InMemoryTokenStorage, FileTokenStorage, isTokenExpired, calculateExpiresAt, NeuroLinkOAuthProvider, createOAuthProviderFromConfig, MCPCircuitBreaker, CircuitBreakerManager, globalCircuitBreakerManager, } from "./mcp/index.js";
87
+ export type { McpMetadata, DiscoveredMcp, RateLimitConfig, HTTPRetryConfig, OAuthTokens, TokenStorage, MCPOAuthConfig, OAuthClientInformation, AuthorizationUrlResult, TokenExchangeRequest, } from "./types/mcpTypes.js";
88
88
  export type { ExecutionContext, ToolInfo, ToolExecutionResult, } from "./types/tools.js";
89
89
  export type { LogLevel } from "./types/utilities.js";
90
90
  export declare function initializeTelemetry(): Promise<boolean>;
package/dist/index.js CHANGED
@@ -94,7 +94,13 @@ export async function createBestAIProvider(requestedProvider, modelName) {
94
94
  export {
95
95
  // Core MCP ecosystem
96
96
  // Simplified MCP exports
97
- initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger, } from "./mcp/index.js";
97
+ initializeMCPEcosystem, listMCPs, executeMCP, getMCPStats, mcpLogger,
98
+ // HTTP Transport utilities
99
+ HTTPRateLimiter, RateLimiterManager, globalRateLimiterManager, DEFAULT_RATE_LIMIT_CONFIG, DEFAULT_HTTP_RETRY_CONFIG, isRetryableStatusCode, isRetryableHTTPError, withHTTPRetry,
100
+ // OAuth Authentication
101
+ InMemoryTokenStorage, FileTokenStorage, isTokenExpired, calculateExpiresAt, NeuroLinkOAuthProvider, createOAuthProviderFromConfig,
102
+ // Circuit Breaker
103
+ MCPCircuitBreaker, CircuitBreakerManager, globalCircuitBreakerManager, } from "./mcp/index.js";
98
104
  // ============================================================================
99
105
  // REAL-TIME SERVICES & TELEMETRY - Enterprise Platform Features
100
106
  // ============================================================================
@@ -16,8 +16,12 @@ import { NeuroLinkError } from "../../utils/errorHandling.js";
16
16
  * Video generation runtime error codes
17
17
  *
18
18
  * These are for runtime/execution errors during video generation.
19
- * Input validation errors (missing image, invalid options, etc.) are handled
20
- * by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
19
+ * Pure option/shape validation (missing image option, invalid config values, etc.)
20
+ * is handled by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
21
+ *
22
+ * Error categorization:
23
+ * - INVALID_INPUT → ErrorCategory.execution (runtime I/O failures)
24
+ * - parameterValidation errors → ErrorCategory.validation (schema/option issues)
21
25
  *
22
26
  * Following TTS pattern (TTS_ERROR_CODES + TTSError in ttsProcessor.ts)
23
27
  */
@@ -28,6 +32,12 @@ export declare const VIDEO_ERROR_CODES: {
28
32
  readonly PROVIDER_NOT_CONFIGURED: "VIDEO_PROVIDER_NOT_CONFIGURED";
29
33
  /** Polling for video completion timed out */
30
34
  readonly POLL_TIMEOUT: "VIDEO_POLL_TIMEOUT";
35
+ /**
36
+ * Runtime I/O error during input processing.
37
+ * Used for: failed URL fetch, failed file read, corrupt/unreadable buffer.
38
+ * NOT for: missing options or invalid config shapes (use parameterValidation).
39
+ */
40
+ readonly INVALID_INPUT: "VIDEO_INVALID_INPUT";
31
41
  };
32
42
  /**
33
43
  * Video generation error class
@@ -21,8 +21,12 @@ import { logger } from "../../utils/logger.js";
21
21
  * Video generation runtime error codes
22
22
  *
23
23
  * These are for runtime/execution errors during video generation.
24
- * Input validation errors (missing image, invalid options, etc.) are handled
25
- * by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
24
+ * Pure option/shape validation (missing image option, invalid config values, etc.)
25
+ * is handled by parameterValidation.ts using ERROR_CODES from errorHandling.ts.
26
+ *
27
+ * Error categorization:
28
+ * - INVALID_INPUT → ErrorCategory.execution (runtime I/O failures)
29
+ * - parameterValidation errors → ErrorCategory.validation (schema/option issues)
26
30
  *
27
31
  * Following TTS pattern (TTS_ERROR_CODES + TTSError in ttsProcessor.ts)
28
32
  */
@@ -33,6 +37,12 @@ export const VIDEO_ERROR_CODES = {
33
37
  PROVIDER_NOT_CONFIGURED: "VIDEO_PROVIDER_NOT_CONFIGURED",
34
38
  /** Polling for video completion timed out */
35
39
  POLL_TIMEOUT: "VIDEO_POLL_TIMEOUT",
40
+ /**
41
+ * Runtime I/O error during input processing.
42
+ * Used for: failed URL fetch, failed file read, corrupt/unreadable buffer.
43
+ * NOT for: missing options or invalid config shapes (use parameterValidation).
44
+ */
45
+ INVALID_INPUT: "VIDEO_INVALID_INPUT",
36
46
  };
37
47
  /**
38
48
  * Video generation error class
@@ -226,6 +226,25 @@ export declare abstract class BaseProvider implements AIProvider {
226
226
  */
227
227
  protected normalizeStreamOptions(optionsOrPrompt: StreamOptions | string): StreamOptions;
228
228
  protected enhanceResult(result: EnhancedGenerateResult, options: TextGenerationOptions, startTime: number): Promise<EnhancedGenerateResult>;
229
+ /**
230
+ * Handle video generation mode
231
+ *
232
+ * Generates video from input image + text prompt using Vertex AI Veo 3.1.
233
+ *
234
+ * @param options - Text generation options with video configuration
235
+ * @param startTime - Generation start timestamp for metrics
236
+ * @returns Enhanced result with video data
237
+ *
238
+ * @example
239
+ * ```typescript
240
+ * const result = await provider.generate({
241
+ * input: { text: "Product showcase", images: [imageBuffer] },
242
+ * output: { mode: "video", video: { resolution: "1080p" } }
243
+ * });
244
+ * // result.video contains the generated video
245
+ * ```
246
+ */
247
+ private handleVideoGeneration;
229
248
  /**
230
249
  * Create analytics - delegated to TelemetryHandler
231
250
  */
@@ -323,6 +323,11 @@ export class BaseProvider {
323
323
  this.validateOptions(options);
324
324
  const startTime = Date.now();
325
325
  try {
326
+ // ===== VIDEO GENERATION MODE =====
327
+ // Generate video from image + prompt using Veo 3.1
328
+ if (options.output?.mode === "video") {
329
+ return await this.handleVideoGeneration(options, startTime);
330
+ }
326
331
  // ===== TTS MODE 1: Direct Input Synthesis (useAiResponse=false) =====
327
332
  // Synthesize input text directly without AI generation
328
333
  // This is optimal for simple read-aloud scenarios
@@ -659,6 +664,175 @@ export class BaseProvider {
659
664
  }
660
665
  return enhancedResult;
661
666
  }
667
    /**
     * Handle video generation mode.
     *
     * Generates a video from an input image + text prompt using Vertex AI
     * Veo 3.1 (via generateVideoWithVertex from vertexVideoHandler.js).
     *
     * Flow: validate options -> resolve the first entry of input.images into a
     * Buffer (URL fetch, file read, raw Buffer, or ImageWithAltText) -> validate
     * the image bytes -> call the Vertex handler -> wrap as an enhanced result.
     *
     * @param options - Text generation options with video configuration
     *                  (reads input.images, input.text / prompt, output.video,
     *                  provider, model)
     * @param startTime - Generation start timestamp for metrics, forwarded to
     *                    enhanceResult
     * @returns Enhanced result with video data on the `video` field
     * @throws VideoError (code VIDEO_INVALID_INPUT) for missing/unreadable/
     *         unsupported image input; ErrorFactory.invalidParameters for
     *         option- or image-validation failures
     *
     * @example
     * ```typescript
     * const result = await provider.generate({
     *   input: { text: "Product showcase", images: [imageBuffer] },
     *   output: { mode: "video", video: { resolution: "1080p" } }
     * });
     * // result.video contains the generated video
     * ```
     */
    async handleVideoGeneration(options, startTime) {
        // Dynamic imports to avoid loading video dependencies unless needed
        // (video mode is opt-in; keeps cold-start cost off the common text path).
        const { generateVideoWithVertex, VideoError, VIDEO_ERROR_CODES } = await import("../adapters/video/vertexVideoHandler.js");
        const { validateVideoGenerationInput, validateImageForVideo } = await import("../utils/parameterValidation.js");
        const { ErrorFactory } = await import("../utils/errorHandling.js");
        // Build GenerateOptions for validation. Falls back to wrapping the bare
        // prompt so validateVideoGenerationInput always sees an input object.
        const generateOptions = {
            input: options.input || { text: options.prompt || "" },
            output: options.output,
            provider: options.provider,
            model: options.model,
        };
        // Validate video generation input; all errors are joined into a single
        // invalidParameters throw so the caller sees the full list at once.
        const validation = validateVideoGenerationInput(generateOptions);
        if (!validation.isValid) {
            throw ErrorFactory.invalidParameters("video-generation", new Error(validation.errors.map((e) => e.message).join("; ")), { errors: validation.errors });
        }
        // Log warnings if any (non-fatal validation findings).
        if (validation.warnings.length > 0) {
            for (const warning of validation.warnings) {
                logger.warn(`Video generation warning: ${warning}`);
            }
        }
        // Extract image from input — only the FIRST image is used; any extra
        // entries in input.images are silently ignored.
        const imageInput = options.input?.images?.[0];
        if (!imageInput) {
            throw new VideoError({
                code: VIDEO_ERROR_CODES.INVALID_INPUT,
                message: "Video generation requires an input image. Provide via input.images array.",
                retriable: false,
                context: { field: "input.images" },
            });
        }
        // Timeout for image IO operations (15 seconds) — applied to both the
        // URL fetch and the file read below via executeWithTimeout.
        const IMAGE_IO_TIMEOUT_MS = 15000;
        // Load image buffer if path/URL. Accepted shapes: http(s) URL string,
        // filesystem path string, Buffer, or ImageWithAltText ({ data }).
        // NOTE(review): a bare base64 string or data: URI is NOT recognized here
        // and would be treated as a file path — confirm intended.
        let imageBuffer;
        if (typeof imageInput === "string") {
            if (imageInput.startsWith("http://") ||
                imageInput.startsWith("https://")) {
                // URL - fetch the image with timeout
                logger.debug("Fetching image from URL for video generation", {
                    url: imageInput.substring(0, 100),
                });
                let response;
                try {
                    response = await this.executeWithTimeout(() => fetch(imageInput), {
                        timeout: IMAGE_IO_TIMEOUT_MS,
                        operationType: "generate", // Part of video generation flow
                    });
                }
                catch (error) {
                    // Network failure / timeout: marked retriable (transient).
                    throw new VideoError({
                        code: VIDEO_ERROR_CODES.INVALID_INPUT,
                        message: `Failed to fetch image from URL: ${error instanceof Error ? error.message : "Request timed out"}`,
                        retriable: true,
                        context: { url: imageInput, timeout: IMAGE_IO_TIMEOUT_MS },
                        originalError: error instanceof Error ? error : undefined,
                    });
                }
                if (!response.ok) {
                    // HTTP error: only 5xx responses are considered retriable.
                    throw new VideoError({
                        code: VIDEO_ERROR_CODES.INVALID_INPUT,
                        message: `Failed to fetch image from URL: ${response.status} ${response.statusText}`,
                        retriable: response.status >= 500,
                        context: { url: imageInput, status: response.status },
                    });
                }
                imageBuffer = Buffer.from(await response.arrayBuffer());
            }
            else {
                // File path - read from disk with timeout
                logger.debug("Reading image from path for video generation", {
                    path: imageInput,
                });
                const fs = await import("node:fs/promises");
                try {
                    imageBuffer = await this.executeWithTimeout(() => fs.readFile(imageInput), { timeout: IMAGE_IO_TIMEOUT_MS, operationType: "generate" });
                }
                catch (error) {
                    // Disk read failure: not retriable (missing/unreadable file
                    // is unlikely to resolve on retry).
                    throw new VideoError({
                        code: VIDEO_ERROR_CODES.INVALID_INPUT,
                        message: `Failed to read image file: ${error instanceof Error ? error.message : String(error)}`,
                        retriable: false,
                        context: { path: imageInput, timeout: IMAGE_IO_TIMEOUT_MS },
                        originalError: error instanceof Error ? error : undefined,
                    });
                }
            }
        }
        else if (Buffer.isBuffer(imageInput)) {
            imageBuffer = imageInput;
        }
        else if (typeof imageInput === "object" && "data" in imageInput) {
            // ImageWithAltText type: data may be a base64 string or a Buffer.
            const imgData = imageInput.data;
            if (typeof imgData === "string") {
                imageBuffer = Buffer.from(imgData, "base64");
            }
            else if (Buffer.isBuffer(imgData)) {
                imageBuffer = imgData;
            }
            else {
                throw new VideoError({
                    code: VIDEO_ERROR_CODES.INVALID_INPUT,
                    message: "ImageWithAltText.data must be a base64 string or Buffer.",
                    retriable: false,
                    context: { field: "input.images[0].data", type: typeof imgData },
                });
            }
        }
        else {
            throw new VideoError({
                code: VIDEO_ERROR_CODES.INVALID_INPUT,
                message: "Invalid image input type. Provide Buffer, path string, URL, or ImageWithAltText.",
                retriable: false,
                context: { field: "input.images[0]", type: typeof imageInput },
            });
        }
        // Validate image format and size (for Buffer inputs). A truthy return
        // value is a validation failure object carrying a message.
        const imageValidation = validateImageForVideo(imageBuffer);
        if (imageValidation) {
            throw ErrorFactory.invalidParameters("video-generation", new Error(imageValidation.message), { field: "input.images[0]", validation: imageValidation });
        }
        // Get prompt text — explicit prompt wins over input.text; may be "".
        const prompt = options.prompt || options.input?.text || "";
        logger.info("Starting video generation", {
            provider: "vertex",
            model: options.model || "veo-3.1-generate-001",
            promptLength: prompt.length,
            imageSize: imageBuffer.length,
            resolution: options.output?.video?.resolution || "720p",
            duration: options.output?.video?.length || 6,
        });
        // Generate video using Vertex handler (no processor abstraction).
        // NOTE(review): provider/model are hard-coded to Vertex below; options.model
        // is only echoed into the result, presumably the handler picks the model —
        // confirm against vertexVideoHandler.
        const videoResult = await generateVideoWithVertex(imageBuffer, prompt, options.output?.video);
        logger.info("Video generation complete", {
            videoSize: videoResult.data.length,
            duration: videoResult.metadata?.duration,
            processingTime: videoResult.metadata?.processingTime,
        });
        // Build result. Token usage is reported as zero: video generation does
        // not produce text-token usage figures here.
        const baseResult = {
            content: prompt, // Echo the prompt as content
            provider: "vertex",
            model: options.model || "veo-3.1-generate-001",
            usage: { input: 0, output: 0, total: 0 },
            video: videoResult,
        };
        // Route through the shared enhancement path (analytics, timing, etc.)
        // so video results carry the same telemetry as text results.
        return await this.enhanceResult(baseResult, options, startTime);
    }
662
836
  /**
663
837
  * Create analytics - delegated to TelemetryHandler
664
838
  */