@juspay/neurolink 5.0.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. package/CHANGELOG.md +7 -0
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -26
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -1,3 +1,10 @@
+ # [5.1.0](https://github.com/juspay/neurolink/compare/v5.0.0...v5.1.0) (2025-07-13)
+
+
+ ### Features
+
+ * **core:** complete unified multimodal AI platform architecture with generate/stream unification ([846e409](https://github.com/juspay/neurolink/commit/846e409a4a77024ddee9961c9b5049bc99f8335e))
+
  # [5.0.0](https://github.com/juspay/neurolink/compare/v4.2.0...v5.0.0) (2025-07-11)


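The "generate/stream unification" named in this changelog entry is visible throughout the file diffs below. For orientation, a minimal sketch of the unified call shapes inferred from the code in this diff (fields beyond `input.text`, `content`, and `stream` are not shown here and may differ from the published typings):

```typescript
import { NeuroLink } from "@juspay/neurolink";

const neurolink = new NeuroLink();

// Unified generation: text input moves under input.text, and results
// expose .content (the diffs below migrate .text -> .content).
const result = await neurolink.generate({
  input: { text: "Hello, AI" },
  provider: "google-ai", // optional; omit to auto-select
});
console.log(result.content);

// Unified streaming: stream() returns a wrapper whose .stream property
// is an async iterable of { content } chunks.
const streamed = await neurolink.stream({ input: { text: "Hello, AI" } });
for await (const chunk of streamed.stream) {
  process.stdout.write(chunk.content);
}
```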
package/README.md CHANGED
@@ -17,53 +17,6 @@
  - **📊 Advanced Telemetry** - Optional OpenTelemetry monitoring with zero overhead when disabled
  - **💬 Enhanced Chat Services** - Dual-mode SSE + WebSocket support for enterprise applications
  - **🏗️ Enterprise Architecture** - Production-ready scaling with connection pooling and optimization
-
- ## 🏗️ Enterprise Configuration Management
-
- ### **✨ NEW: Automatic Backup System**
-
- ```bash
- # All config changes create automatic backups
- npm run config:update
- # ✅ Backup created: .neurolink.backups/neurolink-config-2025-01-07T10-30-00.js
-
- # Auto-restore on failures
- npm run config:validate
- # ✅ Config validated with suggestions and warnings
- ```
-
- ### **✨ NEW: Industry-Standard Interfaces**
-
- ```typescript
- // Modern camelCase interfaces with rich context
- interface ExecutionContext {
-   sessionId?: string;
-   userId?: string;
-   aiProvider?: string;
-   permissions?: string[];
-   cacheOptions?: CacheOptions;
-   fallbackOptions?: FallbackOptions;
-   metadata?: Record<string, unknown>;
- }
-
- // Optional methods for maximum flexibility
- interface McpRegistry {
-   registerServer?(
-     serverId: string,
-     config?: unknown,
-     context?: ExecutionContext,
-   ): Promise<void>;
-   executeTool?<T>(
-     toolName: string,
-     args?: unknown,
-     context?: ExecutionContext,
-   ): Promise<T>;
-   listTools?(context?: ExecutionContext): Promise<ToolInfo[]>;
- }
- ```
-
- ### **Enterprise Features**
-
  - **🔄 Automatic Backup/Restore** - Timestamped backups with hash verification
  - **✅ Config Validation** - Comprehensive validation with suggestions
  - **🏗️ Factory-First MCP** - Lighthouse-compatible architecture (99% compatible)
@@ -71,6 +24,19 @@ interface McpRegistry {
  - **⚡ Performance** - Tool execution <1ms, pipeline execution ~22ms
  - **🛡️ Error Recovery** - Graceful failures with auto-restore

+ ## ✅ LATEST UPDATE: Stream Function Migration Complete (2025-01-12)
+
+ **NeuroLink now uses `stream()` as the primary streaming function with future-ready multi-modal interface.**
+
+ - ✅ **New Primary Streaming**: `stream()` with multi-modal ready interface
+ - ✅ **Enhanced Generation**: `generate()` as primary generation function
+ - ✅ **Factory Enhanced**: Better provider management across all methods
+ - ✅ **Zero Breaking Changes**: All existing code continues working (backward compatibility)
+
+ > **Enhanced API**: NeuroLink uses `stream()` and `generate()` as primary functions with multi-modal ready interfaces and improved factory patterns.
+
+ ---
+
  ## 🚀 Quick Start

  ### Install & Run (2 minutes)
@@ -83,7 +49,11 @@ export GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"
  npx @juspay/neurolink generate "Hello, AI"
  npx @juspay/neurolink gen "Hello, AI" # Shortest form

- # 🆕 NEW: AI Enhancement Features
+ # Primary Method (generate) - Recommended
+ npx @juspay/neurolink generate "Explain AI" --provider google-ai
+ npx @juspay/neurolink gen "Write code" --provider openai # Shortest form
+
+ # 🆕 AI Enhancement Features
  npx @juspay/neurolink generate "Explain AI" --enable-analytics --debug
  npx @juspay/neurolink generate "Write code" --enable-evaluation --debug
  npx @juspay/neurolink generate "Help me" --context '{"userId":"123"}' --debug
@@ -99,13 +69,21 @@ npm install @juspay/neurolink
  ### Basic Usage

  ```typescript
- import { createBestAIProvider } from "@juspay/neurolink";
+ import { NeuroLink } from "@juspay/neurolink";

- // Auto-selects best available provider
+ // NEW: Primary method (recommended)
+ const neurolink = new NeuroLink();
+ const result = await neurolink.generate({
+   input: { text: "Write a haiku about programming" },
+   provider: "google-ai",
+   timeout: "30s", // Optional: Set custom timeout (default: 30s)
+ });
+ // Alternative: Auto-selects best available provider
+ import { createBestAIProvider } from "@juspay/neurolink";
  const provider = createBestAIProvider();
- const result = await provider.generateText({
+ const providerResult = await provider.generate({
    prompt: "Write a haiku about programming",
-   timeout: "30s", // Optional: Set custom timeout (default: 30s)
+   timeout: "30s",
  });

  console.log(result.text);
@@ -118,7 +96,7 @@ Method aliases that match CLI command names:

  ```typescript
  // All three methods are equivalent:
- const result1 = await provider.generateText({ prompt: "Hello" }); // Original
+ const result1 = await provider.generate({ prompt: "Hello" }); // Original
  const result2 = await provider.generate({ prompt: "Hello" }); // Matches CLI 'generate'
  const result3 = await provider.gen({ prompt: "Hello" }); // Matches CLI 'gen'

@@ -126,7 +104,7 @@ const result3 = await provider.gen({ prompt: "Hello" }); // Matches CLI 'gen'
  const provider = createBestAIProvider();

  // Detailed method name
- const story = await provider.generateText({
+ const story = await provider.generate({
    prompt: "Write a short story about AI",
    maxTokens: 200,
  });
@@ -162,10 +140,10 @@ import { NeuroLink } from "@juspay/neurolink";
  const neurolink = new NeuroLink();

  // Basic usage
- const result = await neurolink.generateText("Write a story");
+ const result = await neurolink.generate("Write a story");

  // With enhancements (NEW!)
- const enhancedResult = await neurolink.generateText({
+ const enhancedResult = await neurolink.generate({
    prompt: "Write a business proposal",
    enableAnalytics: true, // Get usage & cost data
    enableEvaluation: true, // Get AI quality scores
@@ -189,7 +167,7 @@ const enhancedContext = createEnhancedContext(
  {
    domain: "Business development",
    role: "Business proposal assistant",
-   toolsUsed: ["generate-text", "analytics-helper"],
+   toolsUsed: ["generate", "analytics-helper"],
    conversationHistory: [
      { role: "user", content: "I need help with our Q1 business plan" },
      {
@@ -273,7 +251,7 @@ console.log("Version:", status.version);

  // All AI operations are now automatically monitored
  const provider = await createBestAIProvider();
- const result = await provider.generateText({
+ const result = await provider.generate({
    prompt: "Generate business report",
  });
  // Telemetry automatically tracks: response time, token usage, cost, errors
@@ -426,7 +404,20 @@ export const POST: RequestHandler = async ({ request }) => {
  const provider = createBestAIProvider();

  try {
-   const result = await provider.streamText({
+   // NEW: Primary streaming method (recommended)
+   const result = await provider.stream({
+     input: { text: message },
+     timeout: "2m", // 2 minutes for streaming
+   });
+
+   // Process stream
+   for await (const chunk of result.stream) {
+     // Handle streaming content
+     console.log(chunk.content);
+   }
+
+   // LEGACY: Backward compatibility (still works)
+   const legacyResult = await provider.stream({ input: { text:
      prompt: message,
      timeout: "2m", // 2 minutes for streaming
    });
@@ -444,7 +435,7 @@ export async function POST(request: NextRequest) {
  const { prompt } = await request.json();
  const provider = createBestAIProvider();

- const result = await provider.generateText({
+ const result = await provider.generate({
    prompt,
    timeout: process.env.AI_TIMEOUT || "30s", // Configurable timeout
  });
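On the provider surface these README changes are a mechanical rename. A minimal migration sketch, assuming the backward compatibility the README claims (the legacy methods are stated to keep working, not verified here):

```typescript
import { createBestAIProvider } from "@juspay/neurolink";

const provider = createBestAIProvider();

// 5.0.x style (legacy, still supported per the README above)
const before = await provider.generateText({ prompt: "Summarize this diff" });

// 5.1.0 style: same options object, renamed method
const after = await provider.generate({ prompt: "Summarize this diff" });

// Provider-level results still expose .text in the README examples
console.log(before.text, after.text);
```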
package/dist/chat/sse-handler.js CHANGED
@@ -42,14 +42,15 @@ export class SSEChatHandler {
      data: { type: "start", sessionId, messageId: userMessage.id },
    });
    // Generate AI response with streaming
-   const aiResponse = await this.provider.streamText({
-     prompt: message,
+   const aiResponse = await this.provider.stream({
+     input: { text: message },
      temperature: options.temperature,
      maxTokens: options.maxTokens,
      systemPrompt: options.systemPrompt,
    });
-   if (aiResponse?.textStream) {
-     const reader = aiResponse.textStream.getReader();
+   if (aiResponse?.stream) {
+     // Convert async iterable to readable stream
+     const reader = aiResponse.stream;
      let fullResponse = "";
      try {
        while (true) {
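Note that the handler keeps its `reader`-based loop while the new `stream` property is consumed elsewhere in this diff as an async iterable. If a `getReader()`-style interface is still required downstream, a bridge like the following would work (a sketch, not code from the package; the `{ content: string }` chunk shape is taken from the CLI code later in this diff):

```typescript
// Wrap an async iterable of chunks in a WHATWG ReadableStream so existing
// reader.read() loops keep working (ReadableStream is global in Node 18+).
function toReadableStream(
  chunks: AsyncIterable<{ content: string }>,
): ReadableStream<string> {
  return new ReadableStream<string>({
    async start(controller) {
      for await (const chunk of chunks) {
        controller.enqueue(chunk.content);
      }
      controller.close();
    },
  });
}

// Hypothetical usage in place of the old textStream.getReader():
// const reader = toReadableStream(aiResponse.stream).getReader();
```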
package/dist/chat/websocket-chat-handler.js CHANGED
@@ -60,12 +60,12 @@ export class WebSocketChatHandler extends SSEChatHandler {
    latencyTarget: 200,
  });
  // Generate AI response
- const result = await this.provider.generateText({
+ const result = await this.provider.generate({
    prompt: request.prompt,
    temperature: request.options?.temperature,
    maxTokens: request.options?.maxTokens,
  });
- if (!result || !result.text) {
+ if (!result || !result.content) {
    throw new Error("Invalid AI response");
  }
  // Send response via WebSocket
@@ -75,7 +75,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
    connectionId,
    timestamp: Date.now(),
    data: {
-     text: result.text,
+     text: result.content,
      sessionId: request.sessionId,
      metadata: {
        provider: this.provider.constructor.name,
@@ -98,12 +98,12 @@ export class WebSocketChatHandler extends SSEChatHandler {
  async handleGroupChat(roomId, request) {
    try {
      // Process AI request
-     const result = await this.provider.generateText({
+     const result = await this.provider.generate({
        prompt: request.prompt,
        temperature: request.options?.temperature,
        maxTokens: request.options?.maxTokens,
      });
-     if (!result || !result.text) {
+     if (!result || !result.content) {
        throw new Error("Invalid AI response");
      }
      // Broadcast to room
@@ -113,7 +113,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
    connectionId: "system",
    timestamp: Date.now(),
    data: {
-     text: result.text,
+     text: result.content,
      sessionId: request.sessionId,
      userId: request.userId,
      isGroupMessage: true,
@@ -136,11 +136,11 @@ export class WebSocketChatHandler extends SSEChatHandler {
    const channelId = randomUUID();
    const channel = this.wsServer.createStreamingChannel(connectionId, channelId);
    // Generate response
-   const result = await this.provider.generateText({
+   const result = await this.provider.generate({
      prompt: request.prompt,
      ...request.options,
    });
-   if (!result || !result.text) {
+   if (!result || !result.content) {
      throw new Error("Invalid AI response");
    }
    // Send complete response
@@ -150,7 +150,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
    connectionId,
    timestamp: Date.now(),
    data: {
-     text: result.text,
+     text: result.content,
      isStreamingComplete: true,
      channelId,
    },
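The handlers in this file all repeat the same two changes: call `generate()` instead of `generateText()`, and read `result.content` where they previously read `result.text`, while keeping `text` as the outgoing wire key so WebSocket clients are unaffected. For code that must tolerate both result shapes during a staged upgrade, a defensive accessor (illustrative only; both interfaces are assumptions based on the field names in this diff):

```typescript
interface LegacyResult { text?: string }      // 5.0.x result shape
interface UnifiedResult { content?: string }  // 5.1.0 result shape

// Prefer the unified field, fall back to the legacy one,
// and mirror the "Invalid AI response" guard used above.
function extractText(result: LegacyResult & UnifiedResult): string {
  const text = result.content ?? result.text;
  if (!text) {
    throw new Error("Invalid AI response");
  }
  return text;
}
```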
package/dist/cli/commands/mcp.js CHANGED
@@ -891,7 +891,7 @@ export function addMCPCommands(yargs) {
      type: "boolean",
      description: "Disable fallback to other sources",
    })
-   .example('$0 mcp run generate-text --params \'{"prompt": "Hello world"}\'', "Run tool with fallback")
+   .example('$0 mcp run generate --params \'{"prompt": "Hello world"}\'', "Run tool with fallback")
    .example('$0 mcp run read_file --params \'{"path": "README.md"}\' --source manual', "Prefer manual config"), async (argv) => {
    console.log(chalk.blue(`🚀 Executing tool: ${argv.tool}`));
    const spinner = ora("Initializing NeuroLink MCP...").start();
package/dist/cli/commands/ollama.js CHANGED
@@ -53,7 +53,7 @@ async function pullModelHandler(argv) {
    try {
      execSync(`ollama pull ${model}`, { stdio: "inherit" });
      console.log(chalk.green(`\n✅ Successfully downloaded ${model}`));
-     console.log(chalk.blue(`\nTest it with: npx @juspay/neurolink generate-text "Hello!" --provider ollama --model ${model}`));
+     console.log(chalk.blue(`\nTest it with: npx @juspay/neurolink generate "Hello!" --provider ollama --model ${model}`));
    }
    catch (error) {
      console.error(chalk.red(`\n❌ Failed to download ${model}`));
@@ -322,9 +322,9 @@ async function setupHandler() {
    console.log(chalk.blue("Next steps:"));
    console.log("1. List models: " + chalk.gray("neurolink ollama list-models"));
    console.log("2. Generate text: " +
-     chalk.gray('neurolink generate-text "Hello!" --provider ollama'));
+     chalk.gray('neurolink generate "Hello!" --provider ollama'));
    console.log("3. Use specific model: " +
-     chalk.gray('neurolink generate-text "Hello!" --provider ollama --model codellama'));
+     chalk.gray('neurolink generate "Hello!" --provider ollama --model codellama'));
    console.log(chalk.gray("\nFor more information, see: https://docs.neurolink.ai/providers/ollama"));
  }
  export default addOllamaCommands;
package/dist/cli/factories/command-factory.d.ts ADDED
@@ -0,0 +1,14 @@
+ import type { CommandModule } from "yargs";
+ /**
+  * CLI Command Factory for generate commands
+  */
+ export declare class CLICommandFactory {
+   /**
+    * Create the new primary 'generate' command
+    */
+   static createGenerateCommand(): CommandModule;
+   /**
+    * Execute the generate command
+    */
+   private static executeGenerate;
+ }
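Since `createGenerateCommand()` returns a standard yargs `CommandModule`, registration is a one-liner, which is exactly how `dist/cli/index.js` wires it up later in this diff. A self-contained sketch (the relative import path is illustrative):

```typescript
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { CLICommandFactory } from "./factories/command-factory.js";

// Register the factory-built primary command on a yargs instance.
const cli = yargs(hideBin(process.argv));
cli.command(CLICommandFactory.createGenerateCommand());
cli.parse();
```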
package/dist/cli/factories/command-factory.js ADDED
@@ -0,0 +1,129 @@
+ import { NeuroLink } from "../../lib/neurolink.js";
+ import ora from "ora";
+ import chalk from "chalk";
+ /**
+  * CLI Command Factory for generate commands
+  */
+ export class CLICommandFactory {
+   /**
+    * Create the new primary 'generate' command
+    */
+   static createGenerateCommand() {
+     return {
+       command: "generate <input>",
+       describe: "Generate content using AI (primary command)",
+       builder: (yargs) => {
+         return yargs
+           .positional("input", {
+             describe: "Text input for generation",
+             type: "string",
+           })
+           .option("provider", {
+             describe: "AI provider to use",
+             type: "string",
+             choices: [
+               "google-ai",
+               "vertex",
+               "openai",
+               "anthropic",
+               "bedrock",
+               "azure",
+               "huggingface",
+               "ollama",
+               "mistral",
+             ],
+             default: "google-ai",
+           })
+           .option("model", {
+             describe: "Specific model to use",
+             type: "string",
+           })
+           .option("temperature", {
+             describe: "Temperature (0-1)",
+             type: "number",
+           })
+           .option("max-tokens", {
+             describe: "Maximum tokens",
+             type: "number",
+           })
+           .option("system-prompt", {
+             describe: "System prompt",
+             type: "string",
+           })
+           .option("timeout", {
+             describe: "Timeout (e.g., 30s, 2m)",
+             type: "string",
+           })
+           .option("disable-tools", {
+             describe: "Disable MCP tools",
+             type: "boolean",
+             default: false,
+           })
+           .option("enable-analytics", {
+             describe: "Enable usage analytics",
+             type: "boolean",
+             default: false,
+           })
+           .option("enable-evaluation", {
+             describe: "Enable AI quality evaluation",
+             type: "boolean",
+             default: false,
+           })
+           .option("output-format", {
+             describe: "Output format",
+             type: "string",
+             choices: ["text", "structured", "json"],
+             default: "text",
+           })
+           .option("debug", {
+             describe: "Enable debug output",
+             type: "boolean",
+             default: false,
+           });
+       },
+       handler: async (argv) => await CLICommandFactory.executeGenerate(argv),
+     };
+   }
+   /**
+    * Execute the generate command
+    */
+   static async executeGenerate(argv) {
+     const spinner = ora("Generating content...").start();
+     try {
+       const sdk = new NeuroLink();
+       const result = await sdk.generate({
+         input: { text: argv.input },
+         output: { format: argv.outputFormat },
+         provider: argv.provider,
+         model: argv.model,
+         temperature: argv.temperature,
+         maxTokens: argv.maxTokens,
+         systemPrompt: argv.systemPrompt,
+         timeout: argv.timeout,
+         enableAnalytics: argv.enableAnalytics,
+         enableEvaluation: argv.enableEvaluation,
+       });
+       spinner.succeed("Content generated successfully!");
+       console.log("\n" + chalk.cyan("Generated Content:"));
+       console.log(result.content);
+       if (argv.debug) {
+         console.log("\n" + chalk.yellow("Debug Information:"));
+         console.log("Provider:", result.provider);
+         console.log("Model:", result.model);
+         if (result.analytics) {
+           console.log("Analytics:", JSON.stringify(result.analytics, null, 2));
+         }
+         if (result.evaluation) {
+           console.log("Evaluation:", JSON.stringify(result.evaluation, null, 2));
+         }
+       }
+       // Exit successfully
+       process.exit(0);
+     }
+     catch (error) {
+       spinner.fail("Generation failed");
+       console.error(chalk.red("Error:"), error);
+       process.exit(1);
+     }
+   }
+ }
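One detail worth noting: the options are declared in kebab-case (`max-tokens`, `output-format`) but `executeGenerate` reads camelCase keys (`argv.maxTokens`, `argv.outputFormat`). That works because yargs' camel-case expansion is enabled by default; a self-contained check:

```typescript
import yargs from "yargs";

// yargs exposes kebab-case flags under camelCase keys by default
// (the "camel-case-expansion" parser setting), which is what the
// factory's executeGenerate relies on. The cast is only for typing.
const argv = yargs(["--max-tokens", "512", "--output-format", "json"])
  .option("max-tokens", { type: "number" })
  .option("output-format", { type: "string" })
  .parseSync() as { maxTokens?: number; outputFormat?: string };

console.log(argv.maxTokens, argv.outputFormat); // 512 "json"
```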
package/dist/cli/index.js CHANGED
@@ -21,6 +21,7 @@ import chalk from "chalk";
  import fs from "fs";
  import { addMCPCommands } from "./commands/mcp.js";
  import { addOllamaCommands } from "./commands/ollama.js";
+ import { CLICommandFactory } from "./factories/command-factory.js";
  import { AgentEnhancedProvider } from "../lib/providers/agent-enhanced-provider.js";
  import { logger } from "../lib/utils/logger.js";
  /**
@@ -289,9 +290,9 @@ const cli = yargs(args)
    }
    exitProcess(); // Default exit
  })
- // Generate Text Command
- .command(["generate-text [prompt]", "generate [prompt]", "gen [prompt]"], "Generate text using AI providers", (yargsInstance) => yargsInstance
-   .usage("Usage: $0 generate-text [prompt] [options]")
+ // Generate Command (Primary)
+ .command(["generate [prompt]", "gen [prompt]"], "Generate content using AI providers", (yargsInstance) => yargsInstance
+   .usage("Usage: $0 generate [prompt] [options]")
    .positional("prompt", {
      type: "string",
      description: "Text prompt for AI generation (or read from stdin)",
@@ -378,10 +379,10 @@
      type: "string",
      description: "JSON context object for custom data",
    })
-   .example('$0 generate-text "Hello world"', "Basic text generation")
-   .example('$0 generate-text "Write a story" --provider openai', "Use specific provider")
-   .example('$0 generate-text "What time is it?"', "Use with natural tool integration (default)")
-   .example('$0 generate-text "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
+   .example('$0 generate "Hello world"', "Basic content generation")
+   .example('$0 generate "Write a story" --provider openai', "Use specific provider")
+   .example('$0 generate "What time is it?"', "Use with natural tool integration (default)")
+   .example('$0 generate "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
    // SOLUTION 1: Handle stdin input if no prompt provided
    if (!argv.prompt && !process.stdin.isTTY) {
      // Read from stdin
@@ -429,11 +430,7 @@
    if (errors.length > 0) {
      throw new Error(`Parameter validation failed:\n${errors.map((e) => ` • ${e}`).join("\n")}\n\nUse --help for valid parameter ranges.`);
    }
-   // Check if generate-text was used specifically (for deprecation warning)
-   const usedCommand = argv._[0];
-   if (usedCommand === "generate-text" && !argv.quiet) {
-     console.warn(chalk.yellow('⚠️ Warning: "generate-text" is deprecated. Use "generate" or "gen" instead for multimodal support.'));
-   }
+   // Command is now the primary generate method
    let originalConsole = {};
    if (argv.format === "json" && !argv.quiet) {
      // Suppress only if not quiet, as quiet implies no spinners anyway
@@ -469,8 +466,8 @@
    }
    if (argv.disableTools === true) {
      // Tools disabled - use standard SDK
-     generatePromise = sdk.generateText({
-       prompt: argv.prompt,
+     generatePromise = sdk.generate({
+       input: { text: argv.prompt },
        provider: argv.provider === "auto"
          ? undefined
          : argv.provider,
@@ -507,7 +504,7 @@
      model: argv.model, // Use specified model or default
      toolCategory: "all", // Enable all tool categories
    });
-   generatePromise = agentProvider.generateText({
+   generatePromise = agentProvider.generate({
      prompt: argv.prompt,
      temperature: argv.temperature,
      maxTokens: argv.maxTokens, // Respect user's token limit - no artificial caps
@@ -636,7 +633,7 @@
    }
  })
  // Stream Text Command
- .command("stream [prompt]", "Stream text generation in real-time", (yargsInstance) => yargsInstance
+ .command("stream [prompt]", "Stream generation in real-time", (yargsInstance) => yargsInstance
    .usage("Usage: $0 stream [prompt] [options]")
    .positional("prompt", {
      type: "string",
@@ -750,8 +747,8 @@
    let stream;
    if (argv.disableTools === true) {
      // Tools disabled - use standard SDK
-     stream = await sdk.generateTextStream({
-       prompt: argv.prompt,
+     stream = await sdk.stream({
+       input: { text: argv.prompt },
        provider: argv.provider === "auto"
          ? undefined
          : argv.provider,
@@ -784,8 +781,8 @@
      toolCategory: "all", // Enable all tool categories
    });
    // Note: AgentEnhancedProvider doesn't support streaming with tools yet
-   // Fall back to generateText for now
-   const result = await agentProvider.generateText({
+   // Fall back to generate for now
+   const result = await agentProvider.generate({
      prompt: argv.prompt,
      temperature: argv.temperature,
      // NEW: Analytics and evaluation support
@@ -794,7 +791,7 @@
      context: contextObj,
    });
    // Simulate streaming by outputting the result
-   const text = result?.text || "";
+   const text = result?.content || "";
    const CHUNK_SIZE = 10;
    const DELAY_MS = 50;
    for (let i = 0; i < text.length; i += CHUNK_SIZE) {
@@ -809,7 +806,7 @@
    displayAnalyticsAndEvaluation(result, argv);
    return; // Exit early for agent mode
  }
- for await (const chunk of stream) {
+ for await (const chunk of stream.stream) {
    process.stdout.write(chunk.content);
    // In debug mode, interleaved logging would appear here
    // (SDK logs are controlled by NEUROLINK_DEBUG set in middleware)
@@ -817,6 +814,8 @@
    if (!argv.quiet) {
      process.stdout.write("\n");
    } // Ensure newline after stream
+   // Exit successfully
+   process.exit(0);
  }
  catch (error) {
    handleError(error, "Text streaming");
@@ -911,8 +910,8 @@
    spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
  }
  try {
-   const result = await sdk.generateText({
-     prompt: prompts[i],
+   const result = await sdk.generate({
+     input: { text: prompts[i] },
      provider: argv.provider === "auto"
        ? undefined
        : argv.provider,
@@ -1074,8 +1073,8 @@ const cli = yargs(args)
  try {
    const start = Date.now();
    // Add timeout to prevent hanging
-   const testPromise = sdk.generateText({
-     prompt: "test",
+   const testPromise = sdk.generate({
+     input: { text: "test" },
      provider: p,
      maxTokens: 1,
      disableTools: true, // Disable tools for faster status check
@@ -1237,6 +1236,8 @@
    .example("$0 completion >> ~/.zshrc", "Add to zsh"), async (argv) => {
    cli.showCompletionScript();
  });
+ // Add NEW Generate Command (Primary)
+ cli.command(CLICommandFactory.createGenerateCommand());
  // Add MCP Commands
  addMCPCommands(cli);
  // Add Ollama Commands
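The loop change from `for await (const chunk of stream)` to `for await (const chunk of stream.stream)` implies `sdk.stream()` now returns a wrapper object rather than a bare iterable. The minimal shape this CLI code depends on, inferred from usage alone (the real typings live in `dist/types/stream-types.d.ts`, added in this release but not shown here):

```typescript
// Inferred from consumption in dist/cli/index.js; fields beyond
// `stream` and `content` are not visible in this diff.
interface StreamChunk {
  content: string;
}

interface StreamResult {
  stream: AsyncIterable<StreamChunk>;
}

async function printStream(result: StreamResult): Promise<void> {
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.content);
  }
  process.stdout.write("\n"); // mirror the CLI's trailing newline
}
```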
package/dist/cli/utils/interactive-setup.js CHANGED
@@ -244,7 +244,7 @@ export async function testProviderConnectivity(providers, quiet = false) {
    }
    try {
      const start = Date.now();
-     await sdk.generateText({ prompt: "test", provider, maxTokens: 1 });
+     await sdk.generate({ input: { text: "test" }, provider, maxTokens: 1 });
      const duration = Date.now() - start;
      results.push({ provider, status: "working", responseTime: duration });
      if (spinner) {
@@ -291,7 +291,7 @@ export function displaySetupSummary(result, quiet = false) {
    if (working > 0) {
      console.log(chalk.green("\n✅ Setup completed successfully!"));
      console.log(chalk.yellow("💡 You can now use NeuroLink with your configured providers."));
-     console.log(chalk.gray(' Try: neurolink generate-text "Hello, AI!"'));
+     console.log(chalk.gray(' Try: neurolink generate "Hello, AI!"'));
    }
    else {
      console.log(chalk.red("\n❌ No providers are working."));
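The setup utility keeps its cheap-probe strategy, just routed through the unified API: a one-token `generate()` call, timed. The same pattern as a standalone helper (a sketch; the spinner output and error handling of the original are omitted):

```typescript
import { NeuroLink } from "@juspay/neurolink";

// Probe one provider with a minimal one-token request and time it,
// mirroring testProviderConnectivity above.
async function probeProvider(provider: string) {
  const sdk = new NeuroLink();
  const start = Date.now();
  await sdk.generate({ input: { text: "test" }, provider, maxTokens: 1 });
  return {
    provider,
    status: "working" as const,
    responseTime: Date.now() - start,
  };
}
```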