@juspay/neurolink 4.2.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +47 -2
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -29
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +2 -3
  122. package/dist/cli/commands/agent-generate.d.ts +0 -1
  123. package/dist/cli/commands/agent-generate.js +0 -67
package/CHANGELOG.md CHANGED
@@ -1,9 +1,54 @@
- # [4.2.0](https://github.com/juspay/neurolink/compare/v4.1.1...v4.2.0) (2025-07-11)
+ # [5.1.0](https://github.com/juspay/neurolink/compare/v5.0.0...v5.1.0) (2025-07-13)
+
+
+ ### Features
+
+
+ * **core:** complete unified multimodal AI platform architecture with generate/stream unification ([846e409](https://github.com/juspay/neurolink/commit/846e409a4a77024ddee9961c9b5049bc99f8335e))
+
+ # [5.0.0](https://github.com/juspay/neurolink/compare/v4.2.0...v5.0.0) (2025-07-11)
+
+
+ * refactor(cli)!: remove agent-generate command, unify CLI to single generate command ([9c034b7](https://github.com/juspay/neurolink/commit/9c034b7b5a8df3b861fccae0e617c5aa4c85a903))
+
+
+ ### Bug Fixes

+ * **scripts:** update docs:generate to use docs:validate instead of removed docs:sync ([3277bab](https://github.com/juspay/neurolink/commit/3277bab3eb1cec24a60fe28bf3897fce63d83d3a))
+
+
+ ### BREAKING CHANGES
+
+ * agent-generate command has been removed
+
+ The agent-generate command has been completely removed from the CLI. All
+ functionality is now available through the enhanced generate command with
+ tools enabled by default.
+
+ ### Changes Made:
+ - Delete src/cli/commands/agent-generate.ts command implementation
+ - Remove agent-generate import and registration from src/cli/index.ts
+ - Update docs/CLI-GUIDE.md to remove agent-generate documentation
+ - Update memory-bank documentation files to reflect unified approach
+ - Remove agent-generate test cases from scripts/corrected-functionality-test.js
+
+ ### Migration Guide:
+ - Replace `neurolink agent-generate "prompt"` with `neurolink generate "prompt"`
+ - Tools are enabled by default in generate command
+ - Use `--disable-tools` flag if tool-calling is not desired
+ - All previous agent-generate functionality available in generate command
+
+ ### Technical Impact:
+ - Simplified CLI interface with single text generation command
+ - Reduced codebase complexity and maintenance overhead
+ - Enhanced generate command provides all tool-calling capabilities
+ - Zero breaking changes to core functionality
+ - Clean TypeScript compilation and documentation consistency
+
+ # [4.2.0](https://github.com/juspay/neurolink/compare/v4.1.1...v4.2.0) (2025-07-11)

  ### Features

- * **mcp:** comprehensive MCP system enhancements with timeout management ([1d35b5e](https://github.com/juspay/neurolink/commit/1d35b5e12d03ce60bcdf0608749a1b99e8565567))
+ - **mcp:** comprehensive MCP system enhancements with timeout management ([1d35b5e](https://github.com/juspay/neurolink/commit/1d35b5e12d03ce60bcdf0608749a1b99e8565567))

  ## [4.1.1](https://github.com/juspay/neurolink/compare/v4.1.0...v4.1.1) (2025-07-10)

package/README.md CHANGED
@@ -17,53 +17,6 @@
  - **📊 Advanced Telemetry** - Optional OpenTelemetry monitoring with zero overhead when disabled
  - **💬 Enhanced Chat Services** - Dual-mode SSE + WebSocket support for enterprise applications
  - **🏗️ Enterprise Architecture** - Production-ready scaling with connection pooling and optimization
-
- ## 🏗️ Enterprise Configuration Management
-
- ### **✨ NEW: Automatic Backup System**
-
- ```bash
- # All config changes create automatic backups
- npm run config:update
- # ✅ Backup created: .neurolink.backups/neurolink-config-2025-01-07T10-30-00.js
-
- # Auto-restore on failures
- npm run config:validate
- # ✅ Config validated with suggestions and warnings
- ```
-
- ### **✨ NEW: Industry-Standard Interfaces**
-
- ```typescript
- // Modern camelCase interfaces with rich context
- interface ExecutionContext {
-   sessionId?: string;
-   userId?: string;
-   aiProvider?: string;
-   permissions?: string[];
-   cacheOptions?: CacheOptions;
-   fallbackOptions?: FallbackOptions;
-   metadata?: Record<string, unknown>;
- }
-
- // Optional methods for maximum flexibility
- interface McpRegistry {
-   registerServer?(
-     serverId: string,
-     config?: unknown,
-     context?: ExecutionContext,
-   ): Promise<void>;
-   executeTool?<T>(
-     toolName: string,
-     args?: unknown,
-     context?: ExecutionContext,
-   ): Promise<T>;
-   listTools?(context?: ExecutionContext): Promise<ToolInfo[]>;
- }
- ```
-
- ### **Enterprise Features**
-
  - **🔄 Automatic Backup/Restore** - Timestamped backups with hash verification
  - **✅ Config Validation** - Comprehensive validation with suggestions
  - **🏗️ Factory-First MCP** - Lighthouse-compatible architecture (99% compatible)
@@ -71,6 +24,19 @@ interface McpRegistry {
  - **⚡ Performance** - Tool execution <1ms, pipeline execution ~22ms
  - **🛡️ Error Recovery** - Graceful failures with auto-restore

+ ## ✅ LATEST UPDATE: Stream Function Migration Complete (2025-01-12)
+
+ **NeuroLink now uses `stream()` as the primary streaming function with future-ready multi-modal interface.**
+
+ - ✅ **New Primary Streaming**: `stream()` with multi-modal ready interface
+ - ✅ **Enhanced Generation**: `generate()` as primary generation function
+ - ✅ **Factory Enhanced**: Better provider management across all methods
+ - ✅ **Zero Breaking Changes**: All existing code continues working (backward compatibility)
+
+ > **Enhanced API**: NeuroLink uses `stream()` and `generate()` as primary functions with multi-modal ready interfaces and improved factory patterns.
+
+ ---
+
  ## 🚀 Quick Start

  ### Install & Run (2 minutes)
@@ -83,7 +49,11 @@ export GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"
  npx @juspay/neurolink generate "Hello, AI"
  npx @juspay/neurolink gen "Hello, AI" # Shortest form

- # 🆕 NEW: AI Enhancement Features
+ # Primary Method (generate) - Recommended
+ npx @juspay/neurolink generate "Explain AI" --provider google-ai
+ npx @juspay/neurolink gen "Write code" --provider openai # Shortest form
+
+ # 🆕 AI Enhancement Features
  npx @juspay/neurolink generate "Explain AI" --enable-analytics --debug
  npx @juspay/neurolink generate "Write code" --enable-evaluation --debug
  npx @juspay/neurolink generate "Help me" --context '{"userId":"123"}' --debug
@@ -99,13 +69,21 @@ npm install @juspay/neurolink
  ### Basic Usage

  ```typescript
- import { createBestAIProvider } from "@juspay/neurolink";
+ import { NeuroLink } from "@juspay/neurolink";

- // Auto-selects best available provider
+ // NEW: Primary method (recommended)
+ const neurolink = new NeuroLink();
+ const result = await neurolink.generate({
+   input: { text: "Write a haiku about programming" },
+   provider: "google-ai",
+   timeout: "30s", // Optional: Set custom timeout (default: 30s)
+ });
+ // Alternative: Auto-selects best available provider
+ import { createBestAIProvider } from "@juspay/neurolink";
  const provider = createBestAIProvider();
- const result = await provider.generateText({
+ const providerResult = await provider.generate({
    prompt: "Write a haiku about programming",
-   timeout: "30s", // Optional: Set custom timeout (default: 30s)
+   timeout: "30s",
  });

  console.log(result.text);
@@ -118,7 +96,7 @@ Method aliases that match CLI command names:

  ```typescript
  // All three methods are equivalent:
- const result1 = await provider.generateText({ prompt: "Hello" }); // Original
+ const result1 = await provider.generate({ prompt: "Hello" }); // Original
  const result2 = await provider.generate({ prompt: "Hello" }); // Matches CLI 'generate'
  const result3 = await provider.gen({ prompt: "Hello" }); // Matches CLI 'gen'

@@ -126,7 +104,7 @@ const result3 = await provider.gen({ prompt: "Hello" }); // Matches CLI 'gen'
  const provider = createBestAIProvider();

  // Detailed method name
- const story = await provider.generateText({
+ const story = await provider.generate({
    prompt: "Write a short story about AI",
    maxTokens: 200,
  });
@@ -162,10 +140,10 @@ import { NeuroLink } from "@juspay/neurolink";
  const neurolink = new NeuroLink();

  // Basic usage
- const result = await neurolink.generateText("Write a story");
+ const result = await neurolink.generate("Write a story");

  // With enhancements (NEW!)
- const enhancedResult = await neurolink.generateText({
+ const enhancedResult = await neurolink.generate({
    prompt: "Write a business proposal",
    enableAnalytics: true, // Get usage & cost data
    enableEvaluation: true, // Get AI quality scores
@@ -189,7 +167,7 @@ const enhancedContext = createEnhancedContext(
  {
    domain: "Business development",
    role: "Business proposal assistant",
-   toolsUsed: ["generate-text", "analytics-helper"],
+   toolsUsed: ["generate", "analytics-helper"],
    conversationHistory: [
      { role: "user", content: "I need help with our Q1 business plan" },
      {
@@ -273,7 +251,7 @@ console.log("Version:", status.version);

  // All AI operations are now automatically monitored
  const provider = await createBestAIProvider();
- const result = await provider.generateText({
+ const result = await provider.generate({
    prompt: "Generate business report",
  });
  // Telemetry automatically tracks: response time, token usage, cost, errors
@@ -426,7 +404,20 @@ export const POST: RequestHandler = async ({ request }) => {
  const provider = createBestAIProvider();

  try {
-   const result = await provider.streamText({
+   // NEW: Primary streaming method (recommended)
+   const result = await provider.stream({
+     input: { text: message },
+     timeout: "2m", // 2 minutes for streaming
+   });
+
+   // Process stream
+   for await (const chunk of result.stream) {
+     // Handle streaming content
+     console.log(chunk.content);
+   }
+
+   // LEGACY: Backward compatibility (still works)
+   const legacyResult = await provider.stream({ input: { text:
      prompt: message,
      timeout: "2m", // 2 minutes for streaming
    });
@@ -444,7 +435,7 @@ export async function POST(request: NextRequest) {
    const { prompt } = await request.json();
    const provider = createBestAIProvider();

-   const result = await provider.generateText({
+   const result = await provider.generate({
      prompt,
      timeout: process.env.AI_TIMEOUT || "30s", // Configurable timeout
    });
@@ -42,14 +42,15 @@ export class SSEChatHandler
    data: { type: "start", sessionId, messageId: userMessage.id },
  });
  // Generate AI response with streaming
- const aiResponse = await this.provider.streamText({
-   prompt: message,
+ const aiResponse = await this.provider.stream({
+   input: { text: message },
    temperature: options.temperature,
    maxTokens: options.maxTokens,
    systemPrompt: options.systemPrompt,
  });
- if (aiResponse?.textStream) {
-   const reader = aiResponse.textStream.getReader();
+ if (aiResponse?.stream) {
+   // Convert async iterable to readable stream
+   const reader = aiResponse.stream;
    let fullResponse = "";
    try {
      while (true) {
@@ -60,12 +60,12 @@ export class WebSocketChatHandler extends SSEChatHandler {
    latencyTarget: 200,
  });
  // Generate AI response
- const result = await this.provider.generateText({
+ const result = await this.provider.generate({
    prompt: request.prompt,
    temperature: request.options?.temperature,
    maxTokens: request.options?.maxTokens,
  });
- if (!result || !result.text) {
+ if (!result || !result.content) {
    throw new Error("Invalid AI response");
  }
  // Send response via WebSocket
@@ -75,7 +75,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
  connectionId,
  timestamp: Date.now(),
  data: {
-   text: result.text,
+   text: result.content,
    sessionId: request.sessionId,
    metadata: {
      provider: this.provider.constructor.name,
@@ -98,12 +98,12 @@ export class WebSocketChatHandler extends SSEChatHandler {
  async handleGroupChat(roomId, request) {
    try {
      // Process AI request
-     const result = await this.provider.generateText({
+     const result = await this.provider.generate({
        prompt: request.prompt,
        temperature: request.options?.temperature,
        maxTokens: request.options?.maxTokens,
      });
-     if (!result || !result.text) {
+     if (!result || !result.content) {
        throw new Error("Invalid AI response");
      }
      // Broadcast to room
@@ -113,7 +113,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
  connectionId: "system",
  timestamp: Date.now(),
  data: {
-   text: result.text,
+   text: result.content,
    sessionId: request.sessionId,
    userId: request.userId,
    isGroupMessage: true,
@@ -136,11 +136,11 @@ export class WebSocketChatHandler extends SSEChatHandler {
  const channelId = randomUUID();
  const channel = this.wsServer.createStreamingChannel(connectionId, channelId);
  // Generate response
- const result = await this.provider.generateText({
+ const result = await this.provider.generate({
    prompt: request.prompt,
    ...request.options,
  });
- if (!result || !result.text) {
+ if (!result || !result.content) {
    throw new Error("Invalid AI response");
  }
  // Send complete response
@@ -150,7 +150,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
  connectionId,
  timestamp: Date.now(),
  data: {
-   text: result.text,
+   text: result.content,
    isStreamingComplete: true,
    channelId,
  },
@@ -891,7 +891,7 @@ export function addMCPCommands(yargs) {
    type: "boolean",
    description: "Disable fallback to other sources",
  })
- .example('$0 mcp run generate-text --params \'{"prompt": "Hello world"}\'', "Run tool with fallback")
+ .example('$0 mcp run generate --params \'{"prompt": "Hello world"}\'', "Run tool with fallback")
  .example('$0 mcp run read_file --params \'{"path": "README.md"}\' --source manual', "Prefer manual config"), async (argv) => {
  console.log(chalk.blue(`🚀 Executing tool: ${argv.tool}`));
  const spinner = ora("Initializing NeuroLink MCP...").start();
@@ -53,7 +53,7 @@ async function pullModelHandler(argv) {
  try {
    execSync(`ollama pull ${model}`, { stdio: "inherit" });
    console.log(chalk.green(`\n✅ Successfully downloaded ${model}`));
-   console.log(chalk.blue(`\nTest it with: npx @juspay/neurolink generate-text "Hello!" --provider ollama --model ${model}`));
+   console.log(chalk.blue(`\nTest it with: npx @juspay/neurolink generate "Hello!" --provider ollama --model ${model}`));
  }
  catch (error) {
    console.error(chalk.red(`\n❌ Failed to download ${model}`));
@@ -322,9 +322,9 @@ async function setupHandler() {
  console.log(chalk.blue("Next steps:"));
  console.log("1. List models: " + chalk.gray("neurolink ollama list-models"));
  console.log("2. Generate text: " +
-   chalk.gray('neurolink generate-text "Hello!" --provider ollama'));
+   chalk.gray('neurolink generate "Hello!" --provider ollama'));
  console.log("3. Use specific model: " +
-   chalk.gray('neurolink generate-text "Hello!" --provider ollama --model codellama'));
+   chalk.gray('neurolink generate "Hello!" --provider ollama --model codellama'));
  console.log(chalk.gray("\nFor more information, see: https://docs.neurolink.ai/providers/ollama"));
  }
  export default addOllamaCommands;
@@ -0,0 +1,14 @@
+ import type { CommandModule } from "yargs";
+ /**
+  * CLI Command Factory for generate commands
+  */
+ export declare class CLICommandFactory {
+   /**
+    * Create the new primary 'generate' command
+    */
+   static createGenerateCommand(): CommandModule;
+   /**
+    * Execute the generate command
+    */
+   private static executeGenerate;
+ }
@@ -0,0 +1,129 @@
+ import { NeuroLink } from "../../lib/neurolink.js";
+ import ora from "ora";
+ import chalk from "chalk";
+ /**
+  * CLI Command Factory for generate commands
+  */
+ export class CLICommandFactory {
+   /**
+    * Create the new primary 'generate' command
+    */
+   static createGenerateCommand() {
+     return {
+       command: "generate <input>",
+       describe: "Generate content using AI (primary command)",
+       builder: (yargs) => {
+         return yargs
+           .positional("input", {
+             describe: "Text input for generation",
+             type: "string",
+           })
+           .option("provider", {
+             describe: "AI provider to use",
+             type: "string",
+             choices: [
+               "google-ai",
+               "vertex",
+               "openai",
+               "anthropic",
+               "bedrock",
+               "azure",
+               "huggingface",
+               "ollama",
+               "mistral",
+             ],
+             default: "google-ai",
+           })
+           .option("model", {
+             describe: "Specific model to use",
+             type: "string",
+           })
+           .option("temperature", {
+             describe: "Temperature (0-1)",
+             type: "number",
+           })
+           .option("max-tokens", {
+             describe: "Maximum tokens",
+             type: "number",
+           })
+           .option("system-prompt", {
+             describe: "System prompt",
+             type: "string",
+           })
+           .option("timeout", {
+             describe: "Timeout (e.g., 30s, 2m)",
+             type: "string",
+           })
+           .option("disable-tools", {
+             describe: "Disable MCP tools",
+             type: "boolean",
+             default: false,
+           })
+           .option("enable-analytics", {
+             describe: "Enable usage analytics",
+             type: "boolean",
+             default: false,
+           })
+           .option("enable-evaluation", {
+             describe: "Enable AI quality evaluation",
+             type: "boolean",
+             default: false,
+           })
+           .option("output-format", {
+             describe: "Output format",
+             type: "string",
+             choices: ["text", "structured", "json"],
+             default: "text",
+           })
+           .option("debug", {
+             describe: "Enable debug output",
+             type: "boolean",
+             default: false,
+           });
+       },
+       handler: async (argv) => await CLICommandFactory.executeGenerate(argv),
+     };
+   }
+   /**
+    * Execute the generate command
+    */
+   static async executeGenerate(argv) {
+     const spinner = ora("Generating content...").start();
+     try {
+       const sdk = new NeuroLink();
+       const result = await sdk.generate({
+         input: { text: argv.input },
+         output: { format: argv.outputFormat },
+         provider: argv.provider,
+         model: argv.model,
+         temperature: argv.temperature,
+         maxTokens: argv.maxTokens,
+         systemPrompt: argv.systemPrompt,
+         timeout: argv.timeout,
+         enableAnalytics: argv.enableAnalytics,
+         enableEvaluation: argv.enableEvaluation,
+       });
+       spinner.succeed("Content generated successfully!");
+       console.log("\n" + chalk.cyan("Generated Content:"));
+       console.log(result.content);
+       if (argv.debug) {
+         console.log("\n" + chalk.yellow("Debug Information:"));
+         console.log("Provider:", result.provider);
+         console.log("Model:", result.model);
+         if (result.analytics) {
+           console.log("Analytics:", JSON.stringify(result.analytics, null, 2));
+         }
+         if (result.evaluation) {
+           console.log("Evaluation:", JSON.stringify(result.evaluation, null, 2));
+         }
+       }
+       // Exit successfully
+       process.exit(0);
+     }
+     catch (error) {
+       spinner.fail("Generation failed");
+       console.error(chalk.red("Error:"), error);
+       process.exit(1);
+     }
+   }
+ }
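For context, a `CommandModule` built by this factory would typically be registered in the CLI entry point along these lines. Only `CLICommandFactory.createGenerateCommand()` and the yargs `CommandModule` contract come from the diff; the import path and surrounding setup are illustrative assumptions (the changed `dist/cli/index.js` is not shown here).

```typescript
// Hypothetical wiring in the CLI entry point; the factory object
// ({ command, describe, builder, handler }) is passed straight to yargs.
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { CLICommandFactory } from "./factories/command-factory.js";

await yargs(hideBin(process.argv))
  .scriptName("neurolink")
  // Register the factory-built primary 'generate' command.
  .command(CLICommandFactory.createGenerateCommand())
  .demandCommand(1)
  .strict()
  .help()
  .parseAsync();
```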