@juspay/neurolink 7.35.0 → 7.37.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/adapters/providerImageAdapter.d.ts +56 -0
- package/dist/adapters/providerImageAdapter.js +257 -0
- package/dist/cli/commands/config.d.ts +20 -20
- package/dist/cli/factories/commandFactory.d.ts +1 -0
- package/dist/cli/factories/commandFactory.js +26 -3
- package/dist/config/taskClassificationConfig.d.ts +51 -0
- package/dist/config/taskClassificationConfig.js +148 -0
- package/dist/core/baseProvider.js +99 -45
- package/dist/core/types.d.ts +3 -0
- package/dist/lib/adapters/providerImageAdapter.d.ts +56 -0
- package/dist/lib/adapters/providerImageAdapter.js +257 -0
- package/dist/lib/config/taskClassificationConfig.d.ts +51 -0
- package/dist/lib/config/taskClassificationConfig.js +148 -0
- package/dist/lib/core/baseProvider.js +99 -45
- package/dist/lib/core/types.d.ts +3 -0
- package/dist/lib/neurolink.d.ts +20 -0
- package/dist/lib/neurolink.js +276 -8
- package/dist/lib/types/content.d.ts +78 -0
- package/dist/lib/types/content.js +5 -0
- package/dist/lib/types/conversation.d.ts +19 -0
- package/dist/lib/types/generateTypes.d.ts +4 -1
- package/dist/lib/types/index.d.ts +2 -0
- package/dist/lib/types/index.js +2 -0
- package/dist/lib/types/streamTypes.d.ts +6 -3
- package/dist/lib/types/taskClassificationTypes.d.ts +52 -0
- package/dist/lib/types/taskClassificationTypes.js +5 -0
- package/dist/lib/utils/imageProcessor.d.ts +84 -0
- package/dist/lib/utils/imageProcessor.js +362 -0
- package/dist/lib/utils/messageBuilder.d.ts +8 -1
- package/dist/lib/utils/messageBuilder.js +279 -0
- package/dist/lib/utils/modelRouter.d.ts +107 -0
- package/dist/lib/utils/modelRouter.js +292 -0
- package/dist/lib/utils/promptRedaction.d.ts +29 -0
- package/dist/lib/utils/promptRedaction.js +62 -0
- package/dist/lib/utils/taskClassificationUtils.d.ts +55 -0
- package/dist/lib/utils/taskClassificationUtils.js +149 -0
- package/dist/lib/utils/taskClassifier.d.ts +23 -0
- package/dist/lib/utils/taskClassifier.js +94 -0
- package/dist/neurolink.d.ts +20 -0
- package/dist/neurolink.js +276 -8
- package/dist/types/content.d.ts +78 -0
- package/dist/types/content.js +5 -0
- package/dist/types/conversation.d.ts +19 -0
- package/dist/types/generateTypes.d.ts +4 -1
- package/dist/types/index.d.ts +2 -0
- package/dist/types/index.js +2 -0
- package/dist/types/streamTypes.d.ts +6 -3
- package/dist/types/taskClassificationTypes.d.ts +52 -0
- package/dist/types/taskClassificationTypes.js +5 -0
- package/dist/utils/imageProcessor.d.ts +84 -0
- package/dist/utils/imageProcessor.js +362 -0
- package/dist/utils/messageBuilder.d.ts +8 -1
- package/dist/utils/messageBuilder.js +279 -0
- package/dist/utils/modelRouter.d.ts +107 -0
- package/dist/utils/modelRouter.js +292 -0
- package/dist/utils/promptRedaction.d.ts +29 -0
- package/dist/utils/promptRedaction.js +62 -0
- package/dist/utils/taskClassificationUtils.d.ts +55 -0
- package/dist/utils/taskClassificationUtils.js +149 -0
- package/dist/utils/taskClassifier.d.ts +23 -0
- package/dist/utils/taskClassifier.js +94 -0
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,15 @@
+## [7.37.0](https://github.com/juspay/neurolink/compare/v7.36.0...v7.37.0) (2025-09-10)
+
+### Features
+
+- **(sdk):** Add advanced orchestration of model and providers BZ-43839 ([840d697](https://github.com/juspay/neurolink/commit/840d697aa6ef3e5e4c511a9482fc7e80006d2534))
+
+## [7.36.0](https://github.com/juspay/neurolink/compare/v7.35.0...v7.36.0) (2025-09-10)
+
+### Features
+
+- **(image):** added support for multimodality(image) in cli and sdk ([678b61b](https://github.com/juspay/neurolink/commit/678b61bfef3d0622029d40b8ab06dca9836bcb6c))
+
 ## [7.35.0](https://github.com/juspay/neurolink/compare/v7.34.0...v7.35.0) (2025-09-09)
 
 ### Features
package/dist/adapters/providerImageAdapter.d.ts
ADDED
@@ -0,0 +1,56 @@
+/**
+ * Provider Image Adapter - Smart routing for multimodal content
+ * Handles provider-specific image formatting and vision capability validation
+ */
+import type { Content } from "../types/content.js";
+/**
+ * Simplified logger for essential error reporting only
+ */
+export declare class MultimodalLogger {
+    static logError(step: string, error: Error, context: unknown): void;
+}
+/**
+ * Provider Image Adapter - Smart routing and formatting
+ */
+export declare class ProviderImageAdapter {
+    /**
+     * Main adapter method - routes to provider-specific formatting
+     */
+    static adaptForProvider(text: string, images: Array<Buffer | string>, provider: string, model: string): Promise<unknown>;
+    /**
+     * Format content for OpenAI (GPT-4o format)
+     */
+    private static formatForOpenAI;
+    /**
+     * Format content for Google AI (Gemini format)
+     */
+    private static formatForGoogleAI;
+    /**
+     * Format content for Anthropic (Claude format)
+     */
+    private static formatForAnthropic;
+    /**
+     * Format content for Vertex AI (model-specific routing)
+     */
+    private static formatForVertex;
+    /**
+     * Validate that provider and model support vision
+     */
+    private static validateVisionSupport;
+    /**
+     * Convert simple images array to advanced content format
+     */
+    static convertToContent(text: string, images?: Array<Buffer | string>): Content[];
+    /**
+     * Check if provider supports multimodal content
+     */
+    static supportsVision(provider: string, model?: string): boolean;
+    /**
+     * Get supported models for a provider
+     */
+    static getSupportedModels(provider: string): string[];
+    /**
+     * Get all vision-capable providers
+     */
+    static getVisionProviders(): string[];
+}
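Note: the declarations above are the new multimodal entry points. A minimal usage sketch, assuming the deep dist import path implied by this diff (not necessarily a documented public export):

// Sketch only - exercises the static API declared above.
import { ProviderImageAdapter } from "@juspay/neurolink/dist/adapters/providerImageAdapter.js";

async function buildVisionPayload(image: Buffer): Promise<unknown> {
  // supportsVision() is the non-throwing capability check.
  if (!ProviderImageAdapter.supportsVision("openai", "gpt-4o")) {
    throw new Error("Provider/model is not vision-capable");
  }
  // adaptForProvider() validates again, then routes to the provider-specific formatter.
  return ProviderImageAdapter.adaptForProvider(
    "What is in this image?",
    [image],
    "openai",
    "gpt-4o",
  );
}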
package/dist/adapters/providerImageAdapter.js
ADDED
@@ -0,0 +1,257 @@
+/**
+ * Provider Image Adapter - Smart routing for multimodal content
+ * Handles provider-specific image formatting and vision capability validation
+ */
+import { logger } from "../utils/logger.js";
+import { ImageProcessor } from "../utils/imageProcessor.js";
+/**
+ * Simplified logger for essential error reporting only
+ */
+export class MultimodalLogger {
+    static logError(step, error, context) {
+        logger.error(`Multimodal ${step} failed: ${error.message}`);
+        if (process.env.NODE_ENV === "development") {
+            logger.error("Context:", JSON.stringify(context, null, 2));
+            logger.error("Stack:", error.stack);
+        }
+    }
+}
+/**
+ * Vision capability definitions for each provider
+ */
+const VISION_CAPABILITIES = {
+    openai: ["gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-vision-preview"],
+    "google-ai": [
+        "gemini-2.5-pro",
+        "gemini-2.5-flash",
+        "gemini-1.5-pro",
+        "gemini-1.5-flash",
+        "gemini-pro-vision",
+    ],
+    anthropic: [
+        "claude-3-5-sonnet",
+        "claude-3-opus",
+        "claude-3-sonnet",
+        "claude-3-haiku",
+    ],
+    vertex: [
+        // Gemini models on Vertex AI
+        "gemini-2.5-pro",
+        "gemini-2.5-flash",
+        "gemini-1.5-pro",
+        "gemini-1.5-flash",
+        // Claude models on Vertex AI (with actual Vertex naming patterns)
+        "claude-3-5-sonnet",
+        "claude-3-opus",
+        "claude-3-sonnet",
+        "claude-3-haiku",
+        "claude-sonnet-3",
+        "claude-sonnet-4",
+        "claude-opus-3",
+        "claude-haiku-3",
+        // Additional Vertex AI Claude model patterns
+        "claude-3.5-sonnet",
+        "claude-3.5-haiku",
+        "claude-3.0-sonnet",
+        "claude-3.0-opus",
+        // Versioned model names (e.g., claude-sonnet-4@20250514)
+        "claude-sonnet-4@",
+        "claude-opus-3@",
+        "claude-haiku-3@",
+        "claude-3-5-sonnet@",
+    ],
+};
+/**
+ * Provider Image Adapter - Smart routing and formatting
+ */
+export class ProviderImageAdapter {
+    /**
+     * Main adapter method - routes to provider-specific formatting
+     */
+    static async adaptForProvider(text, images, provider, model) {
+        try {
+            // Validate provider supports vision
+            this.validateVisionSupport(provider, model);
+            let adaptedPayload;
+            // Process images based on provider requirements
+            switch (provider.toLowerCase()) {
+                case "openai":
+                    adaptedPayload = this.formatForOpenAI(text, images);
+                    break;
+                case "google-ai":
+                case "google":
+                    adaptedPayload = this.formatForGoogleAI(text, images);
+                    break;
+                case "anthropic":
+                    adaptedPayload = this.formatForAnthropic(text, images);
+                    break;
+                case "vertex":
+                    adaptedPayload = this.formatForVertex(text, images, model);
+                    break;
+                default:
+                    throw new Error(`Vision not supported for provider: ${provider}`);
+            }
+            return adaptedPayload;
+        }
+        catch (error) {
+            MultimodalLogger.logError("ADAPTATION", error, {
+                provider,
+                model,
+                imageCount: images.length,
+            });
+            throw error;
+        }
+    }
+    /**
+     * Format content for OpenAI (GPT-4o format)
+     */
+    static formatForOpenAI(text, images) {
+        const content = [{ type: "text", text }];
+        images.forEach((image, index) => {
+            try {
+                const imageUrl = ImageProcessor.processImageForOpenAI(image);
+                content.push({
+                    type: "image_url",
+                    image_url: { url: imageUrl },
+                });
+            }
+            catch (error) {
+                MultimodalLogger.logError("PROCESS_IMAGE", error, {
+                    index,
+                    provider: "openai",
+                });
+                throw error;
+            }
+        });
+        return { messages: [{ role: "user", content }] };
+    }
+    /**
+     * Format content for Google AI (Gemini format)
+     */
+    static formatForGoogleAI(text, images) {
+        const parts = [{ text }];
+        images.forEach((image, index) => {
+            try {
+                const { mimeType, data } = ImageProcessor.processImageForGoogle(image);
+                parts.push({
+                    inlineData: { mimeType, data },
+                });
+            }
+            catch (error) {
+                MultimodalLogger.logError("PROCESS_IMAGE", error, {
+                    index,
+                    provider: "google-ai",
+                });
+                throw error;
+            }
+        });
+        return { contents: [{ parts }] };
+    }
+    /**
+     * Format content for Anthropic (Claude format)
+     */
+    static formatForAnthropic(text, images) {
+        const content = [{ type: "text", text }];
+        images.forEach((image, index) => {
+            try {
+                const { mediaType, data } = ImageProcessor.processImageForAnthropic(image);
+                content.push({
+                    type: "image",
+                    source: {
+                        type: "base64",
+                        media_type: mediaType,
+                        data,
+                    },
+                });
+            }
+            catch (error) {
+                MultimodalLogger.logError("PROCESS_IMAGE", error, {
+                    index,
+                    provider: "anthropic",
+                });
+                throw error;
+            }
+        });
+        return { messages: [{ role: "user", content }] };
+    }
+    /**
+     * Format content for Vertex AI (model-specific routing)
+     */
+    static formatForVertex(text, images, model) {
+        // Route based on model type
+        if (model.includes("gemini")) {
+            return this.formatForGoogleAI(text, images);
+        }
+        else if (model.includes("claude")) {
+            return this.formatForAnthropic(text, images);
+        }
+        else {
+            return this.formatForGoogleAI(text, images);
+        }
+    }
+    /**
+     * Validate that provider and model support vision
+     */
+    static validateVisionSupport(provider, model) {
+        const normalizedProvider = provider.toLowerCase();
+        const supportedModels = VISION_CAPABILITIES[normalizedProvider];
+        if (!supportedModels) {
+            throw new Error(`Provider ${provider} does not support vision processing. ` +
+                `Supported providers: ${Object.keys(VISION_CAPABILITIES).join(", ")}`);
+        }
+        const isSupported = supportedModels.some((supportedModel) => model.toLowerCase().includes(supportedModel.toLowerCase()));
+        if (!isSupported) {
+            throw new Error(`Provider ${provider} with model ${model} does not support vision processing. ` +
+                `Supported models for ${provider}: ${supportedModels.join(", ")}`);
+        }
+    }
+    /**
+     * Convert simple images array to advanced content format
+     */
+    static convertToContent(text, images) {
+        const content = [{ type: "text", text }];
+        if (images && images.length > 0) {
+            images.forEach((image) => {
+                content.push({
+                    type: "image",
+                    data: image,
+                    mediaType: ImageProcessor.detectImageType(image),
+                });
+            });
+        }
+        return content;
+    }
+    /**
+     * Check if provider supports multimodal content
+     */
+    static supportsVision(provider, model) {
+        try {
+            const normalizedProvider = provider.toLowerCase();
+            const supportedModels = VISION_CAPABILITIES[normalizedProvider];
+            if (!supportedModels) {
+                return false;
+            }
+            if (!model) {
+                return true; // Provider supports vision, but need to check specific model
+            }
+            return supportedModels.some((supportedModel) => model.toLowerCase().includes(supportedModel.toLowerCase()));
+        }
+        catch {
+            return false;
+        }
+    }
+    /**
+     * Get supported models for a provider
+     */
+    static getSupportedModels(provider) {
+        const normalizedProvider = provider.toLowerCase();
+        const models = VISION_CAPABILITIES[normalizedProvider];
+        return models ? [...models] : [];
+    }
+    /**
+     * Get all vision-capable providers
+     */
+    static getVisionProviders() {
+        return Object.keys(VISION_CAPABILITIES);
+    }
+}
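Note: the formatters above return provider-native payload shapes rather than a common one, and formatForVertex picks the Gemini or Claude shape by model-name substring. Condensed from the implementation (string values abbreviated):

// Shapes returned by adaptForProvider(), per the formatters above.
const openaiPayload = {
  messages: [{
    role: "user",
    content: [
      { type: "text", text: "..." },
      { type: "image_url", image_url: { url: "data:image/png;base64,..." } },
    ],
  }],
};
const googlePayload = {
  contents: [{
    parts: [
      { text: "..." },
      { inlineData: { mimeType: "image/png", data: "..." } },
    ],
  }],
};
const anthropicPayload = {
  messages: [{
    role: "user",
    content: [
      { type: "text", text: "..." },
      { type: "image", source: { type: "base64", media_type: "image/png", data: "..." } },
    ],
  }],
};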
package/dist/cli/commands/config.d.ts
CHANGED
@@ -18,9 +18,9 @@ declare const ConfigSchema: z.ZodObject<{
     apiKey?: string | undefined;
     baseURL?: string | undefined;
 }, {
+    model?: string | undefined;
     apiKey?: string | undefined;
     baseURL?: string | undefined;
-    model?: string | undefined;
 }>>;
 bedrock: z.ZodOptional<z.ZodObject<{
     region: z.ZodOptional<z.ZodString>;
@@ -73,8 +73,8 @@ declare const ConfigSchema: z.ZodObject<{
     model: string;
     apiKey?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
 }>>;
 azure: z.ZodOptional<z.ZodObject<{
     apiKey: z.ZodOptional<z.ZodString>;
@@ -87,8 +87,8 @@ declare const ConfigSchema: z.ZodObject<{
     endpoint?: string | undefined;
     deploymentId?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
     endpoint?: string | undefined;
     deploymentId?: string | undefined;
 }>>;
@@ -99,8 +99,8 @@ declare const ConfigSchema: z.ZodObject<{
     model: string;
     apiKey?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
 }>>;
 huggingface: z.ZodOptional<z.ZodObject<{
     apiKey: z.ZodOptional<z.ZodString>;
@@ -109,8 +109,8 @@ declare const ConfigSchema: z.ZodObject<{
     model: string;
     apiKey?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
 }>>;
 ollama: z.ZodOptional<z.ZodObject<{
     baseUrl: z.ZodDefault<z.ZodString>;
@@ -132,8 +132,8 @@ declare const ConfigSchema: z.ZodObject<{
     model: string;
     apiKey?: string | undefined;
 }, {
-    apiKey?: string | undefined;
     model?: string | undefined;
+    apiKey?: string | undefined;
 }>>;
 }, "strip", z.ZodTypeAny, {
     openai?: {
@@ -186,13 +186,13 @@ declare const ConfigSchema: z.ZodObject<{
     } | undefined;
 }, {
     openai?: {
+        model?: string | undefined;
         apiKey?: string | undefined;
         baseURL?: string | undefined;
-        model?: string | undefined;
     } | undefined;
     anthropic?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     vertex?: {
         model?: string | undefined;
@@ -204,8 +204,8 @@ declare const ConfigSchema: z.ZodObject<{
         privateKey?: string | undefined;
     } | undefined;
     "google-ai"?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     bedrock?: {
         model?: string | undefined;
@@ -215,14 +215,14 @@ declare const ConfigSchema: z.ZodObject<{
         sessionToken?: string | undefined;
     } | undefined;
     azure?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
         endpoint?: string | undefined;
         deploymentId?: string | undefined;
     } | undefined;
     huggingface?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     ollama?: {
         timeout?: number | undefined;
@@ -230,8 +230,8 @@ declare const ConfigSchema: z.ZodObject<{
         baseUrl?: string | undefined;
     } | undefined;
     mistral?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
 }>>;
 profiles: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodAny>>;
@@ -505,7 +505,7 @@ declare const ConfigSchema: z.ZodObject<{
     apiKey?: string | undefined;
 } | undefined;
 };
-defaultProvider: "openai" | "anthropic" | "vertex" | "google-ai" | "bedrock" | "azure" | "huggingface" | "ollama" | "mistral";
+defaultProvider: "openai" | "anthropic" | "vertex" | "google-ai" | "auto" | "bedrock" | "azure" | "huggingface" | "ollama" | "mistral";
 profiles: Record<string, any>;
 preferences: {
     temperature: number;
@@ -555,13 +555,13 @@ declare const ConfigSchema: z.ZodObject<{
 }, {
 providers?: {
     openai?: {
+        model?: string | undefined;
         apiKey?: string | undefined;
         baseURL?: string | undefined;
-        model?: string | undefined;
     } | undefined;
     anthropic?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     vertex?: {
         model?: string | undefined;
@@ -573,8 +573,8 @@ declare const ConfigSchema: z.ZodObject<{
         privateKey?: string | undefined;
     } | undefined;
     "google-ai"?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     bedrock?: {
         model?: string | undefined;
@@ -584,14 +584,14 @@ declare const ConfigSchema: z.ZodObject<{
         sessionToken?: string | undefined;
     } | undefined;
     azure?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
         endpoint?: string | undefined;
         deploymentId?: string | undefined;
     } | undefined;
     huggingface?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
     ollama?: {
         timeout?: number | undefined;
@@ -599,11 +599,11 @@ declare const ConfigSchema: z.ZodObject<{
         baseUrl?: string | undefined;
     } | undefined;
     mistral?: {
-        apiKey?: string | undefined;
         model?: string | undefined;
+        apiKey?: string | undefined;
     } | undefined;
 } | undefined;
-defaultProvider?: "openai" | "anthropic" | "vertex" | "google-ai" | "bedrock" | "azure" | "huggingface" | "ollama" | "mistral" | undefined;
+defaultProvider?: "openai" | "anthropic" | "vertex" | "google-ai" | "auto" | "bedrock" | "azure" | "huggingface" | "ollama" | "mistral" | undefined;
 profiles?: Record<string, any> | undefined;
 preferences?: {
     maxTokens?: number | undefined;
@@ -5,6 +5,7 @@ import type { CommandModule } from "yargs";
 export declare class CLICommandFactory {
     private static readonly commonOptions;
     private static buildOptions;
+    private static processCliImages;
     private static processOptions;
     private static handleOutput;
     private static isValidTokenUsage;
@@ -15,7 +15,6 @@ import chalk from "chalk";
 import { logger } from "../../lib/utils/logger.js";
 import fs from "fs";
 import { handleSetup } from "../commands/setup.js";
-// Use specific command interfaces from cli.ts instead of universal interface
 /**
  * CLI Command Factory for generate commands
  */
@@ -44,6 +43,11 @@ export class CLICommandFactory {
             description: "AI provider to use (auto-selects best available)",
             alias: "p",
         },
+        image: {
+            type: "string",
+            description: "Add image file for multimodal analysis (can be used multiple times)",
+            alias: "i",
+        },
         model: {
             type: "string",
             description: "Specific model to use (e.g. gemini-2.5-pro, gemini-2.5-flash)",
@@ -172,6 +176,17 @@ export class CLICommandFactory {
             ...additionalOptions,
         });
     }
+    // Helper method to process CLI images with smart auto-detection
+    static processCliImages(images) {
+        if (!images) {
+            return undefined;
+        }
+        const imagePaths = Array.isArray(images) ? images : [images];
+        // Return as-is - let the smart message builder handle URL vs file detection
+        // URLs will be detected and appended to prompt text
+        // File paths will be converted to base64 by the message builder
+        return imagePaths;
+    }
     // Helper method to process common options
     static processOptions(argv) {
         // Handle noColor option by disabling chalk
@@ -927,8 +942,12 @@ export class CLICommandFactory {
                 toolsEnabled: !options.disableTools,
             });
         }
+        // Process CLI images if provided
+        const imageBuffers = CLICommandFactory.processCliImages(argv.image);
         const result = await sdk.generate({
-            input: { text: inputText },
+            input: imageBuffers
+                ? { text: inputText, images: imageBuffers }
+                : { text: inputText },
             provider: enhancedOptions.provider,
             model: enhancedOptions.model,
             temperature: enhancedOptions.temperature,
@@ -1139,8 +1158,12 @@ export class CLICommandFactory {
         const context = sessionId
             ? { ...contextMetadata, sessionId }
             : contextMetadata;
+        // Process CLI images if provided
+        const imageBuffers = CLICommandFactory.processCliImages(argv.image);
         const stream = await sdk.stream({
-            input: { text: inputText },
+            input: imageBuffers
+                ? { text: inputText, images: imageBuffers }
+                : { text: inputText },
             provider: enhancedOptions.provider,
             model: enhancedOptions.model,
             temperature: enhancedOptions.temperature,
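Note: with these changes both the generate and stream commands accept a repeatable --image/-i flag, and the factory forwards the values untouched (URL-vs-file detection happens later in the message builder). A sketch of the equivalent direct SDK call, assuming a constructed NeuroLink instance named sdk as in the surrounding code:

// Mirrors: generate "Describe these" --image ./a.png --image https://example.com/b.png
const result = await sdk.generate({
  input: {
    text: "Describe these",
    // File paths are converted to base64 downstream; URLs are detected and handled separately.
    images: ["./a.png", "https://example.com/b.png"],
  },
  provider: "vertex",
  model: "gemini-2.5-pro",
});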
package/dist/config/taskClassificationConfig.d.ts
ADDED
@@ -0,0 +1,51 @@
+/**
+ * Task Classification Configuration
+ * Contains patterns, keywords, and scoring weights for task classification
+ */
+/**
+ * Regular expression patterns that indicate fast response tasks
+ */
+export declare const FAST_PATTERNS: RegExp[];
+/**
+ * Regular expression patterns that indicate reasoning tasks
+ */
+export declare const REASONING_PATTERNS: RegExp[];
+/**
+ * Keywords that indicate fast tasks regardless of context
+ */
+export declare const FAST_KEYWORDS: string[];
+/**
+ * Keywords that indicate reasoning tasks regardless of context
+ */
+export declare const REASONING_KEYWORDS: string[];
+/**
+ * Scoring weights for different classification factors
+ */
+export declare const SCORING_WEIGHTS: {
+    readonly SHORT_PROMPT_BONUS: 2;
+    readonly LONG_PROMPT_BONUS: 1;
+    readonly PATTERN_MATCH_SCORE: 3;
+    readonly KEYWORD_MATCH_SCORE: 1;
+    readonly MULTIPLE_QUESTIONS_BONUS: 1;
+    readonly MULTI_SENTENCE_BONUS: 1;
+    readonly TECHNICAL_DOMAIN_BONUS: 1;
+    readonly SIMPLE_DEFINITION_BONUS: 2;
+};
+/**
+ * Classification thresholds and constraints
+ */
+export declare const CLASSIFICATION_THRESHOLDS: {
+    readonly SHORT_PROMPT_LENGTH: 50;
+    readonly LONG_PROMPT_LENGTH: 200;
+    readonly SIMPLE_DEFINITION_LENGTH: 100;
+    readonly MIN_CONFIDENCE: 0.6;
+    readonly MAX_CONFIDENCE: 0.95;
+    readonly DEFAULT_CONFIDENCE: 0.5;
+};
+/**
+ * Domain-specific patterns for enhanced classification
+ */
+export declare const DOMAIN_PATTERNS: {
+    readonly TECHNICAL: RegExp;
+    readonly SIMPLE_DEFINITION: RegExp;
+};
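Note: this file only declares the classification constants; the logic that consumes them lives in the new taskClassifier.js and taskClassificationUtils.js, which this diff lists but does not show. A rough sketch of how such weights and thresholds could combine, as an illustration rather than the package's actual algorithm:

// Illustrative scoring pass using the exported constants (hypothetical combination logic).
import {
  FAST_PATTERNS,
  FAST_KEYWORDS,
  SCORING_WEIGHTS,
  CLASSIFICATION_THRESHOLDS,
} from "@juspay/neurolink/dist/config/taskClassificationConfig.js";

function fastScore(prompt: string): number {
  let score = 0;
  // Short prompts lean toward the "fast" model tier.
  if (prompt.length < CLASSIFICATION_THRESHOLDS.SHORT_PROMPT_LENGTH) {
    score += SCORING_WEIGHTS.SHORT_PROMPT_BONUS;
  }
  // Pattern hits are weighted more heavily than bare keyword hits.
  score += FAST_PATTERNS.filter((p) => p.test(prompt)).length * SCORING_WEIGHTS.PATTERN_MATCH_SCORE;
  score += FAST_KEYWORDS.filter((k) => prompt.toLowerCase().includes(k)).length * SCORING_WEIGHTS.KEYWORD_MATCH_SCORE;
  return score;
}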