@juspay/neurolink 7.1.0 → 7.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/CHANGELOG.md +15 -2
  2. package/README.md +16 -11
  3. package/dist/cli/commands/config.d.ts +2 -2
  4. package/dist/cli/commands/config.js +22 -21
  5. package/dist/cli/commands/mcp.d.ts +79 -0
  6. package/dist/cli/commands/mcp.js +916 -0
  7. package/dist/cli/commands/models.d.ts +63 -0
  8. package/dist/cli/commands/models.js +653 -0
  9. package/dist/cli/commands/ollama.js +56 -55
  10. package/dist/cli/factories/commandFactory.d.ts +14 -0
  11. package/dist/cli/factories/commandFactory.js +346 -47
  12. package/dist/cli/index.js +25 -10
  13. package/dist/cli/utils/completeSetup.js +9 -8
  14. package/dist/cli/utils/envManager.js +7 -6
  15. package/dist/cli/utils/interactiveSetup.js +20 -19
  16. package/dist/core/analytics.js +25 -38
  17. package/dist/core/baseProvider.d.ts +8 -0
  18. package/dist/core/baseProvider.js +177 -68
  19. package/dist/core/constants.d.ts +11 -0
  20. package/dist/core/constants.js +17 -0
  21. package/dist/core/evaluation.js +25 -14
  22. package/dist/core/factory.js +19 -18
  23. package/dist/core/streamAnalytics.d.ts +65 -0
  24. package/dist/core/streamAnalytics.js +125 -0
  25. package/dist/lib/core/analytics.js +25 -38
  26. package/dist/lib/core/baseProvider.d.ts +8 -0
  27. package/dist/lib/core/baseProvider.js +177 -68
  28. package/dist/lib/core/constants.d.ts +11 -0
  29. package/dist/lib/core/constants.js +17 -0
  30. package/dist/lib/core/evaluation.js +25 -14
  31. package/dist/lib/core/factory.js +19 -18
  32. package/dist/lib/core/streamAnalytics.d.ts +65 -0
  33. package/dist/lib/core/streamAnalytics.js +125 -0
  34. package/dist/lib/models/modelRegistry.d.ts +132 -0
  35. package/dist/lib/models/modelRegistry.js +483 -0
  36. package/dist/lib/models/modelResolver.d.ts +115 -0
  37. package/dist/lib/models/modelResolver.js +467 -0
  38. package/dist/lib/neurolink.d.ts +4 -1
  39. package/dist/lib/neurolink.js +101 -67
  40. package/dist/lib/providers/anthropic.js +3 -0
  41. package/dist/lib/providers/googleAiStudio.js +13 -0
  42. package/dist/lib/providers/huggingFace.js +15 -3
  43. package/dist/lib/providers/mistral.js +19 -7
  44. package/dist/lib/providers/ollama.js +31 -7
  45. package/dist/lib/providers/openAI.js +12 -0
  46. package/dist/lib/sdk/toolRegistration.js +2 -2
  47. package/dist/lib/types/cli.d.ts +56 -1
  48. package/dist/lib/types/contextTypes.d.ts +110 -0
  49. package/dist/lib/types/contextTypes.js +176 -0
  50. package/dist/lib/types/index.d.ts +4 -1
  51. package/dist/lib/types/mcpTypes.d.ts +118 -7
  52. package/dist/lib/types/providers.d.ts +81 -0
  53. package/dist/lib/types/streamTypes.d.ts +44 -7
  54. package/dist/lib/types/tools.d.ts +9 -0
  55. package/dist/lib/types/universalProviderOptions.d.ts +3 -1
  56. package/dist/lib/types/universalProviderOptions.js +2 -1
  57. package/dist/lib/utils/logger.d.ts +7 -0
  58. package/dist/lib/utils/logger.js +11 -0
  59. package/dist/lib/utils/performance.d.ts +105 -0
  60. package/dist/lib/utils/performance.js +210 -0
  61. package/dist/lib/utils/retryHandler.d.ts +89 -0
  62. package/dist/lib/utils/retryHandler.js +269 -0
  63. package/dist/models/modelRegistry.d.ts +132 -0
  64. package/dist/models/modelRegistry.js +483 -0
  65. package/dist/models/modelResolver.d.ts +115 -0
  66. package/dist/models/modelResolver.js +468 -0
  67. package/dist/neurolink.d.ts +4 -1
  68. package/dist/neurolink.js +101 -67
  69. package/dist/providers/anthropic.js +3 -0
  70. package/dist/providers/googleAiStudio.js +13 -0
  71. package/dist/providers/huggingFace.js +15 -3
  72. package/dist/providers/mistral.js +19 -7
  73. package/dist/providers/ollama.js +31 -7
  74. package/dist/providers/openAI.js +12 -0
  75. package/dist/sdk/toolRegistration.js +2 -2
  76. package/dist/types/cli.d.ts +56 -1
  77. package/dist/types/contextTypes.d.ts +110 -0
  78. package/dist/types/contextTypes.js +177 -0
  79. package/dist/types/index.d.ts +4 -1
  80. package/dist/types/mcpTypes.d.ts +118 -7
  81. package/dist/types/providers.d.ts +81 -0
  82. package/dist/types/streamTypes.d.ts +44 -7
  83. package/dist/types/tools.d.ts +9 -0
  84. package/dist/types/universalProviderOptions.d.ts +3 -1
  85. package/dist/types/universalProviderOptions.js +3 -1
  86. package/dist/utils/logger.d.ts +7 -0
  87. package/dist/utils/logger.js +11 -0
  88. package/dist/utils/performance.d.ts +105 -0
  89. package/dist/utils/performance.js +210 -0
  90. package/dist/utils/retryHandler.d.ts +89 -0
  91. package/dist/utils/retryHandler.js +269 -0
  92. package/package.json +2 -1
@@ -0,0 +1,483 @@
1
+ /**
2
+ * Model Registry for NeuroLink CLI Commands
3
+ * Provides centralized model data for models command system
4
+ * Part of Phase 4.1 - Models Command System
5
+ */
6
+ import { AIProviderName } from "../core/types.js";
7
/**
 * Comprehensive model registry
 *
 * Static catalog of known models keyed by canonical model id. Each entry
 * records provider, capabilities, per-1K-token pricing, qualitative
 * performance tiers, hard limits, 1-10 use-case suitability scores, and
 * lookup aliases. Consumed by the models CLI command and the alias/
 * recommendation tables built below.
 */
export const MODEL_REGISTRY = {
    // OpenAI Models
    "gpt-4o": {
        id: "gpt-4o",
        name: "GPT-4 Omni",
        provider: AIProviderName.OPENAI,
        description: "Most capable OpenAI model with vision and advanced reasoning",
        capabilities: {
            vision: true,
            functionCalling: true,
            codeGeneration: true,
            reasoning: true,
            multimodal: true,
            streaming: true,
            jsonMode: true,
        },
        // Costs are USD per 1,000 tokens (see calculateCost)
        pricing: {
            inputCostPer1K: 0.005,
            outputCostPer1K: 0.015,
            currency: "USD",
        },
        performance: {
            speed: "medium",
            quality: "high",
            accuracy: "high",
        },
        limits: {
            maxContextTokens: 128000,
            maxOutputTokens: 4096,
            maxRequestsPerMinute: 500,
        },
        // Suitability scores on a 1-10 scale (higher is better)
        useCases: {
            coding: 9,
            creative: 8,
            analysis: 9,
            conversation: 9,
            reasoning: 9,
            translation: 8,
            summarization: 8,
        },
        aliases: ["gpt4o", "gpt-4-omni", "openai-flagship"],
        deprecated: false,
        isLocal: false, // Cloud-based model
        releaseDate: "2024-05-13",
        category: "general",
    },
    "gpt-4o-mini": {
        id: "gpt-4o-mini",
        name: "GPT-4 Omni Mini",
        provider: AIProviderName.OPENAI,
        description: "Fast and cost-effective model with strong performance",
        capabilities: {
            vision: true,
            functionCalling: true,
            codeGeneration: true,
            reasoning: true,
            multimodal: true,
            streaming: true,
            jsonMode: true,
        },
        pricing: {
            inputCostPer1K: 0.00015,
            outputCostPer1K: 0.0006,
            currency: "USD",
        },
        performance: {
            speed: "fast",
            quality: "high",
            accuracy: "high",
        },
        limits: {
            maxContextTokens: 128000,
            maxOutputTokens: 16384,
            maxRequestsPerMinute: 1000,
        },
        useCases: {
            coding: 8,
            creative: 7,
            analysis: 8,
            conversation: 8,
            reasoning: 8,
            translation: 8,
            summarization: 9,
        },
        aliases: ["gpt4o-mini", "gpt-4-mini", "fastest", "cheap"],
        deprecated: false,
        isLocal: false, // Cloud-based model
        releaseDate: "2024-07-18",
        category: "general",
    },
    // Google AI Studio Models
    "gemini-2.5-pro": {
        id: "gemini-2.5-pro",
        name: "Gemini 2.5 Pro",
        provider: AIProviderName.GOOGLE_AI,
        description: "Google's most capable multimodal model with large context window",
        capabilities: {
            vision: true,
            functionCalling: true,
            codeGeneration: true,
            reasoning: true,
            multimodal: true,
            streaming: true,
            jsonMode: true,
        },
        pricing: {
            inputCostPer1K: 0.00125,
            outputCostPer1K: 0.005,
            currency: "USD",
        },
        performance: {
            speed: "medium",
            quality: "high",
            accuracy: "high",
        },
        limits: {
            maxContextTokens: 2097152, // 2M tokens
            maxOutputTokens: 8192,
            maxRequestsPerMinute: 360,
        },
        useCases: {
            coding: 9,
            creative: 8,
            analysis: 10,
            conversation: 8,
            reasoning: 9,
            translation: 9,
            summarization: 9,
        },
        aliases: ["gemini-pro", "google-flagship", "best-analysis"],
        deprecated: false,
        isLocal: false, // Cloud-based model
        releaseDate: "2024-12-11",
        category: "reasoning",
    },
    "gemini-2.5-flash": {
        id: "gemini-2.5-flash",
        name: "Gemini 2.5 Flash",
        provider: AIProviderName.GOOGLE_AI,
        description: "Fast and efficient multimodal model with large context",
        capabilities: {
            vision: true,
            functionCalling: true,
            codeGeneration: true,
            reasoning: true,
            multimodal: true,
            streaming: true,
            jsonMode: true,
        },
        pricing: {
            inputCostPer1K: 0.000075,
            outputCostPer1K: 0.0003,
            currency: "USD",
        },
        performance: {
            speed: "fast",
            quality: "high",
            accuracy: "high",
        },
        limits: {
            maxContextTokens: 1048576, // 1M tokens
            maxOutputTokens: 8192,
            maxRequestsPerMinute: 1000,
        },
        useCases: {
            coding: 8,
            creative: 7,
            analysis: 9,
            conversation: 8,
            reasoning: 8,
            translation: 8,
            summarization: 9,
        },
        aliases: ["gemini-flash", "google-fast", "best-value"],
        deprecated: false,
        isLocal: false, // Cloud-based model
        releaseDate: "2024-12-11",
        category: "general",
    },
    // Anthropic Models
    "claude-3-5-sonnet-20241022": {
        id: "claude-3-5-sonnet-20241022",
        name: "Claude 3.5 Sonnet",
        provider: AIProviderName.ANTHROPIC,
        description: "Anthropic's most capable model with excellent reasoning and coding",
        capabilities: {
            vision: true,
            functionCalling: true,
            codeGeneration: true,
            reasoning: true,
            multimodal: true,
            streaming: true,
            jsonMode: false,
        },
        pricing: {
            inputCostPer1K: 0.003,
            outputCostPer1K: 0.015,
            currency: "USD",
        },
        performance: {
            speed: "medium",
            quality: "high",
            accuracy: "high",
        },
        limits: {
            maxContextTokens: 200000,
            maxOutputTokens: 8192,
            maxRequestsPerMinute: 50,
        },
        useCases: {
            coding: 10,
            creative: 9,
            analysis: 9,
            conversation: 9,
            reasoning: 10,
            translation: 8,
            summarization: 8,
        },
        aliases: [
            "claude-3.5-sonnet",
            "claude-sonnet",
            "best-coding",
            "claude-latest",
        ],
        deprecated: false,
        isLocal: false, // Cloud-based model
        releaseDate: "2024-10-22",
        category: "coding",
    },
    "claude-3-5-haiku-20241022": {
        id: "claude-3-5-haiku-20241022",
        name: "Claude 3.5 Haiku",
        provider: AIProviderName.ANTHROPIC,
        description: "Fast and efficient Claude model for quick tasks",
        capabilities: {
            vision: false,
            functionCalling: true,
            codeGeneration: true,
            reasoning: true,
            multimodal: false,
            streaming: true,
            jsonMode: false,
        },
        pricing: {
            inputCostPer1K: 0.001,
            outputCostPer1K: 0.005,
            currency: "USD",
        },
        performance: {
            speed: "fast",
            quality: "high",
            accuracy: "high",
        },
        limits: {
            maxContextTokens: 200000,
            maxOutputTokens: 8192,
            maxRequestsPerMinute: 100,
        },
        useCases: {
            coding: 8,
            creative: 7,
            analysis: 8,
            conversation: 8,
            reasoning: 8,
            translation: 8,
            summarization: 9,
        },
        aliases: ["claude-3.5-haiku", "claude-haiku", "claude-fast"],
        deprecated: false,
        isLocal: false, // Cloud-based model
        releaseDate: "2024-10-22",
        category: "general",
    },
    // Mistral Models
    "mistral-small-latest": {
        id: "mistral-small-latest",
        name: "Mistral Small",
        provider: AIProviderName.MISTRAL,
        description: "Efficient model for simple tasks and cost-sensitive applications",
        capabilities: {
            vision: false,
            functionCalling: true,
            codeGeneration: true,
            reasoning: true,
            multimodal: false,
            streaming: true,
            jsonMode: true,
        },
        pricing: {
            inputCostPer1K: 0.001,
            outputCostPer1K: 0.003,
            currency: "USD",
        },
        performance: {
            speed: "fast",
            quality: "medium",
            accuracy: "medium",
        },
        limits: {
            maxContextTokens: 32768,
            maxOutputTokens: 8192,
            maxRequestsPerMinute: 200,
        },
        useCases: {
            coding: 6,
            creative: 6,
            analysis: 7,
            conversation: 7,
            reasoning: 6,
            translation: 7,
            summarization: 7,
        },
        aliases: ["mistral-small", "mistral-cheap"],
        deprecated: false,
        isLocal: false, // Cloud-based model
        releaseDate: "2024-02-26",
        category: "general",
    },
    // Ollama Models (local)
    "llama3.2:latest": {
        id: "llama3.2:latest",
        name: "Llama 3.2 Latest",
        provider: AIProviderName.OLLAMA,
        description: "Local Llama model for private, offline AI generation",
        capabilities: {
            vision: false,
            functionCalling: false,
            codeGeneration: true,
            reasoning: true,
            multimodal: false,
            streaming: true,
            jsonMode: false,
        },
        pricing: {
            inputCostPer1K: 0, // Local execution
            outputCostPer1K: 0,
            currency: "USD",
        },
        performance: {
            speed: "slow", // Depends on hardware
            quality: "medium",
            accuracy: "medium",
        },
        // NOTE: no maxRequestsPerMinute — local execution has no provider rate limit
        limits: {
            maxContextTokens: 4096,
            maxOutputTokens: 2048,
        },
        useCases: {
            coding: 6,
            creative: 7,
            analysis: 6,
            conversation: 7,
            reasoning: 6,
            translation: 6,
            summarization: 6,
        },
        aliases: ["llama3.2", "llama", "local", "offline"],
        deprecated: false,
        isLocal: true, // Ollama runs locally
        releaseDate: "2024-09-25",
        category: "general",
    },
};
373
/**
 * Model aliases registry for quick resolution
 *
 * Maps a lowercase alias string to a canonical model id. Seeded from each
 * model's own alias list, then extended with a fixed set of convenience
 * shortcuts (which win on key collision, since they are assigned last).
 */
export const MODEL_ALIASES = {};
// Build aliases from model data
for (const model of Object.values(MODEL_REGISTRY)) {
    for (const alias of model.aliases) {
        MODEL_ALIASES[alias.toLowerCase()] = model.id;
    }
}
// Add common aliases
Object.assign(MODEL_ALIASES, {
    latest: "gpt-4o", // Default latest model
    fastest: "gpt-4o-mini",
    cheapest: "gemini-2.5-flash",
    "best-coding": "claude-3-5-sonnet-20241022",
    "best-analysis": "gemini-2.5-pro",
    "best-creative": "claude-3-5-sonnet-20241022",
    "best-value": "gemini-2.5-flash",
    local: "llama3.2:latest",
});
394
/**
 * Use case to model mappings
 *
 * Each key is a use case; the value lists recommended model ids — presumably
 * ranked best-first (TODO confirm against resolver usage). Ids must match
 * keys of MODEL_REGISTRY.
 */
export const USE_CASE_RECOMMENDATIONS = {
    coding: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
    creative: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
    analysis: ["gemini-2.5-pro", "claude-3-5-sonnet-20241022", "gpt-4o"],
    conversation: [
        "gpt-4o",
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
    ],
    reasoning: ["claude-3-5-sonnet-20241022", "gemini-2.5-pro", "gpt-4o"],
    translation: ["gemini-2.5-pro", "gpt-4o", "claude-3-5-haiku-20241022"],
    summarization: [
        "gemini-2.5-flash",
        "gpt-4o-mini",
        "claude-3-5-haiku-20241022",
    ],
    "cost-effective": ["gemini-2.5-flash", "gpt-4o-mini", "mistral-small-latest"],
    "high-quality": ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
    fast: ["gpt-4o-mini", "gemini-2.5-flash", "claude-3-5-haiku-20241022"],
};
417
/**
 * Get all models
 *
 * @returns Every registered model entry, in registry declaration order.
 */
export function getAllModels() {
    return Object.keys(MODEL_REGISTRY).map((id) => MODEL_REGISTRY[id]);
}
423
/**
 * Get model by ID
 *
 * Looks up a model by its canonical id. Uses an own-property check so that
 * inherited Object.prototype members (e.g. "toString", "constructor") are
 * never returned for unregistered ids — a bare index into the plain-object
 * registry would leak them.
 *
 * @param id - Canonical model id (e.g. "gpt-4o").
 * @returns The registry entry, or undefined when the id is not registered.
 */
export function getModelById(id) {
    return Object.prototype.hasOwnProperty.call(MODEL_REGISTRY, id)
        ? MODEL_REGISTRY[id]
        : undefined;
}
429
/**
 * Get models by provider
 *
 * @param provider - Provider identifier to match against each entry.
 * @returns All registry entries belonging to the given provider, in
 *          registry declaration order.
 */
export function getModelsByProvider(provider) {
    const matches = [];
    for (const model of Object.values(MODEL_REGISTRY)) {
        if (model.provider === provider) {
            matches.push(model);
        }
    }
    return matches;
}
435
/**
 * Get available providers
 *
 * @returns The distinct providers appearing in the registry, deduplicated
 *          and in first-seen order (Set preserves insertion order).
 */
export function getAvailableProviders() {
    const uniqueProviders = new Set(Object.values(MODEL_REGISTRY).map((model) => model.provider));
    return [...uniqueProviders];
}
445
+ /**
446
+ * Calculate estimated cost for a request
447
+ */
448
+ export function calculateCost(model, inputTokens, outputTokens) {
449
+ const inputCost = (inputTokens / 1000) * model.pricing.inputCostPer1K;
450
+ const outputCost = (outputTokens / 1000) * model.pricing.outputCostPer1K;
451
+ return inputCost + outputCost;
452
+ }
453
+ /**
454
+ * Format model for display
455
+ */
456
+ export function formatModelForDisplay(model) {
457
+ const result = {
458
+ id: model.id,
459
+ name: model.name,
460
+ provider: model.provider,
461
+ description: model.description,
462
+ category: model.category,
463
+ capabilities: Object.entries(model.capabilities)
464
+ .filter(([_, supported]) => supported)
465
+ .map(([capability]) => capability),
466
+ pricing: {
467
+ input: `$${model.pricing.inputCostPer1K.toFixed(6)}/1K tokens`,
468
+ output: `$${model.pricing.outputCostPer1K.toFixed(6)}/1K tokens`,
469
+ },
470
+ performance: {
471
+ speed: model.performance.speed,
472
+ quality: model.performance.quality,
473
+ accuracy: model.performance.accuracy,
474
+ },
475
+ contextSize: `${(model.limits.maxContextTokens / 1000).toFixed(0)}K tokens`,
476
+ maxOutput: `${(model.limits.maxOutputTokens / 1000).toFixed(0)}K tokens`,
477
+ aliases: model.aliases,
478
+ };
479
+ if (model.releaseDate) {
480
+ result.releaseDate = model.releaseDate;
481
+ }
482
+ return result;
483
+ }
@@ -0,0 +1,115 @@
1
+ /**
2
+ * Model Resolver for NeuroLink CLI Commands
3
+ * Provides model resolution, search, and recommendation functionality
4
+ * Part of Phase 4.1 - Models Command System
5
+ */
6
+ import { AIProviderName } from "../core/types.js";
7
+ import type { JsonValue } from "../types/common.js";
8
+ import { type ModelInfo, type ModelSearchFilters, type ModelSearchResult, type ModelCapabilities, type UseCaseSuitability } from "./modelRegistry.js";
9
/**
 * Model recommendation context
 *
 * Caller-supplied constraints used to score and select a model.
 * All fields are optional; an empty context imposes no restrictions.
 */
export interface RecommendationContext {
    /** Primary use case to optimize for (e.g. coding, summarization). */
    useCase?: keyof UseCaseSuitability;
    /** Upper bound on acceptable cost — presumably USD per 1K tokens; confirm against resolver implementation. */
    maxCost?: number;
    /** Minimum acceptable quality tier. */
    minQuality?: "low" | "medium" | "high";
    /** Capabilities the chosen model must support (e.g. vision, streaming). */
    requireCapabilities?: (keyof ModelCapabilities)[];
    /** Providers to exclude from consideration. */
    excludeProviders?: AIProviderName[];
    /** Required context window size — presumably in tokens; verify against callers. */
    contextSize?: number;
    /** Prefer locally-run models (e.g. Ollama) over cloud providers. */
    preferLocal?: boolean;
}
21
/**
 * Model recommendation result
 */
export interface ModelRecommendation {
    /** The selected model for the given context. */
    model: ModelInfo;
    /** Relative suitability score computed by the resolver. */
    score: number;
    /** Human-readable reasons explaining the selection. */
    reasoning: string[];
    /** Other models that also satisfied the context. */
    alternatives: ModelInfo[];
}
30
/**
 * Model comparison result
 *
 * Side-by-side breakdown of a set of models across capabilities, pricing,
 * performance, and context-window size.
 */
export interface ModelComparison {
    /** The models that were compared. */
    models: ModelInfo[];
    comparison: {
        /** Per capability — presumably the subset of models supporting it; confirm in implementation. */
        capabilities: Record<keyof ModelCapabilities, ModelInfo[]>;
        /** Extremes by cost. */
        pricing: {
            cheapest: ModelInfo;
            mostExpensive: ModelInfo;
        };
        /** Models grouped by performance key — key semantics defined by the implementation. */
        performance: Record<string, ModelInfo[]>;
        /** Extremes by context-window size. */
        contextSize: {
            largest: ModelInfo;
            smallest: ModelInfo;
        };
    };
}
48
/**
 * Model resolver class with advanced search and recommendation functionality
 *
 * All members are static; the class serves as a namespace and is never
 * instantiated.
 */
export declare class ModelResolver {
    /**
     * Resolve model ID from alias or fuzzy name
     * @param query - Model id, registered alias, or approximate name.
     * @returns The matching model, or null when nothing resolves.
     */
    static resolveModel(query: string): ModelInfo | null;
    /**
     * Search models with advanced filtering
     * @param filters - Criteria to match models against.
     * @returns Matching models with per-result match metadata.
     */
    static searchModels(filters: ModelSearchFilters): ModelSearchResult[];
    /**
     * Get best model for specific use case
     * @param context - Constraints and preferences for the recommendation.
     * @returns The top-scored model plus score, reasoning, and alternatives.
     */
    static getBestModel(context: RecommendationContext): ModelRecommendation;
    /**
     * Compare multiple models
     * @param modelIds - Ids of the models to compare.
     */
    static compareModels(modelIds: string[]): ModelComparison;
    /**
     * Get models by category
     */
    static getModelsByCategory(category: ModelInfo["category"]): ModelInfo[];
    /**
     * Get recommended models for use case
     */
    static getRecommendedModelsForUseCase(useCase: string): ModelInfo[];
    /**
     * Calculate cost comparison for models
     * @param models - Models to price.
     * @param inputTokens - Assumed prompt size; default defined by the implementation.
     * @param outputTokens - Assumed completion size; default defined by the implementation.
     */
    static calculateCostComparison(models: ModelInfo[], inputTokens?: number, outputTokens?: number): Array<{
        model: ModelInfo;
        cost: number;
        costPer1K: number;
    }>;
    /**
     * Get model statistics
     * @returns A JSON-serializable statistics summary.
     */
    static getModelStatistics(): JsonValue;
    /**
     * Evaluate how well a model matches search filters
     */
    private static evaluateModelMatch;
    /**
     * Score model for recommendation context
     */
    private static scoreModelForContext;
    /**
     * Generate recommendation reasoning
     */
    private static generateRecommendationReasoning;
}
101
/**
 * Utility functions for CLI integration
 */
/**
 * Format search results for CLI display
 * @returns A JSON-serializable structure suitable for printing.
 */
export declare function formatSearchResults(results: ModelSearchResult[]): JsonValue;
/**
 * Format recommendation for CLI display
 * @returns A JSON-serializable structure suitable for printing.
 */
export declare function formatRecommendation(recommendation: ModelRecommendation): JsonValue;
/**
 * Format model comparison for CLI display
 * @returns A JSON-serializable structure suitable for printing.
 */
export declare function formatComparison(comparison: ModelComparison): JsonValue;