@contentgrowth/llm-service 1.1.2 → 1.2.0

package/dist/index.d.cts CHANGED
@@ -155,95 +155,6 @@ declare class OpenAIProvider extends BaseLLMProvider {
     getDeepResearchStatus(operationId: any): Promise<void>;
 }
 
-declare class GeminiProvider extends BaseLLMProvider {
-    client: GoogleGenAI;
-    models: any;
-    defaultModel: any;
-    _pendingOperations: Map<any, any>;
-    chat(userMessage: any, systemPrompt?: string, options?: {}): Promise<{
-        text: string;
-    }>;
-    chatCompletion(messages: any, systemPrompt: any, tools?: any, options?: {}): Promise<{
-        model: any;
-        parsedContent?: any;
-        content: string;
-        thought_signature: any;
-        tool_calls: {
-            type: string;
-            function: _google_genai.FunctionCall;
-            thought_signature: any;
-        }[];
-        finishReason: string;
-        _rawFinishReason: _google_genai.FinishReason;
-        _responseFormat: any;
-        usage: {
-            prompt_tokens: number;
-            completion_tokens: number;
-            total_tokens: number;
-        };
-    }>;
-    _chatCompletionWithModel(messages: any, systemPrompt: any, tools: any, modelName: any, maxTokens: any, temperature: any, options?: {}): Promise<{
-        model: any;
-        parsedContent?: any;
-        content: string;
-        thought_signature: any;
-        tool_calls: {
-            type: string;
-            function: _google_genai.FunctionCall;
-            thought_signature: any;
-        }[];
-        finishReason: string;
-        _rawFinishReason: _google_genai.FinishReason;
-        _responseFormat: any;
-        usage: {
-            prompt_tokens: number;
-            completion_tokens: number;
-            total_tokens: number;
-        };
-    }>;
-    _buildGenerationConfig(options: any, maxTokens: any, temperature: any): {
-        temperature: any;
-        maxOutputTokens: any;
-    };
-    _convertToGeminiSchema(jsonSchema: any): {
-        type: string;
-    };
-    _shouldAutoParse(options: any): boolean;
-    _safeJsonParse(content: any): any;
-    executeTools(tool_calls: any, messages: any, tenantId: any, toolImplementations: any, env: any): Promise<void>;
-    imageGeneration(prompt: any, systemPrompt: any, options?: {}): Promise<{
-        imageData: string;
-        mimeType: string;
-        thought_signature: any;
-    }>;
-    _getModelForTier(tier: any): any;
-    startVideoGeneration(prompt: any, images: any, modelName: any, systemPrompt: any, options?: {}): Promise<{
-        operationName: string;
-    }>;
-    getVideoGenerationStatus(operationName: any): Promise<{
-        done: any;
-        progress: any;
-        state: any;
-    }>;
-    startDeepResearch(prompt: any, options?: {}): Promise<{
-        operationId: string;
-    }>;
-    getDeepResearchStatus(operationId: any): Promise<{
-        state: any;
-        done: boolean;
-        error: any;
-    }>;
-    /**
-     * Extract structured data from a file (PDF, Image, etc.) using Gemini Multimodal capabilities.
-     * @param {Buffer|string} fileData - Base64 string or Buffer of the file
-     * @param {string} mimeType - Mime type (e.g., 'application/pdf', 'image/png')
-     * @param {string} prompt - Extraction prompt
-     * @param {Object} schema - JSON schema for the output
-     * @param {Object} options - Additional options
-     */
-    extractWithLLM(fileData: Buffer | string, mimeType: string, prompt: string, schema?: any, options?: any): Promise<any>;
-}
-
 /**
  * Create a Hono handler for speech transcription.
  *
@@ -416,26 +327,48 @@ declare class DefaultConfigProvider extends BaseConfigProvider$1 {
     getConfig(tenantId: any, env: any): Promise<{
         provider: any;
         apiKey: any;
+        project: any;
+        location: any;
         models: any;
         temperature: number;
         maxTokens: number;
+    } | {
+        provider: any;
+        apiKey: any;
+        models: any;
+        temperature: number;
+        maxTokens: number;
+        project?: undefined;
+        location?: undefined;
     }>;
     _loadFromTenantDO(tenantId: any, env: any): Promise<any>;
     _buildTenantConfig(tenantConfig: any, env: any): {
         provider: any;
-        apiKey: any;
         models: any;
+        apiKey: any;
+        project: any;
+        location: any;
         temperature: number;
         maxTokens: number;
         capabilities: any;
         isTenantOwned: boolean;
     };
     _getSystemConfig(env: any): {
+        provider: any;
+        apiKey: any;
+        project: any;
+        location: any;
+        models: any;
+        temperature: number;
+        maxTokens: number;
+    } | {
         provider: any;
         apiKey: any;
         models: any;
         temperature: number;
         maxTokens: number;
+        project?: undefined;
+        location?: undefined;
     };
 }
 
@@ -462,6 +395,22 @@ declare namespace MODEL_CONFIGS {
         export let video: string;
         export let image: string;
     }
+    namespace vertex {
+        let _default_2: string;
+        export { _default_2 as default };
+        let edge_2: string;
+        export { edge_2 as edge };
+        let fast_2: string;
+        export { fast_2 as fast };
+        let cost_2: string;
+        export { cost_2 as cost };
+        let free_2: string;
+        export { free_2 as free };
+        let video_1: string;
+        export { video_1 as video };
+        let image_1: string;
+        export { image_1 as image };
+    }
 }
 declare class ConfigManager {
     static _provider: DefaultConfigProvider;
@@ -471,12 +420,117 @@ declare class ConfigManager {
      */
     static setConfigProvider(provider: BaseConfigProvider): void;
     static getConfig(tenantId: any, env: any): Promise<{
+        provider: any;
+        apiKey: any;
+        project: any;
+        location: any;
+        models: any;
+        temperature: number;
+        maxTokens: number;
+    } | {
         provider: any;
         apiKey: any;
         models: any;
         temperature: number;
         maxTokens: number;
+        project?: undefined;
+        location?: undefined;
+    }>;
+}
+
+/**
+ * Unified Provider for Google models (AI Studio and Vertex AI)
+ *
+ * Client construction follows pi-mono's pattern:
+ * - Gemini (AI Studio): { apiKey }
+ * - Vertex + API Key: { vertexai: true, apiKey } — no project/location
+ * - Vertex + ADC/IAM: { vertexai: true, project, location } — no apiKey
+ */
+declare class GoogleProvider extends BaseLLMProvider {
+    models: any;
+    defaultModel: any;
+    _pendingOperations: Map<any, any>;
+    client: GoogleGenAI;
+    /**
+     * Perform the actual API call. Both AI Studio and Vertex AI use the
+     * same @google/genai SDK method — the routing is determined by how
+     * the client was constructed.
+     */
+    _generateContent(requestOptions: any): Promise<_google_genai.GenerateContentResponse>;
+    chat(userMessage: any, systemPrompt?: string, options?: {}): Promise<{
+        text: string;
+    }>;
+    chatCompletion(messages: any, systemPrompt: any, tools?: any, options?: {}): Promise<{
+        model: any;
+        parsedContent?: any;
+        content: string;
+        thought_signature: any;
+        tool_calls: {
+            type: string;
+            function: _google_genai.FunctionCall;
+            thought_signature: any;
+        }[];
+        finishReason: string;
+        _rawFinishReason: _google_genai.FinishReason;
+        _responseFormat: any;
+        usage: {
+            prompt_tokens: number;
+            completion_tokens: number;
+            total_tokens: number;
+        };
+    }>;
+    _chatCompletionWithModel(messages: any, systemPrompt: any, tools: any, modelName: any, maxTokens: any, temperature: any, options?: {}): Promise<{
+        model: any;
+        parsedContent?: any;
+        content: string;
+        thought_signature: any;
+        tool_calls: {
+            type: string;
+            function: _google_genai.FunctionCall;
+            thought_signature: any;
+        }[];
+        finishReason: string;
+        _rawFinishReason: _google_genai.FinishReason;
+        _responseFormat: any;
+        usage: {
+            prompt_tokens: number;
+            completion_tokens: number;
+            total_tokens: number;
+        };
+    }>;
+    _buildGenerationConfig(options: any, maxTokens: any, temperature: any): {
+        temperature: any;
+        maxOutputTokens: any;
+    };
+    _convertToGeminiSchema(jsonSchema: any): {
+        type: string;
+    };
+    _shouldAutoParse(options: any): boolean;
+    _safeJsonParse(content: any): any;
+    executeTools(tool_calls: any, messages: any, tenantId: any, toolImplementations: any, env: any): Promise<void>;
+    imageGeneration(prompt: any, systemPrompt: any, options?: {}): Promise<{
+        imageData: string;
+        mimeType: string;
+        thought_signature: any;
+    }>;
+    _getModelForTier(tier: any): any;
+    startVideoGeneration(prompt: any, images: any, modelName: any, systemPrompt: any, options?: {}): Promise<{
+        operationName: string;
+    }>;
+    getVideoGenerationStatus(operationName: any): Promise<{
+        done: any;
+        progress: any;
+        state: any;
+    }>;
+    startDeepResearch(prompt: any, options?: {}): Promise<{
+        operationId: string;
+    }>;
+    getDeepResearchStatus(operationId: any): Promise<{
+        state: any;
+        done: boolean;
+        error: any;
     }>;
+    extractWithLLM(fileData: any, mimeType: any, prompt: any, schema?: any, options?: {}): Promise<any>;
 }
 
 /**
@@ -613,4 +667,4 @@ declare class TranscriptionServiceException extends Error {
     statusCode: number;
 }
 
-export { BaseConfigProvider$1 as BaseConfigProvider, ConfigManager, DefaultConfigProvider, FINISH_REASONS, GeminiProvider, LLMService, LLMServiceException, MODEL_CONFIGS, OpenAIProvider, TranscriptionService, TranscriptionServiceException, createSpeechHandler, extractJsonFromResponse, extractTextAndJson, handleApiError, sanitizeError };
+export { BaseConfigProvider$1 as BaseConfigProvider, ConfigManager, DefaultConfigProvider, FINISH_REASONS, GoogleProvider as GeminiProvider, GoogleProvider, LLMService, LLMServiceException, MODEL_CONFIGS, OpenAIProvider, TranscriptionService, TranscriptionServiceException, GoogleProvider as VertexProvider, createSpeechHandler, extractJsonFromResponse, extractTextAndJson, handleApiError, sanitizeError };
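For orientation: the net change above is that GeminiProvider is removed and replaced by a unified GoogleProvider covering both AI Studio and Vertex AI, with project/location threaded through the config types. Below is a minimal sketch of the three client-construction patterns named in the GoogleProvider doc comment, assuming the @google/genai SDK; GoogleClientConfig and buildClient are hypothetical names for illustration, and how the package actually decides between Vertex and AI Studio is not visible in this diff.

import { GoogleGenAI } from "@google/genai";

// Hypothetical config shape, mirroring the fields added to getConfig() above.
interface GoogleClientConfig {
    vertexai?: boolean;   // assumed flag; the package may derive this from `provider`
    apiKey?: string;
    project?: string;
    location?: string;
}

// Sketch of the three construction patterns from the doc comment.
function buildClient(cfg: GoogleClientConfig): GoogleGenAI {
    if (!cfg.vertexai) {
        // Gemini (AI Studio): API key only.
        return new GoogleGenAI({ apiKey: cfg.apiKey });
    }
    if (cfg.apiKey) {
        // Vertex + API key: no project/location.
        return new GoogleGenAI({ vertexai: true, apiKey: cfg.apiKey });
    }
    // Vertex + ADC/IAM: project and location, no apiKey.
    return new GoogleGenAI({ vertexai: true, project: cfg.project, location: cfg.location });
}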
package/dist/index.d.ts CHANGED
@@ -155,95 +155,6 @@ declare class OpenAIProvider extends BaseLLMProvider {
     getDeepResearchStatus(operationId: any): Promise<void>;
 }
 
-declare class GeminiProvider extends BaseLLMProvider {
-    client: GoogleGenAI;
-    models: any;
-    defaultModel: any;
-    _pendingOperations: Map<any, any>;
-    chat(userMessage: any, systemPrompt?: string, options?: {}): Promise<{
-        text: string;
-    }>;
-    chatCompletion(messages: any, systemPrompt: any, tools?: any, options?: {}): Promise<{
-        model: any;
-        parsedContent?: any;
-        content: string;
-        thought_signature: any;
-        tool_calls: {
-            type: string;
-            function: _google_genai.FunctionCall;
-            thought_signature: any;
-        }[];
-        finishReason: string;
-        _rawFinishReason: _google_genai.FinishReason;
-        _responseFormat: any;
-        usage: {
-            prompt_tokens: number;
-            completion_tokens: number;
-            total_tokens: number;
-        };
-    }>;
-    _chatCompletionWithModel(messages: any, systemPrompt: any, tools: any, modelName: any, maxTokens: any, temperature: any, options?: {}): Promise<{
-        model: any;
-        parsedContent?: any;
-        content: string;
-        thought_signature: any;
-        tool_calls: {
-            type: string;
-            function: _google_genai.FunctionCall;
-            thought_signature: any;
-        }[];
-        finishReason: string;
-        _rawFinishReason: _google_genai.FinishReason;
-        _responseFormat: any;
-        usage: {
-            prompt_tokens: number;
-            completion_tokens: number;
-            total_tokens: number;
-        };
-    }>;
-    _buildGenerationConfig(options: any, maxTokens: any, temperature: any): {
-        temperature: any;
-        maxOutputTokens: any;
-    };
-    _convertToGeminiSchema(jsonSchema: any): {
-        type: string;
-    };
-    _shouldAutoParse(options: any): boolean;
-    _safeJsonParse(content: any): any;
-    executeTools(tool_calls: any, messages: any, tenantId: any, toolImplementations: any, env: any): Promise<void>;
-    imageGeneration(prompt: any, systemPrompt: any, options?: {}): Promise<{
-        imageData: string;
-        mimeType: string;
-        thought_signature: any;
-    }>;
-    _getModelForTier(tier: any): any;
-    startVideoGeneration(prompt: any, images: any, modelName: any, systemPrompt: any, options?: {}): Promise<{
-        operationName: string;
-    }>;
-    getVideoGenerationStatus(operationName: any): Promise<{
-        done: any;
-        progress: any;
-        state: any;
-    }>;
-    startDeepResearch(prompt: any, options?: {}): Promise<{
-        operationId: string;
-    }>;
-    getDeepResearchStatus(operationId: any): Promise<{
-        state: any;
-        done: boolean;
-        error: any;
-    }>;
-    /**
-     * Extract structured data from a file (PDF, Image, etc.) using Gemini Multimodal capabilities.
-     * @param {Buffer|string} fileData - Base64 string or Buffer of the file
-     * @param {string} mimeType - Mime type (e.g., 'application/pdf', 'image/png')
-     * @param {string} prompt - Extraction prompt
-     * @param {Object} schema - JSON schema for the output
-     * @param {Object} options - Additional options
-     */
-    extractWithLLM(fileData: Buffer | string, mimeType: string, prompt: string, schema?: any, options?: any): Promise<any>;
-}
-
 /**
  * Create a Hono handler for speech transcription.
  *
@@ -416,26 +327,48 @@ declare class DefaultConfigProvider extends BaseConfigProvider$1 {
     getConfig(tenantId: any, env: any): Promise<{
         provider: any;
         apiKey: any;
+        project: any;
+        location: any;
         models: any;
         temperature: number;
         maxTokens: number;
+    } | {
+        provider: any;
+        apiKey: any;
+        models: any;
+        temperature: number;
+        maxTokens: number;
+        project?: undefined;
+        location?: undefined;
     }>;
     _loadFromTenantDO(tenantId: any, env: any): Promise<any>;
     _buildTenantConfig(tenantConfig: any, env: any): {
         provider: any;
-        apiKey: any;
         models: any;
+        apiKey: any;
+        project: any;
+        location: any;
         temperature: number;
         maxTokens: number;
         capabilities: any;
         isTenantOwned: boolean;
     };
     _getSystemConfig(env: any): {
+        provider: any;
+        apiKey: any;
+        project: any;
+        location: any;
+        models: any;
+        temperature: number;
+        maxTokens: number;
+    } | {
         provider: any;
         apiKey: any;
         models: any;
         temperature: number;
         maxTokens: number;
+        project?: undefined;
+        location?: undefined;
     };
 }
 
@@ -462,6 +395,22 @@ declare namespace MODEL_CONFIGS {
         export let video: string;
         export let image: string;
     }
+    namespace vertex {
+        let _default_2: string;
+        export { _default_2 as default };
+        let edge_2: string;
+        export { edge_2 as edge };
+        let fast_2: string;
+        export { fast_2 as fast };
+        let cost_2: string;
+        export { cost_2 as cost };
+        let free_2: string;
+        export { free_2 as free };
+        let video_1: string;
+        export { video_1 as video };
+        let image_1: string;
+        export { image_1 as image };
+    }
 }
 declare class ConfigManager {
     static _provider: DefaultConfigProvider;
@@ -471,12 +420,117 @@ declare class ConfigManager {
      */
     static setConfigProvider(provider: BaseConfigProvider): void;
     static getConfig(tenantId: any, env: any): Promise<{
+        provider: any;
+        apiKey: any;
+        project: any;
+        location: any;
+        models: any;
+        temperature: number;
+        maxTokens: number;
+    } | {
         provider: any;
         apiKey: any;
         models: any;
         temperature: number;
         maxTokens: number;
+        project?: undefined;
+        location?: undefined;
+    }>;
+}
+
+/**
+ * Unified Provider for Google models (AI Studio and Vertex AI)
+ *
+ * Client construction follows pi-mono's pattern:
+ * - Gemini (AI Studio): { apiKey }
+ * - Vertex + API Key: { vertexai: true, apiKey } — no project/location
+ * - Vertex + ADC/IAM: { vertexai: true, project, location } — no apiKey
+ */
+declare class GoogleProvider extends BaseLLMProvider {
+    models: any;
+    defaultModel: any;
+    _pendingOperations: Map<any, any>;
+    client: GoogleGenAI;
+    /**
+     * Perform the actual API call. Both AI Studio and Vertex AI use the
+     * same @google/genai SDK method — the routing is determined by how
+     * the client was constructed.
+     */
+    _generateContent(requestOptions: any): Promise<_google_genai.GenerateContentResponse>;
+    chat(userMessage: any, systemPrompt?: string, options?: {}): Promise<{
+        text: string;
+    }>;
+    chatCompletion(messages: any, systemPrompt: any, tools?: any, options?: {}): Promise<{
+        model: any;
+        parsedContent?: any;
+        content: string;
+        thought_signature: any;
+        tool_calls: {
+            type: string;
+            function: _google_genai.FunctionCall;
+            thought_signature: any;
+        }[];
+        finishReason: string;
+        _rawFinishReason: _google_genai.FinishReason;
+        _responseFormat: any;
+        usage: {
+            prompt_tokens: number;
+            completion_tokens: number;
+            total_tokens: number;
+        };
+    }>;
+    _chatCompletionWithModel(messages: any, systemPrompt: any, tools: any, modelName: any, maxTokens: any, temperature: any, options?: {}): Promise<{
+        model: any;
+        parsedContent?: any;
+        content: string;
+        thought_signature: any;
+        tool_calls: {
+            type: string;
+            function: _google_genai.FunctionCall;
+            thought_signature: any;
+        }[];
+        finishReason: string;
+        _rawFinishReason: _google_genai.FinishReason;
+        _responseFormat: any;
+        usage: {
+            prompt_tokens: number;
+            completion_tokens: number;
+            total_tokens: number;
+        };
+    }>;
+    _buildGenerationConfig(options: any, maxTokens: any, temperature: any): {
+        temperature: any;
+        maxOutputTokens: any;
+    };
+    _convertToGeminiSchema(jsonSchema: any): {
+        type: string;
+    };
+    _shouldAutoParse(options: any): boolean;
+    _safeJsonParse(content: any): any;
+    executeTools(tool_calls: any, messages: any, tenantId: any, toolImplementations: any, env: any): Promise<void>;
+    imageGeneration(prompt: any, systemPrompt: any, options?: {}): Promise<{
+        imageData: string;
+        mimeType: string;
+        thought_signature: any;
+    }>;
+    _getModelForTier(tier: any): any;
+    startVideoGeneration(prompt: any, images: any, modelName: any, systemPrompt: any, options?: {}): Promise<{
+        operationName: string;
+    }>;
+    getVideoGenerationStatus(operationName: any): Promise<{
+        done: any;
+        progress: any;
+        state: any;
+    }>;
+    startDeepResearch(prompt: any, options?: {}): Promise<{
+        operationId: string;
+    }>;
+    getDeepResearchStatus(operationId: any): Promise<{
+        state: any;
+        done: boolean;
+        error: any;
     }>;
+    extractWithLLM(fileData: any, mimeType: any, prompt: any, schema?: any, options?: {}): Promise<any>;
 }
 
 /**
@@ -613,4 +667,4 @@ declare class TranscriptionServiceException extends Error {
     statusCode: number;
 }
 
-export { BaseConfigProvider$1 as BaseConfigProvider, ConfigManager, DefaultConfigProvider, FINISH_REASONS, GeminiProvider, LLMService, LLMServiceException, MODEL_CONFIGS, OpenAIProvider, TranscriptionService, TranscriptionServiceException, createSpeechHandler, extractJsonFromResponse, extractTextAndJson, handleApiError, sanitizeError };
+export { BaseConfigProvider$1 as BaseConfigProvider, ConfigManager, DefaultConfigProvider, FINISH_REASONS, GoogleProvider as GeminiProvider, GoogleProvider, LLMService, LLMServiceException, MODEL_CONFIGS, OpenAIProvider, TranscriptionService, TranscriptionServiceException, GoogleProvider as VertexProvider, createSpeechHandler, extractJsonFromResponse, extractTextAndJson, handleApiError, sanitizeError };
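Note the backward-compatible re-exports on the final line of both declaration files: GeminiProvider and VertexProvider are now aliases of GoogleProvider, so existing imports keep resolving. A quick check of what the new surface implies, assuming a standard import of the published package:

import { GeminiProvider, GoogleProvider, VertexProvider } from "@contentgrowth/llm-service";

// All three names point at the same class as of 1.2.0.
console.log(GeminiProvider === GoogleProvider); // true
console.log(VertexProvider === GoogleProvider); // true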