@contentgrowth/llm-service 1.1.1 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +95 -88
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +145 -91
- package/dist/index.d.ts +145 -91
- package/dist/index.js +93 -88
- package/dist/index.js.map +1 -1
- package/package.json +16 -14
package/dist/index.cjs
CHANGED
@@ -35,13 +35,15 @@ __export(index_exports, {
   ConfigManager: () => ConfigManager,
   DefaultConfigProvider: () => DefaultConfigProvider,
   FINISH_REASONS: () => FINISH_REASONS,
-  GeminiProvider: () =>
+  GeminiProvider: () => GoogleProvider,
+  GoogleProvider: () => GoogleProvider,
   LLMService: () => LLMService,
   LLMServiceException: () => LLMServiceException,
   MODEL_CONFIGS: () => MODEL_CONFIGS,
   OpenAIProvider: () => OpenAIProvider,
   TranscriptionService: () => TranscriptionService,
   TranscriptionServiceException: () => TranscriptionServiceException,
+  VertexProvider: () => GoogleProvider,
   createSpeechHandler: () => createSpeechHandler,
   extractJsonFromResponse: () => extractJsonFromResponse,
   extractTextAndJson: () => extractTextAndJson,
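
Note on the export map: `GoogleProvider` is the renamed class, while `GeminiProvider` and `VertexProvider` are aliases that resolve to the same constructor, so existing imports keep working. A minimal consumer sketch (CommonJS, matching the `index.cjs` build above):

```js
// All three exported names resolve to the same class (see the export map above).
const {
  GoogleProvider,
  GeminiProvider, // backward-compatible alias
  VertexProvider  // alias intended for Vertex AI usage
} = require("@contentgrowth/llm-service");

console.log(GeminiProvider === GoogleProvider); // true
console.log(VertexProvider === GoogleProvider); // true
```
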
@@ -119,10 +121,12 @@ var DefaultConfigProvider = class extends BaseConfigProvider {
   _buildTenantConfig(tenantConfig, env) {
     return {
       provider: tenantConfig.provider,
-      apiKey: tenantConfig.api_key,
       models: MODEL_CONFIGS[tenantConfig.provider],
+      apiKey: tenantConfig.api_key,
+      project: tenantConfig.project,
+      location: tenantConfig.location,
       temperature: parseFloat(env.DEFAULT_TEMPERATURE || "0.7"),
-      maxTokens: parseInt(env.DEFAULT_MAX_TOKENS || "
+      maxTokens: parseInt(env.DEFAULT_MAX_TOKENS || "65536"),
       capabilities: tenantConfig.capabilities || { chat: true, image: false, video: false },
       isTenantOwned: true
     };
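
For context, `_buildTenantConfig` now forwards `project` and `location` from the tenant record in addition to `api_key`. A sketch of a tenant config that would exercise the new fields (snake_case keys as read above; values are illustrative):

```js
// Hypothetical tenant-owned config for a Vertex AI tenant.
const tenantConfig = {
  provider: "vertex",
  api_key: null, // optional when project/location are supplied
  project: "my-gcp-project",
  location: "us-central1",
  capabilities: { chat: true, image: false, video: false }
};
```
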
@@ -153,13 +157,35 @@ var DefaultConfigProvider = class extends BaseConfigProvider {
         image: env.GEMINI_IMAGE_MODEL || providerDefaults.image,
         video: env.GEMINI_VIDEO_MODEL || providerDefaults.video
       };
+    } else if (provider === "vertex") {
+      apiKey = env.VERTEX_API_KEY;
+      const project = env.VERTEX_PROJECT || env.GOOGLE_CLOUD_PROJECT;
+      const location = env.VERTEX_LOCATION || env.GOOGLE_CLOUD_LOCATION || "us-central1";
+      models = {
+        default: env.VERTEX_MODEL || providerDefaults.default,
+        edge: env.VERTEX_MODEL_EDGE || providerDefaults.edge,
+        fast: env.VERTEX_MODEL_FAST || providerDefaults.fast,
+        cost: env.VERTEX_MODEL_COST || providerDefaults.cost,
+        free: env.VERTEX_MODEL_FREE || providerDefaults.free,
+        image: env.VERTEX_IMAGE_MODEL || providerDefaults.image,
+        video: env.VERTEX_VIDEO_MODEL || providerDefaults.video
+      };
+      return {
+        provider,
+        apiKey,
+        project,
+        location,
+        models,
+        temperature: parseFloat(env.DEFAULT_TEMPERATURE || "0.7"),
+        maxTokens: parseInt(env.DEFAULT_MAX_TOKENS || "65536")
+      };
     }
     return {
       provider,
       apiKey,
       models,
       temperature: parseFloat(env.DEFAULT_TEMPERATURE || "0.7"),
-      maxTokens: parseInt(env.DEFAULT_MAX_TOKENS || "
+      maxTokens: parseInt(env.DEFAULT_MAX_TOKENS || "65536")
     };
   }
 };
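
The new `vertex` branch above is driven entirely by environment variables. A sketch of the keys it consults (values are illustrative; only the `us-central1` fallback comes from the code itself):

```js
// Hypothetical env object for the default (non-tenant) Vertex AI config.
const env = {
  VERTEX_API_KEY: "",                     // optional; otherwise project/location auth is used
  VERTEX_PROJECT: "my-gcp-project",       // falls back to GOOGLE_CLOUD_PROJECT
  VERTEX_LOCATION: "europe-west4",        // falls back to GOOGLE_CLOUD_LOCATION, then "us-central1"
  VERTEX_MODEL: "gemini-3-flash-preview", // per-tier overrides: VERTEX_MODEL_EDGE, _FAST, _COST, _FREE
  DEFAULT_TEMPERATURE: "0.7",
  DEFAULT_MAX_TOKENS: "65536"
};
```
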
@@ -187,6 +213,15 @@ var MODEL_CONFIGS = {
     video: "veo",
     image: "gemini-3-pro-image-preview"
     // Default image generation model
+  },
+  vertex: {
+    default: "gemini-3-flash-preview",
+    edge: "gemini-3-pro-preview",
+    fast: "gemini-3-flash-preview",
+    cost: "gemini-3-flash-preview",
+    free: "gemini-3-flash-preview",
+    video: "veo",
+    image: "gemini-3-pro-image-preview"
   }
 };
 var ConfigManager = class {
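
The new `vertex` entry mirrors the Gemini tier table, so tier lookups behave identically for both providers:

```js
// Tier lookup against the new table (values as shipped in MODEL_CONFIGS above).
const models = MODEL_CONFIGS.vertex;
console.log(models.default); // "gemini-3-flash-preview"
console.log(models.edge);    // "gemini-3-pro-preview"
console.log(models.image);   // "gemini-3-pro-image-preview"
```
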
@@ -584,25 +619,43 @@ var OpenAIProvider = class extends BaseLLMProvider {
   }
 };
 
-// src/llm/providers/
+// src/llm/providers/google-provider.js
 var import_genai = require("@google/genai");
-var
+var GoogleProvider = class extends BaseLLMProvider {
   constructor(config) {
     super(config);
-    const clientConfig = {};
-    if (config.project || config.location) {
-      console.log(`[GeminiProvider] Initializing with Vertex AI (Project: ${config.project}, Location: ${config.location || "us-central1"})`);
-      clientConfig.vertexAI = {
-        project: config.project,
-        location: config.location || "us-central1"
-      };
-    } else {
-      clientConfig.apiKey = config.apiKey;
-    }
-    this.client = new import_genai.GoogleGenAI(clientConfig);
     this.models = config.models;
     this.defaultModel = config.models.default;
     this._pendingOperations = /* @__PURE__ */ new Map();
+    if (config.provider === "vertex") {
+      if (config.apiKey) {
+        this.client = new import_genai.GoogleGenAI({
+          vertexai: true,
+          apiKey: config.apiKey
+        });
+      } else {
+        if (!config.project) {
+          console.warn("[GoogleProvider] Vertex AI: no project ID and no API key. Calls will likely fail.");
+        }
+        this.client = new import_genai.GoogleGenAI({
+          vertexai: true,
+          project: config.project,
+          location: config.location || "us-central1"
+        });
+      }
+    } else {
+      this.client = new import_genai.GoogleGenAI({
+        apiKey: config.apiKey
+      });
+    }
+  }
+  /**
+   * Perform the actual API call. Both AI Studio and Vertex AI use the
+   * same @google/genai SDK method — the routing is determined by how
+   * the client was constructed.
+   */
+  async _generateContent(requestOptions) {
+    return this.client.models.generateContent(requestOptions);
   }
   async chat(userMessage, systemPrompt = "", options = {}) {
     const messages = [{ role: "user", content: userMessage }];
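
The constructor now builds one of three `@google/genai` client shapes depending on the resolved config. A sketch of the config objects it expects (normally assembled by the config provider rather than written by hand; values are placeholders):

```js
// 1. Vertex AI with an API key
new GoogleProvider({ provider: "vertex", apiKey: "...", models: MODEL_CONFIGS.vertex });

// 2. Vertex AI with project/location; credential resolution is left to the SDK
new GoogleProvider({ provider: "vertex", project: "my-gcp-project", location: "us-central1", models: MODEL_CONFIGS.vertex });

// 3. Gemini API (AI Studio) with an API key
new GoogleProvider({ provider: "gemini", apiKey: "...", models: MODEL_CONFIGS.gemini });
```
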
@@ -742,18 +795,12 @@ ${msg.content}`;
     if (tools && tools.length > 0) {
       requestOptions.config.tools = [{ functionDeclarations: tools.map((t) => t.function) }];
       if (requestOptions.config.responseMimeType === "application/json") {
-        console.warn(
+        console.warn(`[${this.constructor.name}] Disabling strict JSON mode because tools are present. Relying on system prompt.`);
         delete requestOptions.config.responseMimeType;
         delete requestOptions.config.responseSchema;
       }
     }
-
-    try {
-      response = await this.client.models.generateContent(requestOptions);
-    } catch (error) {
-      console.error(`[GeminiProvider] generateContent failed (API Key: ${this._getMaskedApiKey()}):`, error);
-      throw error;
-    }
+    const response = await this._generateContent(requestOptions);
     const candidate = (_c = response.candidates) == null ? void 0 : _c[0];
     if (!candidate) {
       throw new LLMServiceException("No candidates returned from model", 500);
@@ -783,10 +830,8 @@ ${msg.content}`;
       }
     }
     if (!textContent && (!toolCalls || toolCalls.length === 0)) {
-      console.error(
-      console.error(
-      console.error("[GeminiProvider] Safety Ratings:", JSON.stringify(candidate.safetyRatings, null, 2));
-      console.error("[GeminiProvider] Full Candidate:", JSON.stringify(candidate, null, 2));
+      console.error(`[${this.constructor.name}] Model returned empty response (no text, no tool calls)`);
+      console.error(`[${this.constructor.name}] Finish Reason:`, candidate.finishReason);
       throw new LLMServiceException(
         `Model returned empty response. Finish Reason: ${candidate.finishReason}.`,
         500
@@ -796,18 +841,14 @@ ${msg.content}`;
     return {
       content: textContent,
       thought_signature: responseThoughtSignature,
-      // Return signature to caller
       tool_calls: toolCalls ? (Array.isArray(toolCalls) ? toolCalls : [toolCalls]).map((fc) => ({
         type: "function",
         function: fc,
         thought_signature: fc.thought_signature
       })) : null,
       finishReason: normalizedFinishReason,
-      // Standardized: 'completed', 'truncated', etc.
       _rawFinishReason: candidate.finishReason,
-      // Keep original for debugging
       _responseFormat: options.responseFormat,
-      // Return usage stats
       usage: {
         prompt_tokens: ((_e = response.usageMetadata) == null ? void 0 : _e.promptTokenCount) || 0,
         completion_tokens: ((_f = response.usageMetadata) == null ? void 0 : _f.candidatesTokenCount) || 0,
@@ -833,7 +874,7 @@ ${msg.content}`;
       if (schema) {
         config.responseSchema = this._convertToGeminiSchema(schema);
       } else {
-        console.warn(
+        console.warn(`[${this.constructor.name}] Using legacy JSON mode without schema - may produce markdown wrappers`);
       }
     }
   }
@@ -891,8 +932,7 @@ ${msg.content}`;
     if (!content) return null;
     const parsed = extractJsonFromResponse(content);
     if (!parsed) {
-      console.error(
-      console.error("[GeminiProvider] Content preview:", content.substring(0, 200));
+      console.error(`[${this.constructor.name}] Failed to extract valid JSON from response`);
     }
     return parsed;
   }
@@ -922,9 +962,9 @@ ${msg.content}`;
     toolResults.forEach((result) => messages.push({ role: "tool", tool_call_id: result.tool_call_id, content: result.output }));
   }
   async imageGeneration(prompt, systemPrompt, options = {}) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const modelName = options.model || this.models.image || "gemini-3-pro-image-preview";
-    console.log(`[
+    console.log(`[${this.constructor.name}] Generating image with model: ${modelName}`);
     const hasReferenceImages = options.images && options.images.length > 0;
     const generationConfig = {
       responseModalities: hasReferenceImages ? ["TEXT", "IMAGE"] : ["IMAGE"]
@@ -956,7 +996,7 @@ ${msg.content}`;
     if (systemPrompt) {
       requestOptions.config.systemInstruction = { parts: [{ text: systemPrompt }] };
     }
-    const response = await this.
+    const response = await this._generateContent(requestOptions);
     const imagePart = (_d = (_c = (_b = (_a = response.candidates) == null ? void 0 : _a[0]) == null ? void 0 : _b.content) == null ? void 0 : _c.parts) == null ? void 0 : _d.find(
       (part) => {
         var _a2;
@@ -964,30 +1004,21 @@ ${msg.content}`;
       }
     );
     if (!imagePart || !imagePart.inlineData) {
-      const
-
-      console.error("[GeminiProvider] Image generation failed (no image data)");
-      if (candidate) {
-        console.error("[GeminiProvider] Finish Reason:", candidate.finishReason);
-        console.error("[GeminiProvider] Safety Ratings:", JSON.stringify(candidate.safetyRatings, null, 2));
-        console.error("[GeminiProvider] Full Candidate:", JSON.stringify(candidate, null, 2));
-      }
-      if (textPart) {
-        console.warn("[GeminiProvider] Model returned text instead of image:", textPart.text);
-      }
+      const candidate = (_e = response.candidates) == null ? void 0 : _e[0];
+      console.error(`[${this.constructor.name}] Image generation failed (no image data)`);
       throw new Error(`No image data in response. Finish Reason: ${candidate == null ? void 0 : candidate.finishReason}`);
     }
     let thoughtSignature = null;
     if (imagePart.thought_signature || imagePart.thoughtSignature) {
       thoughtSignature = imagePart.thought_signature || imagePart.thoughtSignature;
     } else {
-      const signaturePart = (
+      const signaturePart = (_i = (_h = (_g = (_f = response.candidates) == null ? void 0 : _f[0]) == null ? void 0 : _g.content) == null ? void 0 : _h.parts) == null ? void 0 : _i.find((p) => p.thought_signature || p.thoughtSignature);
       if (signaturePart) {
         thoughtSignature = signaturePart.thought_signature || signaturePart.thoughtSignature;
       }
     }
     if (thoughtSignature && thoughtSignature.length > 5e4) {
-      console.warn(`[
+      console.warn(`[${this.constructor.name}] \u26A0\uFE0F Thought signature is abnormally large (${thoughtSignature.length} chars). Replacing with bypass token.`);
       thoughtSignature = "skip_thought_signature_validator";
     }
     return {
@@ -1010,31 +1041,20 @@ ${prompt}` : prompt;
         durationSeconds: options.durationSeconds || 6,
         aspectRatio: options.aspectRatio || "16:9",
         numberOfVideos: 1,
-        // Pass reference images if provided
         ...images && images.length > 0 ? { referenceImages: images } : {}
       }
     };
-    const logConfig = JSON.parse(JSON.stringify(requestConfig));
-    if (logConfig.config && logConfig.config.referenceImages) {
-      logConfig.config.referenceImages = logConfig.config.referenceImages.map((img) => ({
-        ...img,
-        data: `... (${img.data ? img.data.length : 0} bytes)`
-        // Summarize data
-      }));
-    }
-    console.log("[GeminiProvider] startVideoGeneration request:", JSON.stringify(logConfig, null, 2));
     try {
       const operation = await this.client.models.generateVideos(requestConfig);
       this._pendingOperations.set(operation.name, operation);
       return { operationName: operation.name };
     } catch (error) {
-      console.error(`[
+      console.error(`[${this.constructor.name}] startVideoGeneration failed (API Key: ${this._getMaskedApiKey()}):`, error);
       throw error;
     }
   }
   async getVideoGenerationStatus(operationName) {
     var _a, _b, _c, _d, _e, _f;
-    console.log(`[GeminiProvider] Checking status for operation: ${operationName}`);
     let operation = this._pendingOperations.get(operationName);
     if (!operation) {
       operation = await this.client.models.getOperation(operationName);
@@ -1046,11 +1066,9 @@ ${prompt}` : prompt;
       progress: ((_a = operation.metadata) == null ? void 0 : _a.progressPercent) || 0,
       state: ((_b = operation.metadata) == null ? void 0 : _b.state) || (operation.done ? "COMPLETED" : "PROCESSING")
     };
-    console.log(`[GeminiProvider] Operation status: ${result.state}, Progress: ${result.progress}%`);
     if (operation.done) {
       this._pendingOperations.delete(operationName);
       if (operation.error) {
-        console.error("[GeminiProvider] Video generation failed:", JSON.stringify(operation.error, null, 2));
         result.error = operation.error;
       } else {
         const videoResult = operation.response;
@@ -1062,20 +1080,17 @@ ${prompt}` : prompt;
   }
   async startDeepResearch(prompt, options = {}) {
     const agent = options.agent || "deep-research-pro-preview-12-2025";
-    console.log(`[
+    console.log(`[${this.constructor.name}] Starting Deep Research with agent: ${agent}`);
     try {
       const interaction = await this.client.interactions.create({
         agent,
         input: prompt,
         background: true,
-        // Required for long running
         store: true
-        // Required for polling
       });
-      console.log(`[GeminiProvider] Deep Research started. Interaction ID: ${interaction.id}`);
       return { operationId: interaction.id };
     } catch (error) {
-      console.error(`[
+      console.error(`[${this.constructor.name}] startDeepResearch failed:`, error);
       throw error;
     }
   }
@@ -1094,18 +1109,10 @@ ${prompt}` : prompt;
       }
       return response;
     } catch (error) {
-      console.error(`[
+      console.error(`[${this.constructor.name}] getDeepResearchStatus failed for ${operationId}:`, error);
       throw error;
     }
   }
-  /**
-   * Extract structured data from a file (PDF, Image, etc.) using Gemini Multimodal capabilities.
-   * @param {Buffer|string} fileData - Base64 string or Buffer of the file
-   * @param {string} mimeType - Mime type (e.g., 'application/pdf', 'image/png')
-   * @param {string} prompt - Extraction prompt
-   * @param {Object} schema - JSON schema for the output
-   * @param {Object} options - Additional options
-   */
   async extractWithLLM(fileData, mimeType, prompt, schema = null, options = {}) {
     var _a, _b, _c, _d;
     const tier = options.tier || "default";
@@ -1117,9 +1124,7 @@ ${prompt}` : prompt;
       maxTokens,
       temperature
     );
-    const parts = [
-      { text: prompt }
-    ];
+    const parts = [{ text: prompt }];
    let base64Data = fileData;
    if (typeof fileData !== "string") {
      try {
@@ -1142,7 +1147,7 @@ ${prompt}` : prompt;
       config: generationConfig
     };
     try {
-      const response = await this.
+      const response = await this._generateContent(requestOptions);
       const candidate = (_a = response.candidates) == null ? void 0 : _a[0];
       if (!candidate) {
         throw new LLMServiceException("No candidates returned from model during extraction", 500);
@@ -1153,7 +1158,7 @@ ${prompt}` : prompt;
       }
       return textContent;
     } catch (error) {
-      console.error(`[
+      console.error(`[${this.constructor.name}] extractWithLLM failed (API Key: ${this._getMaskedApiKey()}):`, error);
       throw error;
     }
   }
@@ -1172,14 +1177,14 @@ var LLMService = class {
       return this.providerCache.get(cacheKey);
     }
     const config = await ConfigManager.getConfig(tenantId, this.env);
-    if (!config.apiKey) {
+    if (!config.apiKey && config.provider !== "vertex") {
       throw new LLMServiceException(`LLM service is not configured for ${config.provider}. Missing API Key.`, 500);
     }
     let provider;
     if (config.provider === "openai") {
       provider = new OpenAIProvider(config);
-    } else if (config.provider === "gemini") {
-      provider = new
+    } else if (config.provider === "gemini" || config.provider === "vertex") {
+      provider = new GoogleProvider(config);
     } else {
       throw new LLMServiceException(`Unsupported LLM provider: ${config.provider}`, 500);
     }
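
Both `gemini` and `vertex` configs now resolve to `GoogleProvider`, and the missing-key guard is skipped for `vertex` because that provider can authenticate via project/location instead of an API key. A condensed sketch of the routing (the `buildProvider` helper is hypothetical and only mirrors the branch shown above):

```js
// Hypothetical helper mirroring the provider-selection branch above.
function buildProvider(config) {
  if (config.provider === "openai") return new OpenAIProvider(config);
  if (config.provider === "gemini" || config.provider === "vertex") return new GoogleProvider(config);
  throw new LLMServiceException(`Unsupported LLM provider: ${config.provider}`, 500);
}
```
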
@@ -1753,12 +1758,14 @@ function createSpeechHandler(app, getConfig) {
   DefaultConfigProvider,
   FINISH_REASONS,
   GeminiProvider,
+  GoogleProvider,
   LLMService,
   LLMServiceException,
   MODEL_CONFIGS,
   OpenAIProvider,
   TranscriptionService,
   TranscriptionServiceException,
+  VertexProvider,
   createSpeechHandler,
   extractJsonFromResponse,
   extractTextAndJson,