@iqai/adk 0.1.18 → 0.1.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +101 -93
- package/dist/index.d.ts +101 -93
- package/dist/index.js +845 -767
- package/dist/index.mjs +849 -771
- package/package.json +1 -1
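
The headline change in this release is a new `src/models/ai-sdk.ts` module that wraps a pre-configured Vercel AI SDK `LanguageModel` as an ADK model, visible in the bundle diff below. A minimal usage sketch; the root export of `AiSdkLlm` and the provider package `@ai-sdk/openai` are assumptions, not confirmed by this diff:

```ts
import { openai } from "@ai-sdk/openai"; // assumed provider package
import { AiSdkLlm } from "@iqai/adk";    // assumed export path

// Per the diff, the constructor takes a pre-configured LanguageModel from
// provider(modelName); the ADK model id falls back to "ai-sdk-model" when
// modelInstance.modelId is absent.
const llm = new AiSdkLlm(openai("gpt-4o-mini"));
console.log(llm.model); // "gpt-4o-mini"
```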
package/dist/index.mjs (CHANGED)

```diff
@@ -1362,261 +1362,317 @@ var BaseLlm = class {
 var BaseLLMConnection = class {
 };
 
-// src/models/
+// src/models/ai-sdk.ts
+init_logger();
 import {
-
-
-
-
-var
-
-
-  _liveApiClient;
-  _apiBackend;
-  _trackingHeaders;
+  generateText,
+  jsonSchema,
+  streamText
+} from "ai";
+var AiSdkLlm = class extends BaseLlm {
+  modelInstance;
+  logger = new Logger({ name: "AiSdkLlm" });
   /**
-   * Constructor
+   * Constructor accepts a pre-configured LanguageModel instance
+   * @param model - Pre-configured LanguageModel from provider(modelName)
    */
-  constructor(
-    super(model);
+  constructor(modelInstance) {
+    super(modelInstance.modelId || "ai-sdk-model");
+    this.modelInstance = modelInstance;
   }
   /**
-   *
+   * Returns empty array - following Python ADK pattern
    */
   static supportedModels() {
-    return [
-      "gemini-.*",
-      // fine-tuned vertex endpoint pattern
-      "projects/.+/locations/.+/endpoints/.+",
-      // vertex gemini long name
-      "projects/.+/locations/.+/publishers/google/models/gemini.+"
-    ];
+    return [];
   }
-
-
-
-
-
-
-
-
-
-
-
-
-    config
-  }
-
-
-
-
-
-      response = resp;
-      const llmResponse = LlmResponse.create(resp);
-      usageMetadata = llmResponse.usageMetadata;
-      if (llmResponse.content?.parts?.[0]?.text) {
-        const part0 = llmResponse.content.parts[0];
-        if (part0.thought) {
-          thoughtText += part0.text;
-        } else {
-          text += part0.text;
-        }
-        llmResponse.partial = true;
-      } else if ((thoughtText || text) && (!llmResponse.content || !llmResponse.content.parts || !this.hasInlineData(resp))) {
-        const parts = [];
-        if (thoughtText) {
-          parts.push({ text: thoughtText, thought: true });
-        }
-        if (text) {
-          parts.push({ text });
-        }
+  async *generateContentAsyncImpl(request, stream = false) {
+    try {
+      const messages = this.convertToAiSdkMessages(request);
+      const systemMessage = request.getSystemInstructionText();
+      const tools = this.convertToAiSdkTools(request);
+      const requestParams = {
+        model: this.modelInstance,
+        messages,
+        system: systemMessage,
+        tools: Object.keys(tools).length > 0 ? tools : void 0,
+        maxTokens: request.config?.maxOutputTokens,
+        temperature: request.config?.temperature,
+        topP: request.config?.topP
+      };
+      if (stream) {
+        const result = streamText(requestParams);
+        let accumulatedText = "";
+        for await (const delta of result.textStream) {
+          accumulatedText += delta;
           yield new LlmResponse({
             content: {
-
-
+              role: "model",
+              parts: [{ text: accumulatedText }]
             },
-
+            partial: true
           });
-          thoughtText = "";
-          text = "";
         }
-
-      }
-      if ((text || thoughtText) && response && response.candidates && response.candidates[0]?.finishReason === FinishReason.STOP) {
+        const toolCalls = await result.toolCalls;
         const parts = [];
-        if (
-          parts.push({ text:
+        if (accumulatedText) {
+          parts.push({ text: accumulatedText });
         }
-        if (
-
+        if (toolCalls && toolCalls.length > 0) {
+          for (const toolCall of toolCalls) {
+            parts.push({
+              functionCall: {
+                id: toolCall.toolCallId,
+                name: toolCall.toolName,
+                args: toolCall.args
+              }
+            });
+          }
         }
+        const finalUsage = await result.usage;
+        const finishReason = await result.finishReason;
         yield new LlmResponse({
           content: {
-
-
+            role: "model",
+            parts: parts.length > 0 ? parts : [{ text: "" }]
           },
-          usageMetadata
+          usageMetadata: finalUsage ? {
+            promptTokenCount: finalUsage.promptTokens,
+            candidatesTokenCount: finalUsage.completionTokens,
+            totalTokenCount: finalUsage.totalTokens
+          } : void 0,
+          finishReason: this.mapFinishReason(finishReason),
+          turnComplete: true
+        });
+      } else {
+        const result = await generateText(requestParams);
+        const parts = [];
+        if (result.text) {
+          parts.push({ text: result.text });
+        }
+        if (result.toolCalls && result.toolCalls.length > 0) {
+          for (const toolCall of result.toolCalls) {
+            parts.push({
+              functionCall: {
+                id: toolCall.toolCallId,
+                name: toolCall.toolName,
+                args: toolCall.args
+              }
+            });
+          }
+        }
+        yield new LlmResponse({
+          content: {
+            role: "model",
+            parts: parts.length > 0 ? parts : [{ text: "" }]
+          },
+          usageMetadata: result.usage ? {
+            promptTokenCount: result.usage.promptTokens,
+            candidatesTokenCount: result.usage.completionTokens,
+            totalTokenCount: result.usage.totalTokens
+          } : void 0,
+          finishReason: this.mapFinishReason(result.finishReason),
+          turnComplete: true
         });
       }
-    }
-
-
-
-
+    } catch (error) {
+      this.logger.error(`AI SDK Error: ${String(error)}`, { error, request });
+      yield LlmResponse.fromError(error, {
+        errorCode: "AI_SDK_ERROR",
+        model: this.model
       });
-      const llmResponse = LlmResponse.create(response);
-      this.logger.debug(
-        `Google response: ${llmResponse.usageMetadata?.candidatesTokenCount || 0} tokens`
-      );
-      yield llmResponse;
     }
   }
   /**
-   *
-   */
-  connect(_llmRequest) {
-    throw new Error(`Live connection is not supported for ${this.model}.`);
-  }
-  /**
-   * Check if response has inline data
+   * Convert ADK LlmRequest to AI SDK CoreMessage format
    */
-
-  const
-
+  convertToAiSdkMessages(llmRequest) {
+    const messages = [];
+    for (const content of llmRequest.contents || []) {
+      const message = this.contentToAiSdkMessage(content);
+      if (message) {
+        messages.push(message);
+      }
+    }
+    return messages;
   }
   /**
-   *
+   * Transform JSON schema to use lowercase types for AI SDK compatibility
    */
-
-
-
-
-
+  transformSchemaForAiSdk(schema) {
+    if (Array.isArray(schema)) {
+      return schema.map((item) => this.transformSchemaForAiSdk(item));
+    }
+    if (!schema || typeof schema !== "object") {
+      return schema;
+    }
+    const transformedSchema = { ...schema };
+    if (transformedSchema.type && typeof transformedSchema.type === "string") {
+      transformedSchema.type = transformedSchema.type.toLowerCase();
+    }
+    if (transformedSchema.properties) {
+      transformedSchema.properties = Object.fromEntries(
+        Object.entries(transformedSchema.properties).map(([key, value]) => [
+          key,
+          this.transformSchemaForAiSdk(value)
+        ])
+      );
+    }
+    if (transformedSchema.items) {
+      transformedSchema.items = this.transformSchemaForAiSdk(
+        transformedSchema.items
+      );
+    }
+    const arrayKeywords = ["anyOf", "oneOf", "allOf"];
+    for (const keyword of arrayKeywords) {
+      if (transformedSchema[keyword]) {
+        transformedSchema[keyword] = this.transformSchemaForAiSdk(
+          transformedSchema[keyword]
+        );
+      }
+    }
+    return transformedSchema;
   }
   /**
-   *
+   * Convert ADK tools to AI SDK tools format
    */
-
-
-
-
-
-
-
-
-
-
-
+  convertToAiSdkTools(llmRequest) {
+    const tools = {};
+    if (llmRequest.config?.tools) {
+      for (const toolConfig of llmRequest.config.tools) {
+        if ("functionDeclarations" in toolConfig) {
+          for (const funcDecl of toolConfig.functionDeclarations) {
+            tools[funcDecl.name] = {
+              description: funcDecl.description,
+              parameters: jsonSchema(
+                this.transformSchemaForAiSdk(funcDecl.parameters || {})
+              )
+            };
           }
         }
       }
     }
+    return tools;
   }
   /**
-   *
+   * Convert ADK Content to AI SDK CoreMessage
    */
-
-
-
+  contentToAiSdkMessage(content) {
+    const role = this.mapRole(content.role);
+    if (!content.parts || content.parts.length === 0) {
+      return null;
     }
-
-
-
-
-
-
-
-
+    if (content.parts.length === 1 && content.parts[0].text) {
+      const textContent = content.parts[0].text;
+      if (role === "system") {
+        return { role: "system", content: textContent };
+      }
+      if (role === "assistant") {
+        return { role: "assistant", content: textContent };
+      }
+      return { role: "user", content: textContent };
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if (content.parts?.some((part) => part.functionCall)) {
+      const textParts = content.parts.filter((part) => part.text);
+      const functionCalls = content.parts.filter((part) => part.functionCall);
+      const contentParts2 = [];
+      for (const textPart of textParts) {
+        if (textPart.text) {
+          contentParts2.push({
+            type: "text",
+            text: textPart.text
+          });
+        }
+      }
+      for (const funcPart of functionCalls) {
+        if (funcPart.functionCall) {
+          contentParts2.push({
+            type: "tool-call",
+            toolCallId: funcPart.functionCall.id,
+            toolName: funcPart.functionCall.name,
+            args: funcPart.functionCall.args
+          });
+        }
+      }
+      return {
+        role: "assistant",
+        content: contentParts2
+      };
+    }
+    if (content.parts?.some((part) => part.functionResponse)) {
+      const functionResponses = content.parts.filter(
+        (part) => part.functionResponse
+      );
+      const contentParts2 = functionResponses.map((part) => ({
+        type: "tool-result",
+        toolCallId: part.functionResponse.id,
+        toolName: part.functionResponse.name || "unknown",
+        result: part.functionResponse.response
+      }));
+      return {
+        role: "tool",
+        content: contentParts2
+      };
+    }
+    const contentParts = [];
+    for (const part of content.parts) {
+      if (part.text) {
+        contentParts.push({
+          type: "text",
+          text: part.text
         });
-    } else {
-      throw new Error(
-        "Google API Key or Vertex AI configuration is required. Set GOOGLE_API_KEY or GOOGLE_GENAI_USE_VERTEXAI=true with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION."
-      );
       }
     }
-
-
-  /**
-   * Gets the API backend type.
-   */
-  get apiBackend() {
-    if (!this._apiBackend) {
-      const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
-      this._apiBackend = useVertexAI ? "VERTEX_AI" /* VERTEX_AI */ : "GEMINI_API" /* GEMINI_API */;
+    if (contentParts.length === 0) {
+      return null;
     }
-
-
-
-
-   */
-  get trackingHeaders() {
-    if (!this._trackingHeaders) {
-      let frameworkLabel = "google-adk/1.0.0";
-      if (process.env[AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME]) {
-        frameworkLabel = `${frameworkLabel}+${AGENT_ENGINE_TELEMETRY_TAG}`;
+    if (contentParts.length === 1) {
+      const textContent = contentParts[0].text;
+      if (role === "system") {
+        return { role: "system", content: textContent };
       }
-
-
-
-
-        "user-agent": versionHeaderValue
-      };
+      if (role === "assistant") {
+        return { role: "assistant", content: textContent };
+      }
+      return { role: "user", content: textContent };
     }
-
+    if (role === "system") {
+      const combinedText = contentParts.map((p) => p.text).join("");
+      return { role: "system", content: combinedText };
+    }
+    if (role === "assistant") {
+      return { role: "assistant", content: contentParts };
+    }
+    return { role: "user", content: contentParts };
   }
   /**
-   *
+   * Map ADK role to AI SDK role
    */
-
-
+  mapRole(role) {
+    switch (role) {
+      case "model":
+      case "assistant":
+        return "assistant";
+      case "system":
+        return "system";
+      default:
+        return "user";
+    }
   }
   /**
-   *
+   * Map AI SDK finish reason to ADK finish reason
    */
-
-
-
-
-
-
-
-
-
-
-        location,
-        apiVersion: this.liveApiVersion
-      });
-    } else if (apiKey) {
-      this._liveApiClient = new GoogleGenAI({
-        apiKey,
-        apiVersion: this.liveApiVersion
-      });
-    } else {
-      throw new Error("API configuration required for live client");
-    }
+  mapFinishReason(finishReason) {
+    switch (finishReason) {
+      case "stop":
+      case "end_of_message":
+        return "STOP";
+      case "length":
+      case "max_tokens":
+        return "MAX_TOKENS";
+      default:
+        return "FINISH_REASON_UNSPECIFIED";
     }
-    return this._liveApiClient;
   }
 };
 
```
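The hunk above also introduces `transformSchemaForAiSdk`, which recursively lowercases JSON-schema `type` values (Gemini-style declarations use uppercase names such as `OBJECT`) before tool parameters are handed to the AI SDK's `jsonSchema()` helper. A standalone sketch of the same normalization:

```ts
// Standalone re-implementation of the normalization shown in the diff above.
function transformSchemaForAiSdk(schema: unknown): unknown {
  if (Array.isArray(schema)) return schema.map(transformSchemaForAiSdk);
  if (!schema || typeof schema !== "object") return schema;
  const out: Record<string, any> = { ...(schema as Record<string, any>) };
  if (typeof out.type === "string") out.type = out.type.toLowerCase(); // "OBJECT" -> "object"
  if (out.properties) {
    out.properties = Object.fromEntries(
      Object.entries(out.properties).map(([k, v]) => [k, transformSchemaForAiSdk(v)])
    );
  }
  if (out.items) out.items = transformSchemaForAiSdk(out.items);
  for (const kw of ["anyOf", "oneOf", "allOf"]) {
    if (out[kw]) out[kw] = transformSchemaForAiSdk(out[kw]);
  }
  return out;
}

console.log(
  transformSchemaForAiSdk({ type: "OBJECT", properties: { city: { type: "STRING" } } })
);
// -> { type: "object", properties: { city: { type: "string" } } }
```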
```diff
@@ -1844,21 +1900,35 @@ var AnthropicLlm = class extends BaseLlm {
   }
 };
 
-// src/models/
-import
-
-
+// src/models/google-llm.ts
+import {
+  FinishReason,
+  GoogleGenAI
+} from "@google/genai";
+var AGENT_ENGINE_TELEMETRY_TAG = "remote_reasoning_engine";
+var AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID";
+var GoogleLlm = class extends BaseLlm {
+  _apiClient;
+  _liveApiClient;
+  _apiBackend;
+  _trackingHeaders;
   /**
-   * Constructor for
+   * Constructor for Gemini
    */
-  constructor(model = "
+  constructor(model = "gemini-1.5-flash") {
     super(model);
   }
   /**
-   * Provides the list of supported models
+   * Provides the list of supported models.
    */
   static supportedModels() {
-    return [
+    return [
+      "gemini-.*",
+      // fine-tuned vertex endpoint pattern
+      "projects/.+/locations/.+/endpoints/.+",
+      // vertex gemini long name
+      "projects/.+/locations/.+/publishers/google/models/gemini.+"
+    ];
   }
   /**
    * Main content generation method - handles both streaming and non-streaming
```
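The `supportedModels()` patterns restored here are plain regex sources. A sketch of how a registry could match a model name against them; whether `llm-registry.ts` anchors the patterns exactly this way is an assumption:

```ts
// Patterns copied from GoogleLlm.supportedModels() in the hunk above.
const patterns = [
  "gemini-.*",
  "projects/.+/locations/.+/endpoints/.+",
  "projects/.+/locations/.+/publishers/google/models/gemini.+"
];

const matches = (model: string): boolean =>
  patterns.some((p) => new RegExp(`^${p}$`).test(model));

console.log(matches("gemini-1.5-flash"));                     // true
console.log(matches("projects/p/locations/us/endpoints/e1")); // true
console.log(matches("gpt-4o"));                               // false
```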
```diff
@@ -1866,59 +1936,31 @@ var OpenAiLlm = class extends BaseLlm {
   async *generateContentAsyncImpl(llmRequest, stream = false) {
     this.preprocessRequest(llmRequest);
     const model = llmRequest.model || this.model;
-    const
-
-    );
-    let tools;
-    if (llmRequest.config?.tools?.[0]?.functionDeclarations) {
-      tools = llmRequest.config.tools[0].functionDeclarations.map(
-        (funcDecl) => this.functionDeclarationToOpenAiTool(funcDecl)
-      );
-    }
-    const systemContent = llmRequest.getSystemInstructionText();
-    if (systemContent) {
-      messages.unshift({
-        role: "system",
-        content: systemContent
-      });
-    }
-    const openAiMessages = messages;
-    const requestParams = {
-      model,
-      messages: openAiMessages,
-      tools,
-      tool_choice: tools ? "auto" : void 0,
-      max_tokens: llmRequest.config?.maxOutputTokens,
-      temperature: llmRequest.config?.temperature,
-      top_p: llmRequest.config?.topP,
-      stream
-    };
+    const contents = this.convertContents(llmRequest.contents || []);
+    const config = llmRequest.config;
     if (stream) {
-      const
-
-
+      const responses = await this.apiClient.models.generateContentStream({
+        model,
+        contents,
+        config
       });
+      let response = null;
       let thoughtText = "";
       let text = "";
-      let usageMetadata;
-      const
-
-      const
-
-
-
-
-        usageMetadata = chunk.usage;
-      }
-      if (llmResponse.content?.parts?.[0]?.text) {
-        const part0 = llmResponse.content.parts[0];
-        if (part0.thought) {
+      let usageMetadata = null;
+      for await (const resp of responses) {
+        response = resp;
+        const llmResponse = LlmResponse.create(resp);
+        usageMetadata = llmResponse.usageMetadata;
+        if (llmResponse.content?.parts?.[0]?.text) {
+          const part0 = llmResponse.content.parts[0];
+          if (part0.thought) {
             thoughtText += part0.text;
           } else {
             text += part0.text;
           }
           llmResponse.partial = true;
-      } else if ((thoughtText || text) && (!llmResponse.content || !llmResponse.content.parts || !this.hasInlineData(
+        } else if ((thoughtText || text) && (!llmResponse.content || !llmResponse.content.parts || !this.hasInlineData(resp))) {
           const parts = [];
           if (thoughtText) {
             parts.push({ text: thoughtText, thought: true });
```
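The streaming loop above follows the same pattern in every backend in this release: buffer text deltas (keeping `thought` parts separate from regular text), yield each chunk as a `partial` response, then flush the aggregate when the stream finishes. A simplified, self-contained sketch of that control flow with stand-in types (not the package's actual API):

```ts
type Part = { text: string; thought?: boolean };
type Chunk = { text?: string; thought?: boolean; stop?: boolean };

async function* aggregate(
  chunks: AsyncIterable<Chunk>
): AsyncGenerator<{ parts: Part[]; partial: boolean }> {
  let text = "";
  let thoughtText = "";
  for await (const c of chunks) {
    if (c.text) {
      if (c.thought) thoughtText += c.text;
      else text += c.text;
      // Stream the increment immediately, marked partial.
      yield { parts: [{ text: c.text, thought: c.thought }], partial: true };
    }
    if (c.stop && (text || thoughtText)) {
      // Flush the aggregated turn once the backend signals completion.
      const parts: Part[] = [];
      if (thoughtText) parts.push({ text: thoughtText, thought: true });
      if (text) parts.push({ text });
      yield { parts, partial: false };
    }
  }
}
```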
```diff
@@ -1931,73 +1973,14 @@ var OpenAiLlm = class extends BaseLlm {
             parts,
             role: "model"
           },
-          usageMetadata
-            promptTokenCount: usageMetadata.prompt_tokens,
-            candidatesTokenCount: usageMetadata.completion_tokens,
-            totalTokenCount: usageMetadata.total_tokens
-          } : void 0
+          usageMetadata
           });
           thoughtText = "";
           text = "";
         }
-
-      for (const toolCall of delta.tool_calls) {
-        const index = toolCall.index || 0;
-        if (!accumulatedToolCalls[index]) {
-          accumulatedToolCalls[index] = {
-            index,
-            id: toolCall.id || "",
-            type: "function",
-            function: { name: "", arguments: "" }
-          };
-        }
-        if (toolCall.function?.name) {
-          accumulatedToolCalls[index].function.name += toolCall.function.name;
-        }
-        if (toolCall.function?.arguments) {
-          accumulatedToolCalls[index].function.arguments += toolCall.function.arguments;
-        }
-      }
-    }
-    if (choice.finish_reason) {
-      const parts = [];
-      if (thoughtText) {
-        parts.push({ text: thoughtText, thought: true });
-      }
-      if (text) {
-        parts.push({ text });
-      }
-      if (accumulatedToolCalls.length > 0) {
-        for (const toolCall of accumulatedToolCalls) {
-          if (toolCall.function?.name) {
-            parts.push({
-              functionCall: {
-                id: toolCall.id,
-                name: toolCall.function.name,
-                args: JSON.parse(toolCall.function.arguments || "{}")
-              }
-            });
-          }
-        }
-      }
-      const finalResponse = new LlmResponse({
-        content: {
-          role: "model",
-          parts
-        },
-        usageMetadata: usageMetadata ? {
-          promptTokenCount: usageMetadata.prompt_tokens,
-          candidatesTokenCount: usageMetadata.completion_tokens,
-          totalTokenCount: usageMetadata.total_tokens
-        } : void 0,
-        finishReason: this.toAdkFinishReason(choice.finish_reason)
-      });
-      yield finalResponse;
-    } else {
-      yield llmResponse;
-    }
+        yield llmResponse;
       }
-    if ((text || thoughtText) &&
+      if ((text || thoughtText) && response && response.candidates && response.candidates[0]?.finishReason === FinishReason.STOP) {
         const parts = [];
         if (thoughtText) {
           parts.push({ text: thoughtText, thought: true });
```
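Every provider in this release normalizes usage counters onto the same ADK field names; the OpenAI path maps snake_case fields, the AI SDK path maps `promptTokens`/`completionTokens`. A sketch of the OpenAI-side mapping as it appears in the diff:

```ts
interface AdkUsage {
  promptTokenCount: number;
  candidatesTokenCount: number;
  totalTokenCount: number;
}

interface OpenAiUsage {
  prompt_tokens: number;
  completion_tokens: number;
  total_tokens: number;
}

const fromOpenAiUsage = (u: OpenAiUsage): AdkUsage => ({
  promptTokenCount: u.prompt_tokens,
  candidatesTokenCount: u.completion_tokens,
  totalTokenCount: u.total_tokens
});

console.log(fromOpenAiUsage({ prompt_tokens: 12, completion_tokens: 34, total_tokens: 46 }));
// -> { promptTokenCount: 12, candidatesTokenCount: 34, totalTokenCount: 46 }
```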
```diff
@@ -2010,560 +1993,655 @@ var OpenAiLlm = class extends BaseLlm {
             parts,
             role: "model"
           },
-          usageMetadata
-            promptTokenCount: usageMetadata.prompt_tokens,
-            candidatesTokenCount: usageMetadata.completion_tokens,
-            totalTokenCount: usageMetadata.total_tokens
-          }
+          usageMetadata
         });
       }
     } else {
-      const response = await this.
-
-
+      const response = await this.apiClient.models.generateContent({
+        model,
+        contents,
+        config
       });
-      const
-
-
-
-
-      );
-      this.logger.debug(
-        `OpenAI response: ${response.usage?.completion_tokens || 0} tokens`
-      );
-      yield llmResponse;
-    }
+      const llmResponse = LlmResponse.create(response);
+      this.logger.debug(
+        `Google response: ${llmResponse.usageMetadata?.candidatesTokenCount || 0} tokens`
+      );
+      yield llmResponse;
     }
   }
   /**
-   *
+   * Connects to the Gemini model and returns an llm connection.
    */
   connect(_llmRequest) {
     throw new Error(`Live connection is not supported for ${this.model}.`);
   }
   /**
-   *
+   * Check if response has inline data
    */
-
-  const parts = [];
-
-
-
-
-
-
+  hasInlineData(response) {
+    const parts = response.candidates?.[0]?.content?.parts;
+    return parts?.some((part) => part?.inlineData) || false;
+  }
+  /**
+   * Convert LlmRequest contents to GoogleGenAI format
+   */
+  convertContents(contents) {
+    return contents.map((content) => ({
+      role: content.role === "assistant" ? "model" : content.role,
+      parts: content.parts || [{ text: content.content || "" }]
+    }));
+  }
+  /**
+   * Preprocesses the request based on the API backend.
+   */
+  preprocessRequest(llmRequest) {
+    if (this.apiBackend === "GEMINI_API" /* GEMINI_API */) {
+      if (llmRequest.config) {
+        llmRequest.config.labels = void 0;
       }
-
-
-
-
-
-
-
-      name: toolCall.function.name,
-      args: JSON.parse(toolCall.function.arguments || "{}")
-    }
-  });
+      if (llmRequest.contents) {
+        for (const content of llmRequest.contents) {
+          if (!content.parts) continue;
+          for (const part of content.parts) {
+            this.removeDisplayNameIfPresent(part.inlineData);
+            this.removeDisplayNameIfPresent(part.fileData);
+          }
         }
       }
     }
-    return new LlmResponse({
-      content: parts.length > 0 ? {
-        role: "model",
-        parts
-      } : void 0,
-      usageMetadata: usage ? {
-        promptTokenCount: usage.prompt_tokens,
-        candidatesTokenCount: usage.completion_tokens,
-        totalTokenCount: usage.total_tokens
-      } : void 0
-    });
   }
   /**
-   *
+   * Sets display_name to null for the Gemini API (non-Vertex) backend.
    */
-
-
-
-  if (message.content) {
-    parts.push({ text: message.content });
+  removeDisplayNameIfPresent(dataObj) {
+    if (dataObj?.displayName) {
+      dataObj.displayName = null;
     }
-
-
-
-
-
-
-
-
-
-
+  }
+  /**
+   * Builds function declaration log string.
+   */
+  buildFunctionDeclarationLog(funcDecl) {
+    let paramStr = "{}";
+    if (funcDecl.parameters?.properties) {
+      paramStr = JSON.stringify(funcDecl.parameters.properties);
+    }
+    return `${funcDecl.name}: ${paramStr}`;
+  }
+  /**
+   * Provides the api client.
+   */
+  get apiClient() {
+    if (!this._apiClient) {
+      const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
+      const apiKey = process.env.GOOGLE_API_KEY;
+      const project = process.env.GOOGLE_CLOUD_PROJECT;
+      const location = process.env.GOOGLE_CLOUD_LOCATION;
+      if (useVertexAI && project && location) {
+        this._apiClient = new GoogleGenAI({
+          vertexai: true,
+          project,
+          location
+        });
+      } else if (apiKey) {
+        this._apiClient = new GoogleGenAI({
+          apiKey
+        });
+      } else {
+        throw new Error(
+          "Google API Key or Vertex AI configuration is required. Set GOOGLE_API_KEY or GOOGLE_GENAI_USE_VERTEXAI=true with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION."
+        );
       }
     }
-    return
-      content: {
-        role: "model",
-        parts
-      },
-      usageMetadata: usage ? {
-        promptTokenCount: usage.prompt_tokens,
-        candidatesTokenCount: usage.completion_tokens,
-        totalTokenCount: usage.total_tokens
-      } : void 0,
-      finishReason: this.toAdkFinishReason(choice.finish_reason)
-    });
+    return this._apiClient;
   }
   /**
-   *
+   * Gets the API backend type.
    */
-
-
-
-
-    role: "system",
-    content: content.parts?.[0]?.text || ""
-  };
+  get apiBackend() {
+    if (!this._apiBackend) {
+      const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
+      this._apiBackend = useVertexAI ? "VERTEX_AI" /* VERTEX_AI */ : "GEMINI_API" /* GEMINI_API */;
     }
-
-  const functionCallPart = content.parts.find(
-    (part) => part.functionCall
-  );
-  return {
-    role: "assistant",
-    tool_calls: [
-      {
-        id: functionCallPart.functionCall.id || "",
-        type: "function",
-        function: {
-          name: functionCallPart.functionCall.name,
-          arguments: JSON.stringify(
-            functionCallPart.functionCall.args || {}
-          )
-        }
-      }
-    ]
-  };
-  }
-  if (content.parts?.some((part) => part.functionResponse)) {
-    const functionResponsePart = content.parts.find(
-      (part) => part.functionResponse
-    );
-    return {
-      role: "tool",
-      tool_call_id: functionResponsePart.functionResponse.id || "",
-      content: JSON.stringify(
-        functionResponsePart.functionResponse.response || {}
-      )
-    };
-  }
-  if (content.parts?.length === 1 && content.parts[0].text) {
-    return {
-      role,
-      content: content.parts[0].text
-    };
-  }
-  return {
-    role,
-    content: (content.parts || []).map(
-      (part) => this.partToOpenAiContent(part)
-    )
-  };
-  }
-  /**
-   * Convert ADK Part to OpenAI message content
-   */
-  partToOpenAiContent(part) {
-    if (part.text) {
-      return {
-        type: "text",
-        text: part.text
-      };
-    }
-    if (part.inline_data?.mime_type && part.inline_data?.data) {
-      return {
-        type: "image_url",
-        image_url: {
-          url: `data:${part.inline_data.mime_type};base64,${part.inline_data.data}`
-        }
-      };
-    }
-    throw new Error("Unsupported part type for OpenAI conversion");
+    return this._apiBackend;
   }
   /**
-   *
+   * Gets the tracking headers.
    */
-
-
-
-
-
-    description: functionDeclaration.description || "",
-    parameters: functionDeclaration.parameters || {}
+  get trackingHeaders() {
+    if (!this._trackingHeaders) {
+      let frameworkLabel = "google-adk/1.0.0";
+      if (process.env[AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME]) {
+        frameworkLabel = `${frameworkLabel}+${AGENT_ENGINE_TELEMETRY_TAG}`;
       }
-
-
-
-
-
-
-  if (role === "model") {
-    return "assistant";
-  }
-  if (role === "system") {
-    return "system";
-  }
-  return "user";
-  }
-  /**
-   * Convert OpenAI finish reason to ADK finish reason
-   */
-  toAdkFinishReason(openaiFinishReason) {
-    switch (openaiFinishReason) {
-      case "stop":
-      case "tool_calls":
-        return "STOP";
-      case "length":
-        return "MAX_TOKENS";
-      default:
-        return "FINISH_REASON_UNSPECIFIED";
+      const languageLabel = `gl-node/${process.version}`;
+      const versionHeaderValue = `${frameworkLabel} ${languageLabel}`;
+      this._trackingHeaders = {
+        "x-goog-api-client": versionHeaderValue,
+        "user-agent": versionHeaderValue
+      };
     }
+    return this._trackingHeaders;
   }
   /**
-   *
+   * Gets the live API version.
    */
-
-
-    llmRequest.config.labels = void 0;
-    if (llmRequest.contents) {
-      for (const content of llmRequest.contents) {
-        if (!content.parts) continue;
-        for (const part of content.parts) {
-          this.preprocessPart(part);
-        }
-      }
-    }
-  }
+  get liveApiVersion() {
+    return this.apiBackend === "VERTEX_AI" /* VERTEX_AI */ ? "v1beta1" : "v1alpha";
   }
   /**
-   *
+   * Gets the live API client.
    */
-
-  if (
-
-
+  get liveApiClient() {
+    if (!this._liveApiClient) {
+      const useVertexAI = process.env.GOOGLE_GENAI_USE_VERTEXAI === "true";
+      const apiKey = process.env.GOOGLE_API_KEY;
+      const project = process.env.GOOGLE_CLOUD_PROJECT;
+      const location = process.env.GOOGLE_CLOUD_LOCATION;
+      if (useVertexAI && project && location) {
+        this._liveApiClient = new GoogleGenAI({
+          vertexai: true,
+          project,
+          location,
+          apiVersion: this.liveApiVersion
+        });
+      } else if (apiKey) {
+        this._liveApiClient = new GoogleGenAI({
+          apiKey,
+          apiVersion: this.liveApiVersion
+        });
+      } else {
+        throw new Error("API configuration required for live client");
       }
     }
+    return this._liveApiClient;
   }
+};
+
+// src/models/openai-llm.ts
+import OpenAI from "openai";
+var OpenAiLlm = class extends BaseLlm {
+  _client;
   /**
-   *
-   * This is a simplified implementation - you may need to adjust based on your specific requirements
+   * Constructor for OpenAI LLM
    */
-
-
-    return "thought";
-  }
-  return "regular";
+  constructor(model = "gpt-4o-mini") {
+    super(model);
   }
   /**
-   *
+   * Provides the list of supported models
    */
-
-
-    return parts?.some((part) => part.inlineData) || false;
+  static supportedModels() {
+    return ["gpt-3.5-.*", "gpt-4.*", "gpt-4o.*", "o1-.*", "o3-.*"];
   }
   /**
-   *
+   * Main content generation method - handles both streaming and non-streaming
    */
-
-
-
-
-
-
-
-
-
-
+  async *generateContentAsyncImpl(llmRequest, stream = false) {
+    this.preprocessRequest(llmRequest);
+    const model = llmRequest.model || this.model;
+    const messages = (llmRequest.contents || []).map(
+      (content) => this.contentToOpenAiMessage(content)
+    );
+    let tools;
+    if (llmRequest.config?.tools?.[0]?.functionDeclarations) {
+      tools = llmRequest.config.tools[0].functionDeclarations.map(
+        (funcDecl) => this.functionDeclarationToOpenAiTool(funcDecl)
+      );
+    }
+    const systemContent = llmRequest.getSystemInstructionText();
+    if (systemContent) {
+      messages.unshift({
+        role: "system",
+        content: systemContent
       });
     }
-
-
-
-
-
-
-
-
-
-
-    }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    let accumulatedText = "";
-    for await (const delta of result.textStream) {
-      accumulatedText += delta;
+    const openAiMessages = messages;
+    const requestParams = {
+      model,
+      messages: openAiMessages,
+      tools,
+      tool_choice: tools ? "auto" : void 0,
+      max_tokens: llmRequest.config?.maxOutputTokens,
+      temperature: llmRequest.config?.temperature,
+      top_p: llmRequest.config?.topP,
+      stream
+    };
+    if (stream) {
+      const streamResponse = await this.client.chat.completions.create({
+        ...requestParams,
+        stream: true
+      });
+      let thoughtText = "";
+      let text = "";
+      let usageMetadata;
+      const accumulatedToolCalls = [];
+      for await (const chunk of streamResponse) {
+        const choice = chunk.choices[0];
+        if (!choice) continue;
+        const delta = choice.delta;
+        const llmResponse = this.createChunkResponse(delta, chunk.usage);
+        if (chunk.usage) {
+          usageMetadata = chunk.usage;
+        }
+        if (llmResponse.content?.parts?.[0]?.text) {
+          const part0 = llmResponse.content.parts[0];
+          if (part0.thought) {
+            thoughtText += part0.text;
+          } else {
+            text += part0.text;
+          }
+          llmResponse.partial = true;
+        } else if ((thoughtText || text) && (!llmResponse.content || !llmResponse.content.parts || !this.hasInlineData(llmResponse))) {
+          const parts = [];
+          if (thoughtText) {
+            parts.push({ text: thoughtText, thought: true });
+          }
+          if (text) {
+            parts.push({ text });
+          }
           yield new LlmResponse({
             content: {
-
-
+              parts,
+              role: "model"
             },
-
+            usageMetadata: usageMetadata ? {
+              promptTokenCount: usageMetadata.prompt_tokens,
+              candidatesTokenCount: usageMetadata.completion_tokens,
+              totalTokenCount: usageMetadata.total_tokens
+            } : void 0
           });
+          thoughtText = "";
+          text = "";
         }
-
-
-
-
+        if (delta.tool_calls) {
+          for (const toolCall of delta.tool_calls) {
+            const index = toolCall.index || 0;
+            if (!accumulatedToolCalls[index]) {
+              accumulatedToolCalls[index] = {
+                index,
+                id: toolCall.id || "",
+                type: "function",
+                function: { name: "", arguments: "" }
+              };
+            }
+            if (toolCall.function?.name) {
+              accumulatedToolCalls[index].function.name += toolCall.function.name;
+            }
+            if (toolCall.function?.arguments) {
+              accumulatedToolCalls[index].function.arguments += toolCall.function.arguments;
+            }
+          }
         }
-        if (
-
-
-
-
-
-
+        if (choice.finish_reason) {
+          const parts = [];
+          if (thoughtText) {
+            parts.push({ text: thoughtText, thought: true });
+          }
+          if (text) {
+            parts.push({ text });
+          }
+          if (accumulatedToolCalls.length > 0) {
+            for (const toolCall of accumulatedToolCalls) {
+              if (toolCall.function?.name) {
+                parts.push({
+                  functionCall: {
+                    id: toolCall.id,
+                    name: toolCall.function.name,
+                    args: JSON.parse(toolCall.function.arguments || "{}")
+                  }
+                });
              }
-            }
+            }
           }
+          const finalResponse = new LlmResponse({
+            content: {
+              role: "model",
+              parts
+            },
+            usageMetadata: usageMetadata ? {
+              promptTokenCount: usageMetadata.prompt_tokens,
+              candidatesTokenCount: usageMetadata.completion_tokens,
+              totalTokenCount: usageMetadata.total_tokens
+            } : void 0,
+            finishReason: this.toAdkFinishReason(choice.finish_reason)
+          });
+          yield finalResponse;
+        } else {
+          yield llmResponse;
         }
-
-
-      yield new LlmResponse({
-        content: {
-          role: "model",
-          parts: parts.length > 0 ? parts : [{ text: "" }]
-        },
-        usageMetadata: finalUsage ? {
-          promptTokenCount: finalUsage.promptTokens,
-          candidatesTokenCount: finalUsage.completionTokens,
-          totalTokenCount: finalUsage.totalTokens
-        } : void 0,
-        finishReason: this.mapFinishReason(finishReason),
-        turnComplete: true
-      });
-    } else {
-      const result = await generateText(requestParams);
+      }
+      if ((text || thoughtText) && usageMetadata) {
         const parts = [];
-        if (
-          parts.push({ text:
+        if (thoughtText) {
+          parts.push({ text: thoughtText, thought: true });
         }
-        if (
-
-          parts.push({
-            functionCall: {
-              id: toolCall.toolCallId,
-              name: toolCall.toolName,
-              args: toolCall.args
-            }
-          });
-        }
+        if (text) {
+          parts.push({ text });
         }
         yield new LlmResponse({
           content: {
-
-
+            parts,
+            role: "model"
           },
-          usageMetadata:
-            promptTokenCount:
-            candidatesTokenCount:
-            totalTokenCount:
-          }
-          finishReason: this.mapFinishReason(result.finishReason),
-          turnComplete: true
+          usageMetadata: {
+            promptTokenCount: usageMetadata.prompt_tokens,
+            candidatesTokenCount: usageMetadata.completion_tokens,
+            totalTokenCount: usageMetadata.total_tokens
+          }
         });
       }
-    }
-    this.
-
-
-      model: this.model
+    } else {
+      const response = await this.client.chat.completions.create({
+        ...requestParams,
+        stream: false
       });
+      const choice = response.choices[0];
+      if (choice) {
+        const llmResponse = this.openAiMessageToLlmResponse(
+          choice,
+          response.usage
+        );
+        this.logger.debug(
+          `OpenAI response: ${response.usage?.completion_tokens || 0} tokens`
+        );
+        yield llmResponse;
+      }
     }
   }
   /**
-   *
+   * Live connection is not supported for OpenAI models
    */
-
-
-  for (const content of llmRequest.contents || []) {
-    const message = this.contentToAiSdkMessage(content);
-    if (message) {
-      messages.push(message);
-    }
-  }
-  return messages;
+  connect(_llmRequest) {
+    throw new Error(`Live connection is not supported for ${this.model}.`);
   }
   /**
-   *
+   * Create LlmResponse from streaming chunk - similar to Google's LlmResponse.create
    */
-
-  const
-  if (
-
-
-
-
-
-
-
-
+  createChunkResponse(delta, usage) {
+    const parts = [];
+    if (delta.content) {
+      const contentType = this.getContentType(delta.content);
+      if (contentType === "thought") {
+        parts.push({ text: delta.content, thought: true });
+      } else {
+        parts.push({ text: delta.content });
+      }
+    }
+    if (delta.tool_calls) {
+      for (const toolCall of delta.tool_calls) {
+        if (toolCall.type === "function" && toolCall.function?.name) {
+          parts.push({
+            functionCall: {
+              id: toolCall.id || "",
+              name: toolCall.function.name,
+              args: JSON.parse(toolCall.function.arguments || "{}")
+            }
+          });
         }
       }
     }
-    return
+    return new LlmResponse({
+      content: parts.length > 0 ? {
+        role: "model",
+        parts
+      } : void 0,
+      usageMetadata: usage ? {
+        promptTokenCount: usage.prompt_tokens,
+        candidatesTokenCount: usage.completion_tokens,
+        totalTokenCount: usage.total_tokens
+      } : void 0
+    });
   }
   /**
-   * Convert
+   * Convert OpenAI message to ADK LlmResponse
    */
-
-  const
-
-
+  openAiMessageToLlmResponse(choice, usage) {
+    const message = choice.message;
+    const parts = [];
+    if (message.content) {
+      parts.push({ text: message.content });
     }
-  if (
-  const
-
-
-
-
-
+    if (message.tool_calls) {
+      for (const toolCall of message.tool_calls) {
+        if (toolCall.type === "function") {
+          parts.push({
+            functionCall: {
+              id: toolCall.id,
+              name: toolCall.function.name,
+              args: JSON.parse(toolCall.function.arguments || "{}")
+            }
+          });
+        }
       }
-
+    }
+    return new LlmResponse({
+      content: {
+        role: "model",
+        parts
+      },
+      usageMetadata: usage ? {
+        promptTokenCount: usage.prompt_tokens,
+        candidatesTokenCount: usage.completion_tokens,
+        totalTokenCount: usage.total_tokens
+      } : void 0,
+      finishReason: this.toAdkFinishReason(choice.finish_reason)
+    });
+  }
+  /**
+   * Convert ADK Content to OpenAI ChatCompletionMessage
+   */
+  contentToOpenAiMessage(content) {
+    const role = this.toOpenAiRole(content.role);
+    if (role === "system") {
+      return {
+        role: "system",
+        content: content.parts?.[0]?.text || ""
+      };
     }
     if (content.parts?.some((part) => part.functionCall)) {
-      const
-
-
-      for (const textPart of textParts) {
-        if (textPart.text) {
-          contentParts2.push({
-            type: "text",
-            text: textPart.text
-          });
-        }
-      }
-      for (const funcPart of functionCalls) {
-        if (funcPart.functionCall) {
-          contentParts2.push({
-            type: "tool-call",
-            toolCallId: funcPart.functionCall.id,
-            toolName: funcPart.functionCall.name,
-            args: funcPart.functionCall.args
-          });
-        }
-      }
+      const functionCallPart = content.parts.find(
+        (part) => part.functionCall
+      );
       return {
         role: "assistant",
-
+        tool_calls: [
+          {
+            id: functionCallPart.functionCall.id || "",
+            type: "function",
+            function: {
+              name: functionCallPart.functionCall.name,
+              arguments: JSON.stringify(
+                functionCallPart.functionCall.args || {}
+              )
+            }
+          }
+        ]
       };
     }
     if (content.parts?.some((part) => part.functionResponse)) {
-      const
+      const functionResponsePart = content.parts.find(
         (part) => part.functionResponse
       );
-      const contentParts2 = functionResponses.map((part) => ({
-        type: "tool-result",
-        toolCallId: part.functionResponse.id,
-        toolName: part.functionResponse.name || "unknown",
-        result: part.functionResponse.response
-      }));
       return {
         role: "tool",
-
+        tool_call_id: functionResponsePart.functionResponse.id || "",
+        content: JSON.stringify(
+          functionResponsePart.functionResponse.response || {}
+        )
       };
     }
-
-
-
-
-
-        text: part.text
-      });
-    }
+    if (content.parts?.length === 1 && content.parts[0].text) {
+      return {
+        role,
+        content: content.parts[0].text
+      };
     }
-
-
+    return {
+      role,
+      content: (content.parts || []).map(
+        (part) => this.partToOpenAiContent(part)
+      )
+    };
+  }
+  /**
+   * Convert ADK Part to OpenAI message content
+   */
+  partToOpenAiContent(part) {
+    if (part.text) {
+      return {
+        type: "text",
+        text: part.text
+      };
     }
-    if (
-
-
-
-
-
-
-    }
-    return { role: "user", content: textContent };
+    if (part.inline_data?.mime_type && part.inline_data?.data) {
+      return {
+        type: "image_url",
+        image_url: {
+          url: `data:${part.inline_data.mime_type};base64,${part.inline_data.data}`
+        }
+      };
     }
-
-
-
+    throw new Error("Unsupported part type for OpenAI conversion");
+  }
+  /**
+   * Transform JSON schema to use lowercase types for OpenAI compatibility
+   */
+  transformSchemaForOpenAi(schema) {
+    if (Array.isArray(schema)) {
+      return schema.map((item) => this.transformSchemaForOpenAi(item));
     }
-    if (
-      return
+    if (!schema || typeof schema !== "object") {
+      return schema;
     }
-
+    const transformedSchema = { ...schema };
+    if (transformedSchema.type && typeof transformedSchema.type === "string") {
+      transformedSchema.type = transformedSchema.type.toLowerCase();
+    }
+    if (transformedSchema.properties) {
+      transformedSchema.properties = Object.fromEntries(
+        Object.entries(transformedSchema.properties).map(([key, value]) => [
+          key,
+          this.transformSchemaForOpenAi(value)
+        ])
+      );
+    }
+    if (transformedSchema.items) {
+      transformedSchema.items = this.transformSchemaForOpenAi(
+        transformedSchema.items
+      );
+    }
+    const arrayKeywords = ["anyOf", "oneOf", "allOf"];
+    for (const keyword of arrayKeywords) {
+      if (transformedSchema[keyword]) {
+        transformedSchema[keyword] = this.transformSchemaForOpenAi(
+          transformedSchema[keyword]
+        );
+      }
+    }
+    return transformedSchema;
   }
   /**
-   *
+   * Convert ADK function declaration to OpenAI tool
    */
-
-
-
-
-
-
-
-
-
+  functionDeclarationToOpenAiTool(functionDeclaration) {
+    return {
+      type: "function",
+      function: {
+        name: functionDeclaration.name,
+        description: functionDeclaration.description || "",
+        parameters: this.transformSchemaForOpenAi(
+          functionDeclaration.parameters || {}
+        )
+      }
+    };
+  }
+  /**
+   * Convert ADK role to OpenAI role format
+   */
+  toOpenAiRole(role) {
+    if (role === "model") {
+      return "assistant";
+    }
+    if (role === "system") {
+      return "system";
     }
+    return "user";
   }
   /**
-   *
+   * Convert OpenAI finish reason to ADK finish reason
    */
-
-  switch (
+  toAdkFinishReason(openaiFinishReason) {
+    switch (openaiFinishReason) {
       case "stop":
-      case "
+      case "tool_calls":
         return "STOP";
       case "length":
-      case "max_tokens":
         return "MAX_TOKENS";
       default:
         return "FINISH_REASON_UNSPECIFIED";
     }
   }
+  /**
+   * Preprocess request similar to Google LLM
+   */
+  preprocessRequest(llmRequest) {
+    if (llmRequest.config) {
+      llmRequest.config.labels = void 0;
+      if (llmRequest.contents) {
+        for (const content of llmRequest.contents) {
+          if (!content.parts) continue;
+          for (const part of content.parts) {
+            this.preprocessPart(part);
+          }
+        }
+      }
+    }
+  }
+  /**
+   * Preprocess individual parts for OpenAI compatibility
+   */
+  preprocessPart(part) {
+    if (part.inline_data) {
+      if (!part.inline_data.mime_type || !part.inline_data.data) {
+        delete part.inline_data;
+      }
+    }
+  }
+  /**
+   * Detect content type for flow control
+   * This is a simplified implementation - you may need to adjust based on your specific requirements
+   */
+  getContentType(content) {
+    if (content.includes("<thinking>") || content.includes("[thinking]")) {
+      return "thought";
+    }
+    return "regular";
+  }
+  /**
+   * Check if response has inline data (similar to Google LLM)
+   */
+  hasInlineData(response) {
+    const parts = response.content?.parts;
+    return parts?.some((part) => part.inlineData) || false;
+  }
+  /**
+   * Gets the OpenAI client
+   */
+  get client() {
+    if (!this._client) {
+      const apiKey = process.env.OPENAI_API_KEY;
+      if (!apiKey) {
+        throw new Error(
+          "OPENAI_API_KEY environment variable is required for OpenAI models"
+        );
+      }
+      this._client = new OpenAI({
+        apiKey
+      });
+    }
+    return this._client;
+  }
 };
 
 // src/models/llm-registry.ts
```
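Client construction in both restored classes is driven entirely by environment variables: `GoogleLlm` prefers Vertex AI when `GOOGLE_GENAI_USE_VERTEXAI=true` plus `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` are all set, falls back to `GOOGLE_API_KEY`, and throws otherwise; `OpenAiLlm` requires `OPENAI_API_KEY`. A condensed sketch of the Google selection logic (note the bundled `apiBackend` getter checks only the Vertex flag, while `apiClient` additionally requires project and location; this sketch condenses the latter):

```ts
function resolveGoogleBackend(env: NodeJS.ProcessEnv = process.env): "VERTEX_AI" | "GEMINI_API" {
  const useVertexAI = env.GOOGLE_GENAI_USE_VERTEXAI === "true";
  if (useVertexAI && env.GOOGLE_CLOUD_PROJECT && env.GOOGLE_CLOUD_LOCATION) {
    return "VERTEX_AI";
  }
  if (env.GOOGLE_API_KEY) {
    return "GEMINI_API";
  }
  throw new Error(
    "Set GOOGLE_API_KEY, or GOOGLE_GENAI_USE_VERTEXAI=true with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION."
  );
}
```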
```diff
@@ -2640,6 +2718,25 @@ function registerProviders() {
 }
 registerProviders();
 
+// src/auth/auth-config.ts
+var AuthConfig = class {
+  /**
+   * The authentication scheme
+   */
+  authScheme;
+  /**
+   * Additional context properties
+   */
+  context;
+  /**
+   * Constructor for AuthConfig
+   */
+  constructor(config) {
+    this.authScheme = config.authScheme;
+    this.context = config.context;
+  }
+};
+
 // src/auth/auth-credential.ts
 var AuthCredentialType = /* @__PURE__ */ ((AuthCredentialType2) => {
   AuthCredentialType2["API_KEY"] = "api_key";
```
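`AuthConfig` itself is a thin value holder: the constructor copies `authScheme` and `context` verbatim. A construction sketch; the root re-export and the shape of `authScheme` are assumptions, not shown in this diff:

```ts
import { AuthConfig } from "@iqai/adk"; // assumed re-export from the package root

const config = new AuthConfig({
  authScheme: { type: "apiKey", name: "x-api-key", in: "header" }, // illustrative shape
  context: { service: "example" }
});
console.log(config.authScheme, config.context);
```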
```diff
@@ -2842,25 +2939,6 @@ var OAuth2Credential = class extends AuthCredential {
   }
 };
 
-// src/auth/auth-config.ts
-var AuthConfig = class {
-  /**
-   * The authentication scheme
-   */
-  authScheme;
-  /**
-   * Additional context properties
-   */
-  context;
-  /**
-   * Constructor for AuthConfig
-   */
-  constructor(config) {
-    this.authScheme = config.authScheme;
-    this.context = config.context;
-  }
-};
-
 // src/auth/auth-handler.ts
 var AuthHandler = class {
   /**
```