@iqai/adk 0.1.21 → 0.2.0

This diff shows the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
1
- "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { newObj[key] = obj[key]; } } } newObj.default = obj; return newObj; } } function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; } var _class; var _class2; var _class3; var _class4; var _class5; var _class6; var _class7; var _class8; var _class9; var _class10; var _class11; var _class12; var _class13; var _class14; var _class15; var _class16; var _class17; var _class18; var _class19; var _class20; var _class21; var _class22; var _class23; var _class24; var _class25; var _class26; var _class27; var _class28; var _class29; var _class30; var _class31; var _class32; var _class33; var _class34;var __defProp = Object.defineProperty;
1
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { newObj[key] = obj[key]; } } } newObj.default = obj; return newObj; } } function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; } var _class; var _class2; var _class3; var _class4; var _class5; var _class6; var _class7; var _class8; var _class9; var _class10; var _class11; var _class12; var _class13; var _class14; var _class15; var _class16; var _class17; var _class18; var _class19; var _class20; var _class21; var _class22; var _class23; var _class24; var _class25; var _class26; var _class27; var _class28; var _class29; var _class30; var _class31; var _class32; var _class33; var _class34; var _class35;var __defProp = Object.defineProperty;
2
2
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
3
3
  var __getOwnPropNames = Object.getOwnPropertyNames;
4
4
  var __hasOwnProp = Object.prototype.hasOwnProperty;
@@ -53,7 +53,7 @@ var init_logger = __esm({
53
53
  }
54
54
  info(message, ...args) {
55
55
  const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
56
- console.info(
56
+ console.debug(
57
57
  this.colorize(`[${time}] \u2139\uFE0F [${this.name}] ${message}`),
58
58
  ...args
59
59
  );
@@ -229,7 +229,7 @@ var init_base_tool = __esm({
229
229
  * @param context The context of the tool
230
230
  * @returns The result of running the tool
231
231
  */
232
- async runAsync(args, context) {
232
+ async runAsync(args, context4) {
233
233
  throw new Error(`${this.constructor.name} runAsync is not implemented`);
234
234
  }
235
235
  /**
@@ -253,6 +253,12 @@ var init_base_tool = __esm({
253
253
  if (!toolWithFunctionDeclarations.functionDeclarations) {
254
254
  toolWithFunctionDeclarations.functionDeclarations = [];
255
255
  }
256
+ const alreadyExists = toolWithFunctionDeclarations.functionDeclarations.some(
257
+ (fd) => _optionalChain([fd, 'optionalAccess', _2 => _2.name]) === functionDeclaration.name
258
+ );
259
+ if (alreadyExists) {
260
+ return;
261
+ }
256
262
  toolWithFunctionDeclarations.functionDeclarations.push(
257
263
  functionDeclaration
258
264
  );
@@ -281,7 +287,7 @@ var init_base_tool = __esm({
281
287
  * @param context Tool execution context
282
288
  * @returns Result of the tool execution or error information
283
289
  */
284
- async safeExecute(args, context) {
290
+ async safeExecute(args, context4) {
285
291
  if (!this.validateArguments(args)) {
286
292
  return {
287
293
  error: "Invalid arguments",
@@ -302,7 +308,7 @@ var init_base_tool = __esm({
302
308
  );
303
309
  await new Promise((resolve) => setTimeout(resolve, delay));
304
310
  }
305
- const result = await this.runAsync(args, context);
311
+ const result = await this.runAsync(args, context4);
306
312
  return { result };
307
313
  } catch (error) {
308
314
  lastError = error instanceof Error ? error : new Error(String(error));
@@ -312,7 +318,7 @@ var init_base_tool = __esm({
312
318
  }
313
319
  return {
314
320
  error: "Execution failed",
315
- message: _optionalChain([lastError, 'optionalAccess', _2 => _2.message]) || "Unknown error occurred",
321
+ message: _optionalChain([lastError, 'optionalAccess', _3 => _3.message]) || "Unknown error occurred",
316
322
  tool: this.name
317
323
  };
318
324
  }
@@ -484,23 +490,23 @@ var init_function_tool = __esm({
484
490
  * @param options Optional configuration for the tool
485
491
  */
486
492
  constructor(func, options) {
487
- const name = _optionalChain([options, 'optionalAccess', _3 => _3.name]) || func.name;
488
- const description = _optionalChain([options, 'optionalAccess', _4 => _4.description]) || _optionalChain([(func.toString().match(/\/\*\*([\s\S]*?)\*\//) || []), 'access', _5 => _5[1], 'optionalAccess', _6 => _6.trim, 'call', _7 => _7()]) || "";
493
+ const name = _optionalChain([options, 'optionalAccess', _4 => _4.name]) || func.name;
494
+ const description = _optionalChain([options, 'optionalAccess', _5 => _5.description]) || _optionalChain([(func.toString().match(/\/\*\*([\s\S]*?)\*\//) || []), 'access', _6 => _6[1], 'optionalAccess', _7 => _7.trim, 'call', _8 => _8()]) || "";
489
495
  super({
490
496
  name,
491
497
  description,
492
- isLongRunning: _optionalChain([options, 'optionalAccess', _8 => _8.isLongRunning]) || false,
493
- shouldRetryOnFailure: _optionalChain([options, 'optionalAccess', _9 => _9.shouldRetryOnFailure]) || false,
494
- maxRetryAttempts: _optionalChain([options, 'optionalAccess', _10 => _10.maxRetryAttempts]) || 3
498
+ isLongRunning: _optionalChain([options, 'optionalAccess', _9 => _9.isLongRunning]) || false,
499
+ shouldRetryOnFailure: _optionalChain([options, 'optionalAccess', _10 => _10.shouldRetryOnFailure]) || false,
500
+ maxRetryAttempts: _optionalChain([options, 'optionalAccess', _11 => _11.maxRetryAttempts]) || 3
495
501
  });_class3.prototype.__init6.call(this);_class3.prototype.__init7.call(this);;
496
502
  this.func = func;
497
503
  this.mandatoryArgs = this.getMandatoryArgs(func);
498
- this.parameterTypes = _optionalChain([options, 'optionalAccess', _11 => _11.parameterTypes]) || {};
504
+ this.parameterTypes = _optionalChain([options, 'optionalAccess', _12 => _12.parameterTypes]) || {};
499
505
  }
500
506
  /**
501
507
  * Executes the wrapped function with the provided arguments.
502
508
  */
503
- async runAsync(args, context) {
509
+ async runAsync(args, context4) {
504
510
  try {
505
511
  const missingArgs = this.getMissingMandatoryArgs(args);
506
512
  if (missingArgs.length > 0) {
@@ -513,13 +519,13 @@ You could retry calling this tool, but it is IMPORTANT for you to provide all th
513
519
  }
514
520
  const argsToCall = { ...args };
515
521
  if (this.functionAcceptsToolContext()) {
516
- argsToCall.toolContext = context;
522
+ argsToCall.toolContext = context4;
517
523
  }
518
524
  const funcParams = this.getFunctionParameters();
519
525
  const argValues = [];
520
526
  for (const paramName of funcParams) {
521
527
  if (paramName === "toolContext" && this.functionAcceptsToolContext()) {
522
- argValues.push(context);
528
+ argValues.push(context4);
523
529
  } else if (paramName in argsToCall) {
524
530
  const convertedValue = this.convertArgumentType(
525
531
  argsToCall[paramName],
@@ -549,7 +555,7 @@ You could retry calling this tool, but it is IMPORTANT for you to provide all th
549
555
  description: this.description,
550
556
  ignoreParams: ["toolContext"]
551
557
  });
552
- if (Object.keys(this.parameterTypes).length > 0 && _optionalChain([declaration, 'access', _12 => _12.parameters, 'optionalAccess', _13 => _13.properties])) {
558
+ if (Object.keys(this.parameterTypes).length > 0 && _optionalChain([declaration, 'access', _13 => _13.parameters, 'optionalAccess', _14 => _14.properties])) {
553
559
  for (const [paramName, paramType] of Object.entries(
554
560
  this.parameterTypes
555
561
  )) {
@@ -647,9 +653,9 @@ You could retry calling this tool, but it is IMPORTANT for you to provide all th
647
653
  return this.parameterTypes[paramName].toLowerCase();
648
654
  }
649
655
  const declaration = this.getDeclaration();
650
- if (_optionalChain([declaration, 'optionalAccess', _14 => _14.parameters, 'optionalAccess', _15 => _15.properties])) {
656
+ if (_optionalChain([declaration, 'optionalAccess', _15 => _15.parameters, 'optionalAccess', _16 => _16.properties])) {
651
657
  const paramSchema = declaration.parameters.properties[paramName];
652
- if (_optionalChain([paramSchema, 'optionalAccess', _16 => _16.type])) {
658
+ if (_optionalChain([paramSchema, 'optionalAccess', _17 => _17.type])) {
653
659
  return paramSchema.type.toLowerCase();
654
660
  }
655
661
  }
@@ -735,11 +741,11 @@ var LlmRequest = class {
735
741
  */
736
742
 
737
743
  constructor(data) {
738
- this.model = _optionalChain([data, 'optionalAccess', _17 => _17.model]);
739
- this.contents = _nullishCoalesce(_optionalChain([data, 'optionalAccess', _18 => _18.contents]), () => ( []));
740
- this.config = _optionalChain([data, 'optionalAccess', _19 => _19.config]);
741
- this.liveConnectConfig = _nullishCoalesce(_optionalChain([data, 'optionalAccess', _20 => _20.liveConnectConfig]), () => ( {}));
742
- this.toolsDict = _nullishCoalesce(_optionalChain([data, 'optionalAccess', _21 => _21.toolsDict]), () => ( {}));
744
+ this.model = _optionalChain([data, 'optionalAccess', _18 => _18.model]);
745
+ this.contents = _nullishCoalesce(_optionalChain([data, 'optionalAccess', _19 => _19.contents]), () => ( []));
746
+ this.config = _optionalChain([data, 'optionalAccess', _20 => _20.config]);
747
+ this.liveConnectConfig = _nullishCoalesce(_optionalChain([data, 'optionalAccess', _21 => _21.liveConnectConfig]), () => ( {}));
748
+ this.toolsDict = _nullishCoalesce(_optionalChain([data, 'optionalAccess', _22 => _22.toolsDict]), () => ( {}));
743
749
  }
744
750
  /**
745
751
  * Appends instructions to the system instruction.
@@ -760,10 +766,10 @@ ${instructions.join("\n\n")}`;
760
766
  * @param tools The tools to append.
761
767
  */
762
768
  appendTools(tools) {
763
- if (!_optionalChain([tools, 'optionalAccess', _22 => _22.length])) return;
769
+ if (!_optionalChain([tools, 'optionalAccess', _23 => _23.length])) return;
764
770
  const declarations = [];
765
771
  for (const tool of tools) {
766
- const declaration = _optionalChain([tool, 'access', _23 => _23.getDeclaration, 'optionalCall', _24 => _24()]);
772
+ const declaration = _optionalChain([tool, 'access', _24 => _24.getDeclaration, 'optionalCall', _25 => _25()]);
767
773
  if (declaration) {
768
774
  declarations.push(declaration);
769
775
  this.toolsDict[tool.name] = tool;
@@ -790,7 +796,7 @@ ${instructions.join("\n\n")}`;
790
796
  * @returns The system instruction as a string, or undefined if not set.
791
797
  */
792
798
  getSystemInstructionText() {
793
- if (!_optionalChain([this, 'access', _25 => _25.config, 'optionalAccess', _26 => _26.systemInstruction])) {
799
+ if (!_optionalChain([this, 'access', _26 => _26.config, 'optionalAccess', _27 => _27.systemInstruction])) {
794
800
  return void 0;
795
801
  }
796
802
  const systemInstruction = this.config.systemInstruction;
@@ -818,7 +824,7 @@ ${instructions.join("\n\n")}`;
818
824
  if (Array.isArray(content)) {
819
825
  return content.map((part) => part.text || "").filter(Boolean).join("");
820
826
  }
821
- if (_optionalChain([content, 'optionalAccess', _27 => _27.parts])) {
827
+ if (_optionalChain([content, 'optionalAccess', _28 => _28.parts])) {
822
828
  return content.parts.map((part) => part.text || "").filter(Boolean).join("");
823
829
  }
824
830
  return String(content || "");
@@ -827,70 +833,23 @@ ${instructions.join("\n\n")}`;
827
833
 
828
834
  // src/models/llm-response.ts
829
835
  var LlmResponse = class _LlmResponse {
830
- /**
831
- * Unique identifier for the response.
832
- */
833
836
 
834
- /**
835
- * The content generated by the model.
836
- */
837
837
 
838
- /**
839
- * The grounding metadata of the response.
840
- */
841
838
 
842
- /**
843
- * Indicates whether the text content is part of an unfinished text stream.
844
- */
845
839
 
846
- /**
847
- * Indicates whether the response from the model is complete.
848
- */
849
840
 
850
- /**
851
- * Error code if the response is an error.
852
- */
853
841
 
854
- /**
855
- * Error message if the response is an error.
856
- */
857
842
 
858
- /**
859
- * Flag indicating that LLM was interrupted when generating the content.
860
- */
861
843
 
862
- /**
863
- * The custom metadata of the LlmResponse.
864
- */
865
844
 
866
- /**
867
- * The usage metadata of the LlmResponse.
868
- */
869
845
 
870
- /**
871
- * Index of the candidate response.
872
- */
873
846
 
874
- /**
875
- * Reason why the model finished generating.
876
- */
877
847
 
878
- /**
879
- * Error object if the response is an error.
880
- */
881
848
 
882
- /**
883
- * Creates a new LlmResponse.
884
- */
849
+
885
850
  constructor(data = {}) {
886
851
  Object.assign(this, data);
887
852
  }
888
- /**
889
- * Creates an LlmResponse from a GenerateContentResponse.
890
- *
891
- * @param generateContentResponse The GenerateContentResponse to create the LlmResponse from.
892
- * @returns The LlmResponse.
893
- */
894
853
  static create(generateContentResponse) {
895
854
  const usageMetadata = generateContentResponse.usageMetadata;
896
855
  if (generateContentResponse.candidates && generateContentResponse.candidates.length > 0) {
@@ -922,15 +881,6 @@ var LlmResponse = class _LlmResponse {
922
881
  usageMetadata
923
882
  });
924
883
  }
925
- /**
926
- * Creates an LlmResponse from an error.
927
- *
928
- * @param error The error object or message.
929
- * @param options Additional options for the error response.
930
- * @param options.errorCode A specific error code for the response.
931
- * @param options.model The model that was being used when the error occurred.
932
- * @returns The LlmResponse.
933
- */
934
884
  static fromError(error, options = {}) {
935
885
  const errorMessage = error instanceof Error ? error.message : String(error);
936
886
  const errorCode = options.errorCode || "UNKNOWN_ERROR";
@@ -956,6 +906,7 @@ init_logger();
956
906
 
957
907
 
958
908
 
909
+
959
910
  var _api = require('@opentelemetry/api');
960
911
  var _autoinstrumentationsnode = require('@opentelemetry/auto-instrumentations-node');
961
912
  var _exportertraceotlphttp = require('@opentelemetry/exporter-trace-otlp-http');
@@ -994,13 +945,24 @@ var TelemetryService = (_class4 = class {
994
945
  this.sdk = new (0, _sdknode.NodeSDK)({
995
946
  resource,
996
947
  traceExporter,
997
- instrumentations: [_autoinstrumentationsnode.getNodeAutoInstrumentations.call(void 0, )]
948
+ instrumentations: [
949
+ _autoinstrumentationsnode.getNodeAutoInstrumentations.call(void 0, {
950
+ // Follow Python ADK approach: let all HTTP instrumentation through.
951
+ // This provides transparency and aligns with standard OpenTelemetry behavior.
952
+ // High-level LLM tracing is provided through dedicated ADK spans.
953
+ "@opentelemetry/instrumentation-http": {
954
+ ignoreIncomingRequestHook: (req) => {
955
+ return true;
956
+ }
957
+ }
958
+ })
959
+ ]
998
960
  });
999
961
  try {
1000
962
  this.sdk.start();
1001
963
  this.isInitialized = true;
1002
964
  this.tracer = _api.trace.getTracer("iqai-adk", config.appVersion || "0.1.0");
1003
- _api.diag.info("OpenTelemetry SDK started successfully.");
965
+ _api.diag.debug("OpenTelemetry SDK started successfully.");
1004
966
  } catch (error) {
1005
967
  _api.diag.error("Error starting OpenTelemetry SDK:", error);
1006
968
  throw error;
@@ -1043,7 +1005,7 @@ var TelemetryService = (_class4 = class {
1043
1005
  });
1044
1006
  await Promise.race([this.sdk.shutdown(), timeoutPromise]);
1045
1007
  this.isInitialized = false;
1046
- _api.diag.info("Telemetry terminated successfully.");
1008
+ _api.diag.debug("Telemetry terminated successfully.");
1047
1009
  } catch (error) {
1048
1010
  if (error instanceof Error && error.message.includes("timeout")) {
1049
1011
  _api.diag.warn("Telemetry shutdown timed out, some traces may be lost");
@@ -1063,7 +1025,7 @@ var TelemetryService = (_class4 = class {
1063
1025
  if (!span) return;
1064
1026
  let toolCallId = "<not specified>";
1065
1027
  let toolResponse = "<not specified>";
1066
- if (_optionalChain([functionResponseEvent, 'access', _28 => _28.content, 'optionalAccess', _29 => _29.parts]) && functionResponseEvent.content.parts.length > 0) {
1028
+ if (_optionalChain([functionResponseEvent, 'access', _29 => _29.content, 'optionalAccess', _30 => _30.parts]) && functionResponseEvent.content.parts.length > 0) {
1067
1029
  const functionResponse = functionResponseEvent.content.parts[0].functionResponse;
1068
1030
  if (functionResponse) {
1069
1031
  toolCallId = functionResponse.id || "<not specified>";
@@ -1071,7 +1033,7 @@ var TelemetryService = (_class4 = class {
1071
1033
  }
1072
1034
  }
1073
1035
  span.setAttributes({
1074
- "gen_ai.system.name": "iqai-adk",
1036
+ "gen_ai.system": "iqai-adk",
1075
1037
  "gen_ai.operation.name": "execute_tool",
1076
1038
  "gen_ai.tool.name": tool.name,
1077
1039
  "gen_ai.tool.description": tool.description,
@@ -1085,7 +1047,7 @@ var TelemetryService = (_class4 = class {
1085
1047
  ...process.env.NODE_ENV && {
1086
1048
  "deployment.environment.name": process.env.NODE_ENV
1087
1049
  },
1088
- // Tool-specific data
1050
+ // ADK-specific attributes (matching Python namespace pattern)
1089
1051
  "adk.tool_call_args": this._safeJsonStringify(args),
1090
1052
  "adk.event_id": functionResponseEvent.invocationId,
1091
1053
  "adk.tool_response": this._safeJsonStringify(toolResponse),
@@ -1101,9 +1063,8 @@ var TelemetryService = (_class4 = class {
1101
1063
  if (!span) return;
1102
1064
  const requestData = this._buildLlmRequestForTrace(llmRequest);
1103
1065
  span.setAttributes({
1104
- // Standard OpenTelemetry attributes
1105
- "gen_ai.system.name": "iqai-adk",
1106
- "gen_ai.operation.name": "generate",
1066
+ // Standard OpenTelemetry attributes (following Python pattern)
1067
+ "gen_ai.system": "iqai-adk",
1107
1068
  "gen_ai.request.model": llmRequest.model,
1108
1069
  // Session and user tracking (maps to Langfuse sessionId, userId)
1109
1070
  "session.id": invocationContext.session.id,
@@ -1116,15 +1077,21 @@ var TelemetryService = (_class4 = class {
1116
1077
  "gen_ai.request.max_tokens": llmRequest.config.maxOutputTokens || 0,
1117
1078
  "gen_ai.request.temperature": llmRequest.config.temperature || 0,
1118
1079
  "gen_ai.request.top_p": llmRequest.config.topP || 0,
1119
- // Legacy ADK attributes (keep for backward compatibility)
1120
1080
  "adk.system_name": "iqai-adk",
1121
1081
  "adk.request_model": llmRequest.model,
1122
- "adk.invocation_id": invocationContext.session.id,
1082
+ // ADK-specific attributes (matching Python namespace pattern)
1083
+ "adk.invocation_id": invocationContext.invocationId,
1123
1084
  "adk.session_id": invocationContext.session.id,
1124
1085
  "adk.event_id": eventId,
1125
1086
  "adk.llm_request": this._safeJsonStringify(requestData),
1126
1087
  "adk.llm_response": this._safeJsonStringify(llmResponse)
1127
1088
  });
1089
+ if (llmResponse.usageMetadata) {
1090
+ span.setAttributes({
1091
+ "gen_ai.usage.input_tokens": llmResponse.usageMetadata.promptTokenCount || 0,
1092
+ "gen_ai.usage.output_tokens": llmResponse.usageMetadata.candidatesTokenCount || 0
1093
+ });
1094
+ }
1128
1095
  span.addEvent("gen_ai.content.prompt", {
1129
1096
  "gen_ai.prompt": this._safeJsonStringify(requestData.messages)
1130
1097
  });
@@ -1137,9 +1104,14 @@ var TelemetryService = (_class4 = class {
1137
1104
  */
1138
1105
  async *traceAsyncGenerator(spanName, generator) {
1139
1106
  const span = this.tracer.startSpan(spanName);
1107
+ const spanContext = _api.trace.setSpan(_api.context.active(), span);
1140
1108
  try {
1141
- for await (const item of generator) {
1142
- yield item;
1109
+ while (true) {
1110
+ const result = await _api.context.with(spanContext, () => generator.next());
1111
+ if (result.done) {
1112
+ break;
1113
+ }
1114
+ yield result.value;
1143
1115
  }
1144
1116
  } catch (error) {
1145
1117
  span.recordException(error);
@@ -1171,7 +1143,7 @@ var TelemetryService = (_class4 = class {
1171
1143
  contents: []
1172
1144
  };
1173
1145
  for (const content of llmRequest.contents || []) {
1174
- const parts = _optionalChain([content, 'access', _30 => _30.parts, 'optionalAccess', _31 => _31.filter, 'call', _32 => _32((part) => !part.inlineData)]) || [];
1146
+ const parts = _optionalChain([content, 'access', _31 => _31.parts, 'optionalAccess', _32 => _32.filter, 'call', _33 => _33((part) => !part.inlineData)]) || [];
1175
1147
  result.contents.push({
1176
1148
  role: content.role,
1177
1149
  parts
@@ -1226,7 +1198,7 @@ var traceLlmCall = (invocationContext, eventId, llmRequest, llmResponse) => tele
1226
1198
  // src/models/base-llm.ts
1227
1199
  var BaseLlm = (_class5 = class {
1228
1200
  /**
1229
- * The name of the LLM, e.g. gemini-1.5-flash or gemini-1.5-flash-001.
1201
+ * The name of the LLM, e.g. gemini-2.5-flash or gemini-2.5-flash-001.
1230
1202
  */
1231
1203
 
1232
1204
  __init11() {this.logger = new Logger({ name: "BaseLlm" })}
@@ -1265,14 +1237,14 @@ var BaseLlm = (_class5 = class {
1265
1237
  "gen_ai.system.name": "iqai-adk",
1266
1238
  "gen_ai.operation.name": "generate",
1267
1239
  "gen_ai.request.model": this.model,
1268
- "gen_ai.request.max_tokens": _optionalChain([llmRequest, 'access', _33 => _33.config, 'optionalAccess', _34 => _34.maxOutputTokens]) || 0,
1269
- "gen_ai.request.temperature": _optionalChain([llmRequest, 'access', _35 => _35.config, 'optionalAccess', _36 => _36.temperature]) || 0,
1270
- "gen_ai.request.top_p": _optionalChain([llmRequest, 'access', _37 => _37.config, 'optionalAccess', _38 => _38.topP]) || 0,
1240
+ "gen_ai.request.max_tokens": _optionalChain([llmRequest, 'access', _34 => _34.config, 'optionalAccess', _35 => _35.maxOutputTokens]) || 0,
1241
+ "gen_ai.request.temperature": _optionalChain([llmRequest, 'access', _36 => _36.config, 'optionalAccess', _37 => _37.temperature]) || 0,
1242
+ "gen_ai.request.top_p": _optionalChain([llmRequest, 'access', _38 => _38.config, 'optionalAccess', _39 => _39.topP]) || 0,
1271
1243
  "adk.llm_request": JSON.stringify({
1272
1244
  model: this.model,
1273
- contents: _optionalChain([llmRequest, 'access', _39 => _39.contents, 'optionalAccess', _40 => _40.map, 'call', _41 => _41((content) => ({
1245
+ contents: _optionalChain([llmRequest, 'access', _40 => _40.contents, 'optionalAccess', _41 => _41.map, 'call', _42 => _42((content) => ({
1274
1246
  role: content.role,
1275
- parts: _optionalChain([content, 'access', _42 => _42.parts, 'optionalAccess', _43 => _43.map, 'call', _44 => _44((part) => ({
1247
+ parts: _optionalChain([content, 'access', _43 => _43.parts, 'optionalAccess', _44 => _44.map, 'call', _45 => _45((part) => ({
1276
1248
  text: typeof part.text === "string" ? part.text.substring(0, 200) + (part.text.length > 200 ? "..." : "") : "[non_text_content]"
1277
1249
  }))])
1278
1250
  }))]),
@@ -1396,9 +1368,9 @@ var AiSdkLlm = (_class6 = class extends BaseLlm {
1396
1368
  messages,
1397
1369
  system: systemMessage,
1398
1370
  tools: Object.keys(tools).length > 0 ? tools : void 0,
1399
- maxTokens: _optionalChain([request, 'access', _45 => _45.config, 'optionalAccess', _46 => _46.maxOutputTokens]),
1400
- temperature: _optionalChain([request, 'access', _47 => _47.config, 'optionalAccess', _48 => _48.temperature]),
1401
- topP: _optionalChain([request, 'access', _49 => _49.config, 'optionalAccess', _50 => _50.topP])
1371
+ maxTokens: _optionalChain([request, 'access', _46 => _46.config, 'optionalAccess', _47 => _47.maxOutputTokens]),
1372
+ temperature: _optionalChain([request, 'access', _48 => _48.config, 'optionalAccess', _49 => _49.temperature]),
1373
+ topP: _optionalChain([request, 'access', _50 => _50.config, 'optionalAccess', _51 => _51.topP])
1402
1374
  };
1403
1375
  if (stream) {
1404
1376
  const result = _ai.streamText.call(void 0, requestParams);
@@ -1538,7 +1510,7 @@ var AiSdkLlm = (_class6 = class extends BaseLlm {
1538
1510
  */
1539
1511
  convertToAiSdkTools(llmRequest) {
1540
1512
  const tools = {};
1541
- if (_optionalChain([llmRequest, 'access', _51 => _51.config, 'optionalAccess', _52 => _52.tools])) {
1513
+ if (_optionalChain([llmRequest, 'access', _52 => _52.config, 'optionalAccess', _53 => _53.tools])) {
1542
1514
  for (const toolConfig of llmRequest.config.tools) {
1543
1515
  if ("functionDeclarations" in toolConfig) {
1544
1516
  for (const funcDecl of toolConfig.functionDeclarations) {
@@ -1572,7 +1544,7 @@ var AiSdkLlm = (_class6 = class extends BaseLlm {
1572
1544
  }
1573
1545
  return { role: "user", content: textContent };
1574
1546
  }
1575
- if (_optionalChain([content, 'access', _53 => _53.parts, 'optionalAccess', _54 => _54.some, 'call', _55 => _55((part) => part.functionCall)])) {
1547
+ if (_optionalChain([content, 'access', _54 => _54.parts, 'optionalAccess', _55 => _55.some, 'call', _56 => _56((part) => part.functionCall)])) {
1576
1548
  const textParts = content.parts.filter((part) => part.text);
1577
1549
  const functionCalls = content.parts.filter((part) => part.functionCall);
1578
1550
  const contentParts2 = [];
@@ -1599,7 +1571,7 @@ var AiSdkLlm = (_class6 = class extends BaseLlm {
1599
1571
  content: contentParts2
1600
1572
  };
1601
1573
  }
1602
- if (_optionalChain([content, 'access', _56 => _56.parts, 'optionalAccess', _57 => _57.some, 'call', _58 => _58((part) => part.functionResponse)])) {
1574
+ if (_optionalChain([content, 'access', _57 => _57.parts, 'optionalAccess', _58 => _58.some, 'call', _59 => _59((part) => part.functionResponse)])) {
1603
1575
  const functionResponses = content.parts.filter(
1604
1576
  (part) => part.functionResponse
1605
1577
  );
@@ -1704,7 +1676,7 @@ var AnthropicLlm = (_class7 = class extends BaseLlm {
1704
1676
  (content) => this.contentToAnthropicMessage(content)
1705
1677
  );
1706
1678
  let tools;
1707
- if (_optionalChain([llmRequest, 'access', _59 => _59.config, 'optionalAccess', _60 => _60.tools, 'optionalAccess', _61 => _61[0], 'optionalAccess', _62 => _62.functionDeclarations])) {
1679
+ if (_optionalChain([llmRequest, 'access', _60 => _60.config, 'optionalAccess', _61 => _61.tools, 'optionalAccess', _62 => _62[0], 'optionalAccess', _63 => _63.functionDeclarations])) {
1708
1680
  tools = llmRequest.config.tools[0].functionDeclarations.map(
1709
1681
  (decl) => this.functionDeclarationToAnthropicTool(decl)
1710
1682
  );
@@ -1726,9 +1698,9 @@ var AnthropicLlm = (_class7 = class extends BaseLlm {
1726
1698
  messages: anthropicMessages,
1727
1699
  tools,
1728
1700
  tool_choice: tools ? { type: "auto" } : void 0,
1729
- max_tokens: _optionalChain([llmRequest, 'access', _63 => _63.config, 'optionalAccess', _64 => _64.maxOutputTokens]) || MAX_TOKENS,
1730
- temperature: _optionalChain([llmRequest, 'access', _65 => _65.config, 'optionalAccess', _66 => _66.temperature]),
1731
- top_p: _optionalChain([llmRequest, 'access', _67 => _67.config, 'optionalAccess', _68 => _68.topP])
1701
+ max_tokens: _optionalChain([llmRequest, 'access', _64 => _64.config, 'optionalAccess', _65 => _65.maxOutputTokens]) || MAX_TOKENS,
1702
+ temperature: _optionalChain([llmRequest, 'access', _66 => _66.config, 'optionalAccess', _67 => _67.temperature]),
1703
+ top_p: _optionalChain([llmRequest, 'access', _68 => _68.config, 'optionalAccess', _69 => _69.topP])
1732
1704
  });
1733
1705
  yield this.anthropicMessageToLlmResponse(message);
1734
1706
  }
@@ -1789,7 +1761,7 @@ var AnthropicLlm = (_class7 = class extends BaseLlm {
1789
1761
  }
1790
1762
  if (part.function_response) {
1791
1763
  let content = "";
1792
- if (_optionalChain([part, 'access', _69 => _69.function_response, 'access', _70 => _70.response, 'optionalAccess', _71 => _71.result])) {
1764
+ if (_optionalChain([part, 'access', _70 => _70.function_response, 'access', _71 => _71.response, 'optionalAccess', _72 => _72.result])) {
1793
1765
  content = String(part.function_response.response.result);
1794
1766
  }
1795
1767
  return {
@@ -1824,7 +1796,7 @@ var AnthropicLlm = (_class7 = class extends BaseLlm {
1824
1796
  */
1825
1797
  functionDeclarationToAnthropicTool(functionDeclaration) {
1826
1798
  const properties = {};
1827
- if (_optionalChain([functionDeclaration, 'access', _72 => _72.parameters, 'optionalAccess', _73 => _73.properties])) {
1799
+ if (_optionalChain([functionDeclaration, 'access', _73 => _73.parameters, 'optionalAccess', _74 => _74.properties])) {
1828
1800
  for (const [key, value] of Object.entries(
1829
1801
  functionDeclaration.parameters.properties
1830
1802
  )) {
@@ -1915,7 +1887,7 @@ var GoogleLlm = class extends BaseLlm {
1915
1887
  /**
1916
1888
  * Constructor for Gemini
1917
1889
  */
1918
- constructor(model = "gemini-1.5-flash") {
1890
+ constructor(model = "gemini-2.5-flash") {
1919
1891
  super(model);
1920
1892
  }
1921
1893
  /**
@@ -1952,7 +1924,7 @@ var GoogleLlm = class extends BaseLlm {
1952
1924
  response = resp;
1953
1925
  const llmResponse = LlmResponse.create(resp);
1954
1926
  usageMetadata = llmResponse.usageMetadata;
1955
- if (_optionalChain([llmResponse, 'access', _74 => _74.content, 'optionalAccess', _75 => _75.parts, 'optionalAccess', _76 => _76[0], 'optionalAccess', _77 => _77.text])) {
1927
+ if (_optionalChain([llmResponse, 'access', _75 => _75.content, 'optionalAccess', _76 => _76.parts, 'optionalAccess', _77 => _77[0], 'optionalAccess', _78 => _78.text])) {
1956
1928
  const part0 = llmResponse.content.parts[0];
1957
1929
  if (part0.thought) {
1958
1930
  thoughtText += part0.text;
@@ -1980,7 +1952,7 @@ var GoogleLlm = class extends BaseLlm {
1980
1952
  }
1981
1953
  yield llmResponse;
1982
1954
  }
1983
- if ((text || thoughtText) && response && response.candidates && _optionalChain([response, 'access', _78 => _78.candidates, 'access', _79 => _79[0], 'optionalAccess', _80 => _80.finishReason]) === _genai.FinishReason.STOP) {
1955
+ if ((text || thoughtText) && response && response.candidates && _optionalChain([response, 'access', _79 => _79.candidates, 'access', _80 => _80[0], 'optionalAccess', _81 => _81.finishReason]) === _genai.FinishReason.STOP) {
1984
1956
  const parts = [];
1985
1957
  if (thoughtText) {
1986
1958
  parts.push({ text: thoughtText, thought: true });
@@ -2004,7 +1976,7 @@ var GoogleLlm = class extends BaseLlm {
2004
1976
  });
2005
1977
  const llmResponse = LlmResponse.create(response);
2006
1978
  this.logger.debug(
2007
- `Google response: ${_optionalChain([llmResponse, 'access', _81 => _81.usageMetadata, 'optionalAccess', _82 => _82.candidatesTokenCount]) || 0} tokens`
1979
+ `Google response: ${_optionalChain([llmResponse, 'access', _82 => _82.usageMetadata, 'optionalAccess', _83 => _83.candidatesTokenCount]) || 0} tokens`
2008
1980
  );
2009
1981
  yield llmResponse;
2010
1982
  }
@@ -2019,8 +1991,8 @@ var GoogleLlm = class extends BaseLlm {
2019
1991
  * Check if response has inline data
2020
1992
  */
2021
1993
  hasInlineData(response) {
2022
- const parts = _optionalChain([response, 'access', _83 => _83.candidates, 'optionalAccess', _84 => _84[0], 'optionalAccess', _85 => _85.content, 'optionalAccess', _86 => _86.parts]);
2023
- return _optionalChain([parts, 'optionalAccess', _87 => _87.some, 'call', _88 => _88((part) => _optionalChain([part, 'optionalAccess', _89 => _89.inlineData]))]) || false;
1994
+ const parts = _optionalChain([response, 'access', _84 => _84.candidates, 'optionalAccess', _85 => _85[0], 'optionalAccess', _86 => _86.content, 'optionalAccess', _87 => _87.parts]);
1995
+ return _optionalChain([parts, 'optionalAccess', _88 => _88.some, 'call', _89 => _89((part) => _optionalChain([part, 'optionalAccess', _90 => _90.inlineData]))]) || false;
2024
1996
  }
2025
1997
  /**
2026
1998
  * Convert LlmRequest contents to GoogleGenAI format
@@ -2054,7 +2026,7 @@ var GoogleLlm = class extends BaseLlm {
2054
2026
  * Sets display_name to null for the Gemini API (non-Vertex) backend.
2055
2027
  */
2056
2028
  removeDisplayNameIfPresent(dataObj) {
2057
- if (_optionalChain([dataObj, 'optionalAccess', _90 => _90.displayName])) {
2029
+ if (_optionalChain([dataObj, 'optionalAccess', _91 => _91.displayName])) {
2058
2030
  dataObj.displayName = null;
2059
2031
  }
2060
2032
  }
@@ -2063,7 +2035,7 @@ var GoogleLlm = class extends BaseLlm {
2063
2035
  */
2064
2036
  buildFunctionDeclarationLog(funcDecl) {
2065
2037
  let paramStr = "{}";
2066
- if (_optionalChain([funcDecl, 'access', _91 => _91.parameters, 'optionalAccess', _92 => _92.properties])) {
2038
+ if (_optionalChain([funcDecl, 'access', _92 => _92.parameters, 'optionalAccess', _93 => _93.properties])) {
2067
2039
  paramStr = JSON.stringify(funcDecl.parameters.properties);
2068
2040
  }
2069
2041
  return `${funcDecl.name}: ${paramStr}`;
@@ -2184,7 +2156,7 @@ var OpenAiLlm = class extends BaseLlm {
2184
2156
  (content) => this.contentToOpenAiMessage(content)
2185
2157
  );
2186
2158
  let tools;
2187
- if (_optionalChain([llmRequest, 'access', _93 => _93.config, 'optionalAccess', _94 => _94.tools, 'optionalAccess', _95 => _95[0], 'optionalAccess', _96 => _96.functionDeclarations])) {
2159
+ if (_optionalChain([llmRequest, 'access', _94 => _94.config, 'optionalAccess', _95 => _95.tools, 'optionalAccess', _96 => _96[0], 'optionalAccess', _97 => _97.functionDeclarations])) {
2188
2160
  tools = llmRequest.config.tools[0].functionDeclarations.map(
2189
2161
  (funcDecl) => this.functionDeclarationToOpenAiTool(funcDecl)
2190
2162
  );
@@ -2202,9 +2174,9 @@ var OpenAiLlm = class extends BaseLlm {
2202
2174
  messages: openAiMessages,
2203
2175
  tools,
2204
2176
  tool_choice: tools ? "auto" : void 0,
2205
- max_tokens: _optionalChain([llmRequest, 'access', _97 => _97.config, 'optionalAccess', _98 => _98.maxOutputTokens]),
2206
- temperature: _optionalChain([llmRequest, 'access', _99 => _99.config, 'optionalAccess', _100 => _100.temperature]),
2207
- top_p: _optionalChain([llmRequest, 'access', _101 => _101.config, 'optionalAccess', _102 => _102.topP]),
2177
+ max_tokens: _optionalChain([llmRequest, 'access', _98 => _98.config, 'optionalAccess', _99 => _99.maxOutputTokens]),
2178
+ temperature: _optionalChain([llmRequest, 'access', _100 => _100.config, 'optionalAccess', _101 => _101.temperature]),
2179
+ top_p: _optionalChain([llmRequest, 'access', _102 => _102.config, 'optionalAccess', _103 => _103.topP]),
2208
2180
  stream
2209
2181
  };
2210
2182
  if (stream) {
@@ -2224,7 +2196,7 @@ var OpenAiLlm = class extends BaseLlm {
2224
2196
  if (chunk.usage) {
2225
2197
  usageMetadata = chunk.usage;
2226
2198
  }
2227
- if (_optionalChain([llmResponse, 'access', _103 => _103.content, 'optionalAccess', _104 => _104.parts, 'optionalAccess', _105 => _105[0], 'optionalAccess', _106 => _106.text])) {
2199
+ if (_optionalChain([llmResponse, 'access', _104 => _104.content, 'optionalAccess', _105 => _105.parts, 'optionalAccess', _106 => _106[0], 'optionalAccess', _107 => _107.text])) {
2228
2200
  const part0 = llmResponse.content.parts[0];
2229
2201
  if (part0.thought) {
2230
2202
  thoughtText += part0.text;
@@ -2265,10 +2237,10 @@ var OpenAiLlm = class extends BaseLlm {
2265
2237
  function: { name: "", arguments: "" }
2266
2238
  };
2267
2239
  }
2268
- if (_optionalChain([toolCall, 'access', _107 => _107.function, 'optionalAccess', _108 => _108.name])) {
2240
+ if (_optionalChain([toolCall, 'access', _108 => _108.function, 'optionalAccess', _109 => _109.name])) {
2269
2241
  accumulatedToolCalls[index].function.name += toolCall.function.name;
2270
2242
  }
2271
- if (_optionalChain([toolCall, 'access', _109 => _109.function, 'optionalAccess', _110 => _110.arguments])) {
2243
+ if (_optionalChain([toolCall, 'access', _110 => _110.function, 'optionalAccess', _111 => _111.arguments])) {
2272
2244
  accumulatedToolCalls[index].function.arguments += toolCall.function.arguments;
2273
2245
  }
2274
2246
  }
@@ -2283,7 +2255,7 @@ var OpenAiLlm = class extends BaseLlm {
2283
2255
  }
2284
2256
  if (accumulatedToolCalls.length > 0) {
2285
2257
  for (const toolCall of accumulatedToolCalls) {
2286
- if (_optionalChain([toolCall, 'access', _111 => _111.function, 'optionalAccess', _112 => _112.name])) {
2258
+ if (_optionalChain([toolCall, 'access', _112 => _112.function, 'optionalAccess', _113 => _113.name])) {
2287
2259
  parts.push({
2288
2260
  functionCall: {
2289
2261
  id: toolCall.id,
@@ -2343,7 +2315,7 @@ var OpenAiLlm = class extends BaseLlm {
2343
2315
  response.usage
2344
2316
  );
2345
2317
  this.logger.debug(
2346
- `OpenAI response: ${_optionalChain([response, 'access', _113 => _113.usage, 'optionalAccess', _114 => _114.completion_tokens]) || 0} tokens`
2318
+ `OpenAI response: ${_optionalChain([response, 'access', _114 => _114.usage, 'optionalAccess', _115 => _115.completion_tokens]) || 0} tokens`
2347
2319
  );
2348
2320
  yield llmResponse;
2349
2321
  }
@@ -2370,7 +2342,7 @@ var OpenAiLlm = class extends BaseLlm {
2370
2342
  }
2371
2343
  if (delta.tool_calls) {
2372
2344
  for (const toolCall of delta.tool_calls) {
2373
- if (toolCall.type === "function" && _optionalChain([toolCall, 'access', _115 => _115.function, 'optionalAccess', _116 => _116.name])) {
2345
+ if (toolCall.type === "function" && _optionalChain([toolCall, 'access', _116 => _116.function, 'optionalAccess', _117 => _117.name])) {
2374
2346
  parts.push({
2375
2347
  functionCall: {
2376
2348
  id: toolCall.id || "",
@@ -2436,10 +2408,10 @@ var OpenAiLlm = class extends BaseLlm {
2436
2408
  if (role === "system") {
2437
2409
  return {
2438
2410
  role: "system",
2439
- content: _optionalChain([content, 'access', _117 => _117.parts, 'optionalAccess', _118 => _118[0], 'optionalAccess', _119 => _119.text]) || ""
2411
+ content: _optionalChain([content, 'access', _118 => _118.parts, 'optionalAccess', _119 => _119[0], 'optionalAccess', _120 => _120.text]) || ""
2440
2412
  };
2441
2413
  }
2442
- if (_optionalChain([content, 'access', _120 => _120.parts, 'optionalAccess', _121 => _121.some, 'call', _122 => _122((part) => part.functionCall)])) {
2414
+ if (_optionalChain([content, 'access', _121 => _121.parts, 'optionalAccess', _122 => _122.some, 'call', _123 => _123((part) => part.functionCall)])) {
2443
2415
  const functionCallPart = content.parts.find(
2444
2416
  (part) => part.functionCall
2445
2417
  );
@@ -2459,7 +2431,7 @@ var OpenAiLlm = class extends BaseLlm {
2459
2431
  ]
2460
2432
  };
2461
2433
  }
2462
- if (_optionalChain([content, 'access', _123 => _123.parts, 'optionalAccess', _124 => _124.some, 'call', _125 => _125((part) => part.functionResponse)])) {
2434
+ if (_optionalChain([content, 'access', _124 => _124.parts, 'optionalAccess', _125 => _125.some, 'call', _126 => _126((part) => part.functionResponse)])) {
2463
2435
  const functionResponsePart = content.parts.find(
2464
2436
  (part) => part.functionResponse
2465
2437
  );
@@ -2471,7 +2443,7 @@ var OpenAiLlm = class extends BaseLlm {
2471
2443
  )
2472
2444
  };
2473
2445
  }
2474
- if (_optionalChain([content, 'access', _126 => _126.parts, 'optionalAccess', _127 => _127.length]) === 1 && content.parts[0].text) {
2446
+ if (_optionalChain([content, 'access', _127 => _127.parts, 'optionalAccess', _128 => _128.length]) === 1 && content.parts[0].text) {
2475
2447
  return {
2476
2448
  role,
2477
2449
  content: content.parts[0].text
@@ -2494,7 +2466,7 @@ var OpenAiLlm = class extends BaseLlm {
2494
2466
  text: part.text
2495
2467
  };
2496
2468
  }
2497
- if (_optionalChain([part, 'access', _128 => _128.inline_data, 'optionalAccess', _129 => _129.mime_type]) && _optionalChain([part, 'access', _130 => _130.inline_data, 'optionalAccess', _131 => _131.data])) {
2469
+ if (_optionalChain([part, 'access', _129 => _129.inline_data, 'optionalAccess', _130 => _130.mime_type]) && _optionalChain([part, 'access', _131 => _131.inline_data, 'optionalAccess', _132 => _132.data])) {
2498
2470
  return {
2499
2471
  type: "image_url",
2500
2472
  image_url: {
@@ -2622,8 +2594,8 @@ var OpenAiLlm = class extends BaseLlm {
2622
2594
  * Check if response has inline data (similar to Google LLM)
2623
2595
  */
2624
2596
  hasInlineData(response) {
2625
- const parts = _optionalChain([response, 'access', _132 => _132.content, 'optionalAccess', _133 => _133.parts]);
2626
- return _optionalChain([parts, 'optionalAccess', _134 => _134.some, 'call', _135 => _135((part) => part.inlineData)]) || false;
2597
+ const parts = _optionalChain([response, 'access', _133 => _133.content, 'optionalAccess', _134 => _134.parts]);
2598
+ return _optionalChain([parts, 'optionalAccess', _135 => _135.some, 'call', _136 => _136((part) => part.inlineData)]) || false;
2627
2599
  }
2628
2600
  /**
2629
2601
  * Gets the OpenAI client
@@ -2647,30 +2619,16 @@ var OpenAiLlm = class extends BaseLlm {
2647
2619
  // src/models/llm-registry.ts
2648
2620
  init_logger();
2649
2621
  var LLMRegistry = (_class8 = class _LLMRegistry {
2650
- /**
2651
- * Map of model name regex to LLM class
2652
- */
2653
2622
  static __initStatic() {this.llmRegistry = /* @__PURE__ */ new Map()}
2654
- static __initStatic2() {this.logger = new Logger({ name: "LLMRegistry" })}
2655
- /**
2656
- * Creates a new LLM instance
2657
- *
2658
- * @param model The model name
2659
- * @returns The LLM instance
2660
- */
2623
+ static __initStatic2() {this.modelInstances = /* @__PURE__ */ new Map()}
2624
+ static __initStatic3() {this.logger = new Logger({ name: "LLMRegistry" })}
2661
2625
  static newLLM(model) {
2662
2626
  const llmClass = _LLMRegistry.resolve(model);
2663
2627
  if (!llmClass) {
2664
- throw new Error(`No LLM found for model: ${model}`);
2628
+ throw new Error(`No LLM class found for model: ${model}`);
2665
2629
  }
2666
2630
  return new llmClass(model);
2667
2631
  }
2668
- /**
2669
- * Resolves the LLM class from the model name
2670
- *
2671
- * @param model The model name
2672
- * @returns The LLM class
2673
- */
2674
2632
  static resolve(model) {
2675
2633
  for (const [regex, llmClass] of _LLMRegistry.llmRegistry.entries()) {
2676
2634
  if (regex.test(model)) {
@@ -2679,36 +2637,56 @@ var LLMRegistry = (_class8 = class _LLMRegistry {
2679
2637
  }
2680
2638
  return null;
2681
2639
  }
2682
- /**
2683
- * Registers a new LLM class
2684
- *
2685
- * @param modelNameRegex The regex to match model names
2686
- * @param llmClass The LLM class
2687
- */
2688
2640
  static register(modelNameRegex, llmClass) {
2689
2641
  _LLMRegistry.llmRegistry.set(new RegExp(modelNameRegex), llmClass);
2690
2642
  }
2691
- /**
2692
- * Registers all model patterns from an LLM class
2693
- *
2694
- * @param llmClass The LLM class
2695
- */
2696
2643
  static registerLLM(llmClass) {
2697
2644
  const modelPatterns = llmClass.supportedModels();
2698
2645
  for (const pattern of modelPatterns) {
2699
2646
  _LLMRegistry.register(pattern, llmClass);
2700
2647
  }
2701
2648
  }
2702
- /**
2703
- * Logs all registered models for debugging
2704
- */
2649
+ static registerModel(name, model) {
2650
+ _LLMRegistry.modelInstances.set(name, model);
2651
+ }
2652
+ static getModel(name) {
2653
+ const model = _LLMRegistry.modelInstances.get(name);
2654
+ if (!model) {
2655
+ throw new Error(`Model '${name}' not found in registry`);
2656
+ }
2657
+ return model;
2658
+ }
2659
+ static hasModel(name) {
2660
+ return _LLMRegistry.modelInstances.has(name);
2661
+ }
2662
+ static unregisterModel(name) {
2663
+ _LLMRegistry.modelInstances.delete(name);
2664
+ }
2665
+ static getModelOrCreate(name) {
2666
+ if (_LLMRegistry.hasModel(name)) {
2667
+ return _LLMRegistry.getModel(name);
2668
+ }
2669
+ return _LLMRegistry.newLLM(name);
2670
+ }
2671
+ static clear() {
2672
+ _LLMRegistry.llmRegistry.clear();
2673
+ _LLMRegistry.modelInstances.clear();
2674
+ }
2675
+ static clearModels() {
2676
+ _LLMRegistry.modelInstances.clear();
2677
+ }
2678
+ static clearClasses() {
2679
+ _LLMRegistry.llmRegistry.clear();
2680
+ }
2705
2681
  static logRegisteredModels() {
2706
- _LLMRegistry.logger.debug(
2707
- "Registered LLM models:",
2708
- [..._LLMRegistry.llmRegistry.entries()].map(([regex]) => regex.toString())
2682
+ const classPatterns = [..._LLMRegistry.llmRegistry.entries()].map(
2683
+ ([regex]) => regex.toString()
2709
2684
  );
2685
+ const instanceNames = [..._LLMRegistry.modelInstances.keys()];
2686
+ _LLMRegistry.logger.debug("Registered LLM class patterns:", classPatterns);
2687
+ _LLMRegistry.logger.debug("Registered LLM instances:", instanceNames);
2710
2688
  }
2711
- }, _class8.__initStatic(), _class8.__initStatic2(), _class8);
2689
+ }, _class8.__initStatic(), _class8.__initStatic2(), _class8.__initStatic3(), _class8);
2712
2690
 
2713
2691
  // src/models/registry.ts
2714
2692
  function registerProviders() {
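
The hunk above extends LLMRegistry from a class-pattern registry to one that also holds named model instances. A minimal usage sketch of the new static methods, assuming LLMRegistry and GoogleLlm are exported from the package entry point (the import path and model id below are illustrative, not taken from the diff):

```ts
import { LLMRegistry, GoogleLlm } from "@iqai/adk"; // assumed exports

// Existing behavior: resolve an LLM class by model-name pattern and construct it.
const gemini = LLMRegistry.newLLM("gemini-2.5-flash");

// New in 0.2.0: register a pre-configured instance under a custom name...
LLMRegistry.registerModel("default-llm", gemini);

// ...and look it up later; getModelOrCreate falls back to pattern-based
// construction when no instance is registered under that name.
const llm = LLMRegistry.getModelOrCreate("default-llm");
```
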
@@ -2925,7 +2903,7 @@ var OAuth2Credential = class extends AuthCredential {
2925
2903
  "Cannot refresh token: no refresh token or refresh function"
2926
2904
  );
2927
2905
  }
2928
- const result = await _optionalChain([this, 'access', _136 => _136.refreshFunction, 'optionalCall', _137 => _137(this.refreshToken)]);
2906
+ const result = await _optionalChain([this, 'access', _137 => _137.refreshFunction, 'optionalCall', _138 => _138(this.refreshToken)]);
2929
2907
  if (!result) {
2930
2908
  throw new Error("Failed to refresh token");
2931
2909
  }
@@ -2960,7 +2938,7 @@ var AuthHandler = class {
2960
2938
  * Gets the authentication token
2961
2939
  */
2962
2940
  getToken() {
2963
- return _optionalChain([this, 'access', _138 => _138.credential, 'optionalAccess', _139 => _139.getToken, 'call', _140 => _140()]);
2941
+ return _optionalChain([this, 'access', _139 => _139.credential, 'optionalAccess', _140 => _140.getToken, 'call', _141 => _141()]);
2964
2942
  }
2965
2943
  /**
2966
2944
  * Gets headers for HTTP requests
@@ -2975,7 +2953,7 @@ var AuthHandler = class {
2975
2953
  * Refreshes the token if necessary
2976
2954
  */
2977
2955
  async refreshToken() {
2978
- if (_optionalChain([this, 'access', _141 => _141.credential, 'optionalAccess', _142 => _142.canRefresh, 'call', _143 => _143()])) {
2956
+ if (_optionalChain([this, 'access', _142 => _142.credential, 'optionalAccess', _143 => _143.canRefresh, 'call', _144 => _144()])) {
2979
2957
  await this.credential.refresh();
2980
2958
  }
2981
2959
  }
@@ -3083,9 +3061,9 @@ var OpenIdConnectScheme = class extends AuthScheme {
3083
3061
 
3084
3062
  // src/sessions/state.ts
3085
3063
  var State = (_class9 = class _State {
3086
- static __initStatic3() {this.APP_PREFIX = "app:"}
3087
- static __initStatic4() {this.USER_PREFIX = "user:"}
3088
- static __initStatic5() {this.TEMP_PREFIX = "temp:"}
3064
+ static __initStatic4() {this.APP_PREFIX = "app:"}
3065
+ static __initStatic5() {this.USER_PREFIX = "user:"}
3066
+ static __initStatic6() {this.TEMP_PREFIX = "temp:"}
3089
3067
 
3090
3068
 
3091
3069
  /**
@@ -3179,7 +3157,7 @@ var State = (_class9 = class _State {
3179
3157
  const state = new _State(value, delta);
3180
3158
  return _State.createProxy(state);
3181
3159
  }
3182
- }, _class9.__initStatic3(), _class9.__initStatic4(), _class9.__initStatic5(), _class9);
3160
+ }, _class9.__initStatic4(), _class9.__initStatic5(), _class9.__initStatic6(), _class9);
3183
3161
 
3184
3162
  // src/events/event.ts
3185
3163
  var _uuid = require('uuid');
@@ -3954,10 +3932,10 @@ var CreatedTool = class extends BaseTool {
3954
3932
  /**
3955
3933
  * Executes the tool function with validation
3956
3934
  */
3957
- async runAsync(args, context) {
3935
+ async runAsync(args, context4) {
3958
3936
  try {
3959
3937
  const validatedArgs = this.schema.parse(args);
3960
- const result = await Promise.resolve(this.func(validatedArgs, context));
3938
+ const result = await Promise.resolve(this.func(validatedArgs, context4));
3961
3939
  return _nullishCoalesce(result, () => ( {}));
3962
3940
  } catch (error) {
3963
3941
  if (error instanceof z.ZodError) {
@@ -4215,7 +4193,7 @@ var AgentTool = (_class15 = class extends BaseTool {
4215
4193
  /**
4216
4194
  * Execute the tool by running the agent with the provided input
4217
4195
  */
4218
- async runAsync(params, context) {
4196
+ async runAsync(params, context4) {
4219
4197
  try {
4220
4198
  const input = params.input || Object.values(params)[0];
4221
4199
  if (!isLlmAgent(this.agent)) {
@@ -4223,7 +4201,7 @@ var AgentTool = (_class15 = class extends BaseTool {
4223
4201
  `Agent ${this.name} does not support running as a tool`
4224
4202
  );
4225
4203
  }
4226
- const parentInvocation = context._invocationContext;
4204
+ const parentInvocation = context4._invocationContext;
4227
4205
  const childInvocationContext = new InvocationContext({
4228
4206
  invocationId: _uuid.v4.call(void 0, ),
4229
4207
  agent: this.agent,
@@ -4260,8 +4238,8 @@ var AgentTool = (_class15 = class extends BaseTool {
4260
4238
  } catch (e2) {
4261
4239
  toolResult = mergedText;
4262
4240
  }
4263
- if (this.outputKey && _optionalChain([context, 'optionalAccess', _144 => _144.state])) {
4264
- context.state[this.outputKey] = toolResult;
4241
+ if (this.outputKey && _optionalChain([context4, 'optionalAccess', _145 => _145.state])) {
4242
+ context4.state[this.outputKey] = toolResult;
4265
4243
  }
4266
4244
  return toolResult;
4267
4245
  } catch (error) {
@@ -4517,8 +4495,8 @@ var HttpRequestTool = class extends BaseTool {
4517
4495
 
4518
4496
  // src/tools/common/file-operations-tool.ts
4519
4497
  init_base_tool();
4520
- var _promises = require('fs/promises'); var _promises2 = _interopRequireDefault(_promises);
4521
- var _path = require('path'); var _path2 = _interopRequireDefault(_path);
4498
+ var _promises = require('fs/promises'); var fs2 = _interopRequireWildcard(_promises);
4499
+ var _path = require('path'); var path2 = _interopRequireWildcard(_path);
4522
4500
 
4523
4501
  var FileOperationsTool = class extends BaseTool {
4524
4502
 
@@ -4527,7 +4505,7 @@ var FileOperationsTool = class extends BaseTool {
4527
4505
  name: "file_operations",
4528
4506
  description: "Perform file system operations like reading, writing, and managing files"
4529
4507
  });
4530
- this.basePath = _optionalChain([options, 'optionalAccess', _145 => _145.basePath]) || process.cwd();
4508
+ this.basePath = _optionalChain([options, 'optionalAccess', _146 => _146.basePath]) || process.cwd();
4531
4509
  }
4532
4510
  /**
4533
4511
  * Get the function declaration for the tool
@@ -4615,14 +4593,14 @@ var FileOperationsTool = class extends BaseTool {
4615
4593
  * Resolve a file path relative to the base path
4616
4594
  */
4617
4595
  resolvePath(filepath) {
4618
- return _path2.default.isAbsolute(filepath) ? filepath : _path2.default.resolve(this.basePath, filepath);
4596
+ return path2.default.isAbsolute(filepath) ? filepath : path2.default.resolve(this.basePath, filepath);
4619
4597
  }
4620
4598
  /**
4621
4599
  * Validate that a path is within the base path for security
4622
4600
  */
4623
4601
  validatePath(filepath) {
4624
- const normalizedPath = _path2.default.normalize(filepath);
4625
- const normalizedBasePath = _path2.default.normalize(this.basePath);
4602
+ const normalizedPath = path2.default.normalize(filepath);
4603
+ const normalizedBasePath = path2.default.normalize(this.basePath);
4626
4604
  if (!normalizedPath.startsWith(normalizedBasePath)) {
4627
4605
  throw new Error(
4628
4606
  `Access denied: Can't access paths outside the base directory`
@@ -4634,7 +4612,7 @@ var FileOperationsTool = class extends BaseTool {
4634
4612
  */
4635
4613
  async readFile(filepath, encoding) {
4636
4614
  try {
4637
- const content = await _promises2.default.readFile(filepath, { encoding });
4615
+ const content = await fs2.default.readFile(filepath, { encoding });
4638
4616
  return {
4639
4617
  success: true,
4640
4618
  data: content
@@ -4651,9 +4629,9 @@ var FileOperationsTool = class extends BaseTool {
4651
4629
  */
4652
4630
  async writeFile(filepath, content, encoding) {
4653
4631
  try {
4654
- const dir = _path2.default.dirname(filepath);
4655
- await _promises2.default.mkdir(dir, { recursive: true });
4656
- await _promises2.default.writeFile(filepath, content, { encoding });
4632
+ const dir = path2.default.dirname(filepath);
4633
+ await fs2.default.mkdir(dir, { recursive: true });
4634
+ await fs2.default.writeFile(filepath, content, { encoding });
4657
4635
  return {
4658
4636
  success: true
4659
4637
  };
@@ -4669,9 +4647,9 @@ var FileOperationsTool = class extends BaseTool {
4669
4647
  */
4670
4648
  async appendFile(filepath, content, encoding) {
4671
4649
  try {
4672
- const dir = _path2.default.dirname(filepath);
4673
- await _promises2.default.mkdir(dir, { recursive: true });
4674
- await _promises2.default.appendFile(filepath, content, { encoding });
4650
+ const dir = path2.default.dirname(filepath);
4651
+ await fs2.default.mkdir(dir, { recursive: true });
4652
+ await fs2.default.appendFile(filepath, content, { encoding });
4675
4653
  return {
4676
4654
  success: true
4677
4655
  };
@@ -4687,7 +4665,7 @@ var FileOperationsTool = class extends BaseTool {
4687
4665
  */
4688
4666
  async deleteFile(filepath) {
4689
4667
  try {
4690
- await _promises2.default.unlink(filepath);
4668
+ await fs2.default.unlink(filepath);
4691
4669
  return {
4692
4670
  success: true
4693
4671
  };
@@ -4703,7 +4681,7 @@ var FileOperationsTool = class extends BaseTool {
4703
4681
  */
4704
4682
  async fileExists(filepath) {
4705
4683
  try {
4706
- await _promises2.default.access(filepath);
4684
+ await fs2.default.access(filepath);
4707
4685
  return {
4708
4686
  success: true,
4709
4687
  data: true
@@ -4720,11 +4698,11 @@ var FileOperationsTool = class extends BaseTool {
4720
4698
  */
4721
4699
  async listDirectory(dirpath) {
4722
4700
  try {
4723
- const entries = await _promises2.default.readdir(dirpath, { withFileTypes: true });
4701
+ const entries = await fs2.default.readdir(dirpath, { withFileTypes: true });
4724
4702
  const results = await Promise.all(
4725
4703
  entries.map(async (entry) => {
4726
- const entryPath = _path2.default.join(dirpath, entry.name);
4727
- const stats = await _promises2.default.stat(entryPath);
4704
+ const entryPath = path2.default.join(dirpath, entry.name);
4705
+ const stats = await fs2.default.stat(entryPath);
4728
4706
  return {
4729
4707
  name: entry.name,
4730
4708
  path: entryPath,
@@ -4752,7 +4730,7 @@ var FileOperationsTool = class extends BaseTool {
4752
4730
  */
4753
4731
  async makeDirectory(dirpath) {
4754
4732
  try {
4755
- await _promises2.default.mkdir(dirpath, { recursive: true });
4733
+ await fs2.default.mkdir(dirpath, { recursive: true });
4756
4734
  return {
4757
4735
  success: true
4758
4736
  };
@@ -4809,9 +4787,9 @@ var UserInteractionTool = class extends BaseTool {
4809
4787
  /**
4810
4788
  * Execute the user interaction
4811
4789
  */
4812
- async runAsync(args, context) {
4790
+ async runAsync(args, context4) {
4813
4791
  try {
4814
- const actions = context.actions;
4792
+ const actions = context4.actions;
4815
4793
  if (!actions || !actions.promptUser) {
4816
4794
  return {
4817
4795
  success: false,
@@ -4859,9 +4837,9 @@ var ExitLoopTool = (_class17 = class extends BaseTool {
4859
4837
  /**
4860
4838
  * Execute the exit loop action
4861
4839
  */
4862
- async runAsync(_args, context) {
4840
+ async runAsync(_args, context4) {
4863
4841
  this.logger.debug("Executing exit loop tool");
4864
- context.actions.escalate = true;
4842
+ context4.actions.escalate = true;
4865
4843
  }
4866
4844
  }, _class17);
4867
4845
 
@@ -4912,14 +4890,14 @@ var GetUserChoiceTool = (_class18 = class extends BaseTool {
4912
4890
  * This is a long running operation that will return null initially
4913
4891
  * and the actual choice will be provided asynchronously
4914
4892
  */
4915
- async runAsync(args, context) {
4893
+ async runAsync(args, context4) {
4916
4894
  this.logger.debug(
4917
4895
  `Executing get_user_choice with options: ${args.options.join(", ")}`
4918
4896
  );
4919
4897
  if (args.question) {
4920
4898
  this.logger.debug(`Question: ${args.question}`);
4921
4899
  }
4922
- context.actions.skipSummarization = true;
4900
+ context4.actions.skipSummarization = true;
4923
4901
  return null;
4924
4902
  }
4925
4903
  }, _class18);
@@ -4961,9 +4939,9 @@ var TransferToAgentTool = (_class19 = class extends BaseTool {
4961
4939
  /**
4962
4940
  * Execute the transfer to agent action
4963
4941
  */
4964
- async runAsync(args, context) {
4942
+ async runAsync(args, context4) {
4965
4943
  this.logger.debug(`Executing transfer to agent: ${args.agent_name}`);
4966
- context.actions.transferToAgent = args.agent_name;
4944
+ context4.actions.transferToAgent = args.agent_name;
4967
4945
  }
4968
4946
  }, _class19);
4969
4947
 
@@ -5004,13 +4982,13 @@ var LoadMemoryTool = (_class20 = class extends BaseTool {
5004
4982
  /**
5005
4983
  * Execute the memory loading action
5006
4984
  */
5007
- async runAsync(args, context) {
4985
+ async runAsync(args, context4) {
5008
4986
  this.logger.debug(`Executing load_memory with query: ${args.query}`);
5009
4987
  try {
5010
- const searchResult = await context.searchMemory(args.query);
4988
+ const searchResult = await context4.searchMemory(args.query);
5011
4989
  return {
5012
4990
  memories: searchResult.memories || [],
5013
- count: _optionalChain([searchResult, 'access', _146 => _146.memories, 'optionalAccess', _147 => _147.length]) || 0
4991
+ count: _optionalChain([searchResult, 'access', _147 => _147.memories, 'optionalAccess', _148 => _148.length]) || 0
5014
4992
  };
5015
4993
  } catch (error) {
5016
4994
  console.error("Error searching memory:", error);
@@ -5057,7 +5035,7 @@ var LoadArtifactsTool = class extends BaseTool {
5057
5035
  /**
5058
5036
  * Execute the load artifacts operation
5059
5037
  */
5060
- async runAsync(args, context) {
5038
+ async runAsync(args, context4) {
5061
5039
  const artifactNames = args.artifact_names || [];
5062
5040
  return { artifact_names: artifactNames };
5063
5041
  }
@@ -5567,7 +5545,7 @@ var McpClientService = (_class22 = class {
5567
5545
  },
5568
5546
  this,
5569
5547
  async (instance) => await instance.reinitialize(),
5570
- _optionalChain([this, 'access', _148 => _148.config, 'access', _149 => _149.retryOptions, 'optionalAccess', _150 => _150.maxRetries]) || 2
5548
+ _optionalChain([this, 'access', _149 => _149.config, 'access', _150 => _150.retryOptions, 'optionalAccess', _151 => _151.maxRetries]) || 2
5571
5549
  );
5572
5550
  return await wrappedCall();
5573
5551
  } catch (error) {
@@ -5651,7 +5629,7 @@ var McpClientService = (_class22 = class {
5651
5629
  this.mcpSamplingHandler = null;
5652
5630
  if (this.client) {
5653
5631
  try {
5654
- _optionalChain([this, 'access', _151 => _151.client, 'access', _152 => _152.removeRequestHandler, 'optionalCall', _153 => _153("sampling/createMessage")]);
5632
+ _optionalChain([this, 'access', _152 => _152.client, 'access', _153 => _153.removeRequestHandler, 'optionalCall', _154 => _154("sampling/createMessage")]);
5655
5633
  } catch (error) {
5656
5634
  this.logger.error("Failed to remove sampling handler:", error);
5657
5635
  }
@@ -6088,12 +6066,12 @@ var McpToolset = (_class24 = class {
6088
6066
  * Checks if a tool should be included based on the tool filter.
6089
6067
  * Similar to Python's _is_selected method.
6090
6068
  */
6091
- isSelected(tool, context) {
6069
+ isSelected(tool, context4) {
6092
6070
  if (!this.toolFilter) {
6093
6071
  return true;
6094
6072
  }
6095
6073
  if (typeof this.toolFilter === "function") {
6096
- return this.toolFilter(tool, context);
6074
+ return this.toolFilter(tool, context4);
6097
6075
  }
6098
6076
  if (Array.isArray(this.toolFilter)) {
6099
6077
  return this.toolFilter.includes(tool.name);
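
In the isSelected hunk above, the filtering logic is unchanged apart from the renamed `context4` parameter: a function filter is invoked with the MCP tool and the context, while an array filter matches by tool name. A usage sketch for both shapes, assuming the getMcpTools(config, toolFilter) entry point shown later in this diff (the config and tool names are examples):

    // Array form: keep only the named MCP tools.
    const namedFilter = ["get_weather", "get_forecast"];

    // Function form: receives each MCP tool (plus the readonly context) and returns a boolean.
    const predicateFilter = (tool: { name: string }) => !tool.name.startsWith("admin_");

    // Hypothetical call, either filter shape works:
    // const tools = await getMcpTools(mcpConfig, namedFilter);
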
@@ -6146,7 +6124,7 @@ var McpToolset = (_class24 = class {
6146
6124
  * Retrieves tools from the MCP server and converts them to BaseTool instances.
6147
6125
  * Similar to Python's get_tools method.
6148
6126
  */
6149
- async getTools(context) {
6127
+ async getTools(context4) {
6150
6128
  try {
6151
6129
  if (this.isClosing) {
6152
6130
  throw new McpError(
@@ -6154,7 +6132,7 @@ var McpToolset = (_class24 = class {
6154
6132
  "resource_closed_error" /* RESOURCE_CLOSED_ERROR */
6155
6133
  );
6156
6134
  }
6157
- if (this.tools.length > 0 && !_optionalChain([this, 'access', _154 => _154.config, 'access', _155 => _155.cacheConfig, 'optionalAccess', _156 => _156.enabled]) === false) {
6135
+ if (this.tools.length > 0 && !_optionalChain([this, 'access', _155 => _155.config, 'access', _156 => _156.cacheConfig, 'optionalAccess', _157 => _157.enabled]) === false) {
6158
6136
  return this.tools;
6159
6137
  }
6160
6138
  if (!this.clientService) {
@@ -6168,7 +6146,7 @@ var McpToolset = (_class24 = class {
6168
6146
  }
6169
6147
  const tools = [];
6170
6148
  for (const mcpTool of toolsResponse.tools) {
6171
- if (this.isSelected(mcpTool, context)) {
6149
+ if (this.isSelected(mcpTool, context4)) {
6172
6150
  try {
6173
6151
  const tool = await createTool2(mcpTool, client);
6174
6152
  tools.push(tool);
@@ -6180,7 +6158,7 @@ var McpToolset = (_class24 = class {
6180
6158
  }
6181
6159
  }
6182
6160
  }
6183
- if (_optionalChain([this, 'access', _157 => _157.config, 'access', _158 => _158.cacheConfig, 'optionalAccess', _159 => _159.enabled]) !== false) {
6161
+ if (_optionalChain([this, 'access', _158 => _158.config, 'access', _159 => _159.cacheConfig, 'optionalAccess', _160 => _160.enabled]) !== false) {
6184
6162
  this.tools = tools;
6185
6163
  }
6186
6164
  return tools;
@@ -6205,9 +6183,9 @@ var McpToolset = (_class24 = class {
6205
6183
  /**
6206
6184
  * Refreshes the tool cache by clearing it and fetching tools again
6207
6185
  */
6208
- async refreshTools(context) {
6186
+ async refreshTools(context4) {
6209
6187
  this.tools = [];
6210
- return this.getTools(context);
6188
+ return this.getTools(context4);
6211
6189
  }
6212
6190
  /**
6213
6191
  * Closes the connection to the MCP server.
@@ -6251,6 +6229,7 @@ async function getMcpTools(config, toolFilter) {
6251
6229
  }
6252
6230
 
6253
6231
  // src/flows/llm-flows/functions.ts
6232
+
6254
6233
  var AF_FUNCTION_CALL_ID_PREFIX = "adk-";
6255
6234
  var REQUEST_EUC_FUNCTION_CALL_NAME = "adk_request_credential";
6256
6235
  function generateClientFunctionCallId() {
@@ -6268,12 +6247,12 @@ function populateClientFunctionCallId(modelResponseEvent) {
6268
6247
  }
6269
6248
  }
6270
6249
  function removeClientFunctionCallId(content) {
6271
- if (_optionalChain([content, 'optionalAccess', _160 => _160.parts])) {
6250
+ if (_optionalChain([content, 'optionalAccess', _161 => _161.parts])) {
6272
6251
  for (const part of content.parts) {
6273
- if (_optionalChain([part, 'access', _161 => _161.functionCall, 'optionalAccess', _162 => _162.id, 'optionalAccess', _163 => _163.startsWith, 'call', _164 => _164(AF_FUNCTION_CALL_ID_PREFIX)])) {
6252
+ if (_optionalChain([part, 'access', _162 => _162.functionCall, 'optionalAccess', _163 => _163.id, 'optionalAccess', _164 => _164.startsWith, 'call', _165 => _165(AF_FUNCTION_CALL_ID_PREFIX)])) {
6274
6253
  part.functionCall.id = void 0;
6275
6254
  }
6276
- if (_optionalChain([part, 'access', _165 => _165.functionResponse, 'optionalAccess', _166 => _166.id, 'optionalAccess', _167 => _167.startsWith, 'call', _168 => _168(AF_FUNCTION_CALL_ID_PREFIX)])) {
6255
+ if (_optionalChain([part, 'access', _166 => _166.functionResponse, 'optionalAccess', _167 => _167.id, 'optionalAccess', _168 => _168.startsWith, 'call', _169 => _169(AF_FUNCTION_CALL_ID_PREFIX)])) {
6277
6256
  part.functionResponse.id = void 0;
6278
6257
  }
6279
6258
  }
@@ -6340,23 +6319,40 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
6340
6319
  toolsDict
6341
6320
  );
6342
6321
  const functionArgs = functionCall.args || {};
6343
- const functionResponse = await callToolAsync(
6344
- tool,
6345
- functionArgs,
6346
- toolContext
6347
- );
6348
- if (tool.isLongRunning) {
6322
+ const tracer2 = telemetryService.getTracer();
6323
+ const span = tracer2.startSpan(`execute_tool ${tool.name}`);
6324
+ const spanContext = _api.trace.setSpan(_api.context.active(), span);
6325
+ try {
6326
+ const functionResponse = await _api.context.with(spanContext, async () => {
6327
+ const result = await callToolAsync(tool, functionArgs, toolContext);
6328
+ if (tool.isLongRunning && !result) {
6329
+ return null;
6330
+ }
6331
+ const functionResponseEvent = buildResponseEvent(
6332
+ tool,
6333
+ result,
6334
+ toolContext,
6335
+ invocationContext
6336
+ );
6337
+ telemetryService.traceToolCall(
6338
+ tool,
6339
+ functionArgs,
6340
+ functionResponseEvent
6341
+ );
6342
+ return { result, event: functionResponseEvent };
6343
+ });
6349
6344
  if (!functionResponse) {
6350
6345
  continue;
6351
6346
  }
6347
+ functionResponseEvents.push(functionResponse.event);
6348
+ span.setStatus({ code: 1 });
6349
+ } catch (error) {
6350
+ span.recordException(error);
6351
+ span.setStatus({ code: 2, message: error.message });
6352
+ throw error;
6353
+ } finally {
6354
+ span.end();
6352
6355
  }
6353
- const functionResponseEvent = buildResponseEvent(
6354
- tool,
6355
- functionResponse,
6356
- toolContext,
6357
- invocationContext
6358
- );
6359
- functionResponseEvents.push(functionResponseEvent);
6360
6356
  }
6361
6357
  if (!functionResponseEvents.length) {
6362
6358
  return null;
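
The hunk above moves each tool execution inside an OpenTelemetry span named `execute_tool <tool.name>`: the call runs under that span's context, the response event is traced via telemetryService.traceToolCall, the span is marked OK or ERROR (the literal codes 1 and 2 in the compiled output), and it is always ended. A condensed sketch of the same wrapping with @opentelemetry/api, using an arbitrary tracer name as an assumption:

    import { context, trace, SpanStatusCode } from "@opentelemetry/api";

    async function runToolWithSpan<T>(toolName: string, fn: () => Promise<T>): Promise<T> {
      const tracer = trace.getTracer("adk"); // assumption: any tracer name works here
      const span = tracer.startSpan(`execute_tool ${toolName}`);
      try {
        // Make the span current for everything awaited inside fn().
        const result = await context.with(trace.setSpan(context.active(), span), fn);
        span.setStatus({ code: SpanStatusCode.OK });
        return result;
      } catch (error) {
        span.recordException(error as Error);
        span.setStatus({ code: SpanStatusCode.ERROR, message: (error as Error).message });
        throw error;
      } finally {
        span.end();
      }
    }
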
@@ -6418,7 +6414,7 @@ function mergeParallelFunctionResponseEvents(functionResponseEvents) {
6418
6414
  }
6419
6415
  const mergedParts = [];
6420
6416
  for (const event of functionResponseEvents) {
6421
- if (_optionalChain([event, 'access', _169 => _169.content, 'optionalAccess', _170 => _170.parts])) {
6417
+ if (_optionalChain([event, 'access', _170 => _170.content, 'optionalAccess', _171 => _171.parts])) {
6422
6418
  for (const part of event.content.parts) {
6423
6419
  mergedParts.push(part);
6424
6420
  }
@@ -6456,7 +6452,7 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6456
6452
  __init44() {this.responseProcessors = []}
6457
6453
  __init45() {this.logger = new Logger({ name: "BaseLlmFlow" })}
6458
6454
  async *runAsync(invocationContext) {
6459
- this.logger.info(`Agent '${invocationContext.agent.name}' started.`);
6455
+ this.logger.debug(`Agent '${invocationContext.agent.name}' started.`);
6460
6456
  let stepCount = 0;
6461
6457
  while (true) {
6462
6458
  stepCount++;
@@ -6466,7 +6462,7 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6466
6462
  yield event;
6467
6463
  }
6468
6464
  if (!lastEvent || lastEvent.isFinalResponse()) {
6469
- this.logger.info(
6465
+ this.logger.debug(
6470
6466
  `Agent '${invocationContext.agent.name}' finished after ${stepCount} steps.`
6471
6467
  );
6472
6468
  break;
@@ -6496,7 +6492,7 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6496
6492
  yield event;
6497
6493
  }
6498
6494
  if (invocationContext.endInvocation) {
6499
- this.logger.info("Invocation ended during preprocessing.");
6495
+ this.logger.debug("Invocation ended during preprocessing.");
6500
6496
  return;
6501
6497
  }
6502
6498
  const modelResponseEvent = new Event({
@@ -6536,9 +6532,23 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6536
6532
  yield event;
6537
6533
  }
6538
6534
  }
6539
- const tools = await agent.canonicalTools(
6535
+ let tools = await agent.canonicalTools(
6540
6536
  new ReadonlyContext(invocationContext)
6541
6537
  );
6538
+ if (tools.length > 1) {
6539
+ const seen = /* @__PURE__ */ new Set();
6540
+ const filtered = [];
6541
+ for (const t of tools) {
6542
+ const name = _optionalChain([t, 'optionalAccess', _172 => _172.name]);
6543
+ if (!name) continue;
6544
+ if (seen.has(name)) {
6545
+ continue;
6546
+ }
6547
+ seen.add(name);
6548
+ filtered.push(t);
6549
+ }
6550
+ tools = filtered;
6551
+ }
6542
6552
  for (const tool of tools) {
6543
6553
  const toolContext = new ToolContext(invocationContext);
6544
6554
  await tool.processLlmRequest(toolContext, llmRequest);
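
The preprocessing hunk above filters the agent's canonical tools before each one is registered on the request: the first tool with a given name wins, later duplicates are skipped, and unnamed entries are dropped. An equivalent standalone sketch:

    // Dedupe tools by name, mirroring the loop above: first occurrence wins, unnamed entries dropped.
    function dedupeByName<T extends { name?: string }>(tools: T[]): T[] {
      const seen = new Set<string>();
      const out: T[] = [];
      for (const t of tools) {
        const name = t?.name;
        if (!name || seen.has(name)) continue;
        seen.add(name);
        out.push(t);
      }
      return out;
    }
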
@@ -6546,7 +6556,7 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6546
6556
  if (tools.length > 0) {
6547
6557
  const toolsData = tools.map((tool) => ({
6548
6558
  Name: tool.name,
6549
- Description: _optionalChain([tool, 'access', _171 => _171.description, 'optionalAccess', _172 => _172.substring, 'call', _173 => _173(0, 50)]) + (_optionalChain([tool, 'access', _174 => _174.description, 'optionalAccess', _175 => _175.length]) > 50 ? "..." : ""),
6559
+ Description: _optionalChain([tool, 'access', _173 => _173.description, 'optionalAccess', _174 => _174.substring, 'call', _175 => _175(0, 50)]) + (_optionalChain([tool, 'access', _176 => _176.description, 'optionalAccess', _177 => _177.length]) > 50 ? "..." : ""),
6550
6560
  "Long Running": tool.isLongRunning ? "Yes" : "No"
6551
6561
  }));
6552
6562
  this.logger.debugArray("\u{1F6E0}\uFE0F Available Tools", toolsData);
@@ -6609,14 +6619,14 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6609
6619
  );
6610
6620
  if (functionResponseEvent) {
6611
6621
  yield functionResponseEvent;
6612
- const transferToAgent = _optionalChain([functionResponseEvent, 'access', _176 => _176.actions, 'optionalAccess', _177 => _177.transferToAgent]);
6622
+ const transferToAgent = _optionalChain([functionResponseEvent, 'access', _178 => _178.actions, 'optionalAccess', _179 => _179.transferToAgent]);
6613
6623
  if (transferToAgent) {
6614
- this.logger.info(`\u{1F504} Live transfer to agent '${transferToAgent}'`);
6624
+ this.logger.debug(`\u{1F504} Live transfer to agent '${transferToAgent}'`);
6615
6625
  const agentToRun = this._getAgentToRun(
6616
6626
  invocationContext,
6617
6627
  transferToAgent
6618
6628
  );
6619
- for await (const event of _optionalChain([agentToRun, 'access', _178 => _178.runLive, 'optionalCall', _179 => _179(invocationContext)]) || agentToRun.runAsync(invocationContext)) {
6629
+ for await (const event of _optionalChain([agentToRun, 'access', _180 => _180.runLive, 'optionalCall', _181 => _181(invocationContext)]) || agentToRun.runAsync(invocationContext)) {
6620
6630
  yield event;
6621
6631
  }
6622
6632
  }
@@ -6648,9 +6658,9 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6648
6658
  yield authEvent;
6649
6659
  }
6650
6660
  yield functionResponseEvent;
6651
- const transferToAgent = _optionalChain([functionResponseEvent, 'access', _180 => _180.actions, 'optionalAccess', _181 => _181.transferToAgent]);
6661
+ const transferToAgent = _optionalChain([functionResponseEvent, 'access', _182 => _182.actions, 'optionalAccess', _183 => _183.transferToAgent]);
6652
6662
  if (transferToAgent) {
6653
- this.logger.info(`\u{1F504} Transferring to agent '${transferToAgent}'`);
6663
+ this.logger.debug(`\u{1F504} Transferring to agent '${transferToAgent}'`);
6654
6664
  const agentToRun = this._getAgentToRun(
6655
6665
  invocationContext,
6656
6666
  transferToAgent
@@ -6694,27 +6704,62 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6694
6704
  }
6695
6705
  invocationContext.incrementLlmCallCount();
6696
6706
  const isStreaming = invocationContext.runConfig.streamingMode === "sse" /* SSE */;
6697
- const tools = _optionalChain([llmRequest, 'access', _182 => _182.config, 'optionalAccess', _183 => _183.tools]) || [];
6707
+ let tools = _optionalChain([llmRequest, 'access', _184 => _184.config, 'optionalAccess', _185 => _185.tools]) || [];
6708
+ if (tools.length) {
6709
+ const deduped = [];
6710
+ const seenFn = /* @__PURE__ */ new Set();
6711
+ for (const t of tools) {
6712
+ const tool = t;
6713
+ if (tool && Array.isArray(tool.functionDeclarations)) {
6714
+ const newFds = tool.functionDeclarations.filter(
6715
+ (fd) => {
6716
+ if (_optionalChain([fd, 'optionalAccess', _186 => _186.name])) {
6717
+ if (seenFn.has(fd.name)) {
6718
+ return false;
6719
+ }
6720
+ seenFn.add(fd.name);
6721
+ }
6722
+ return true;
6723
+ }
6724
+ );
6725
+ if (newFds.length) {
6726
+ deduped.push({ ...tool, functionDeclarations: newFds });
6727
+ }
6728
+ } else if (_optionalChain([tool, 'optionalAccess', _187 => _187.name])) {
6729
+ if (seenFn.has(tool.name)) continue;
6730
+ seenFn.add(tool.name);
6731
+ deduped.push(tool);
6732
+ } else {
6733
+ deduped.push(tool);
6734
+ }
6735
+ }
6736
+ if (deduped.length !== tools.length) {
6737
+ this.logger.debug(
6738
+ `\u{1F501} Deduplicated tool/function declarations: ${tools.length} -> ${deduped.length}`
6739
+ );
6740
+ }
6741
+ llmRequest.config.tools = tools = deduped;
6742
+ }
6698
6743
  const toolNames = tools.map((tool) => {
6699
6744
  if (tool.functionDeclarations && Array.isArray(tool.functionDeclarations)) {
6700
6745
  return tool.functionDeclarations.map((fn) => fn.name).join(", ");
6701
6746
  }
6702
6747
  if (tool.name) return tool.name;
6703
- if (_optionalChain([tool, 'access', _184 => _184.function, 'optionalAccess', _185 => _185.name])) return tool.function.name;
6704
- if (_optionalChain([tool, 'access', _186 => _186.function, 'optionalAccess', _187 => _187.function, 'optionalAccess', _188 => _188.name])) return tool.function.function.name;
6748
+ if (_optionalChain([tool, 'access', _188 => _188.function, 'optionalAccess', _189 => _189.name])) return tool.function.name;
6749
+ if (_optionalChain([tool, 'access', _190 => _190.function, 'optionalAccess', _191 => _191.function, 'optionalAccess', _192 => _192.name])) return tool.function.function.name;
6705
6750
  return "unknown";
6706
6751
  }).join(", ");
6707
6752
  const systemInstruction = llmRequest.getSystemInstructionText() || "";
6708
6753
  const truncatedSystemInstruction = systemInstruction.length > 100 ? `${systemInstruction.substring(0, 100)}...` : systemInstruction;
6709
- const contentPreview = _optionalChain([llmRequest, 'access', _189 => _189.contents, 'optionalAccess', _190 => _190.length]) > 0 ? LogFormatter.formatContentPreview(llmRequest.contents[0]) : "none";
6754
+ const contentPreview = _optionalChain([llmRequest, 'access', _193 => _193.contents, 'optionalAccess', _194 => _194.length]) > 0 ? LogFormatter.formatContentPreview(llmRequest.contents[0]) : "none";
6710
6755
  this.logger.debugStructured("\u{1F4E4} LLM Request", {
6711
6756
  Model: llm.model,
6712
6757
  Agent: invocationContext.agent.name,
6713
- "Content Items": _optionalChain([llmRequest, 'access', _191 => _191.contents, 'optionalAccess', _192 => _192.length]) || 0,
6758
+ "Content Items": _optionalChain([llmRequest, 'access', _195 => _195.contents, 'optionalAccess', _196 => _196.length]) || 0,
6714
6759
  "Content Preview": contentPreview,
6715
6760
  "System Instruction": truncatedSystemInstruction || "none",
6716
6761
  "Available Tools": toolNames || "none",
6717
- "Tool Count": _optionalChain([llmRequest, 'access', _193 => _193.config, 'optionalAccess', _194 => _194.tools, 'optionalAccess', _195 => _195.length]) || 0,
6762
+ "Tool Count": _optionalChain([llmRequest, 'access', _197 => _197.config, 'optionalAccess', _198 => _198.tools, 'optionalAccess', _199 => _199.length]) || 0,
6718
6763
  Streaming: isStreaming ? "Yes" : "No"
6719
6764
  });
6720
6765
  let responseCount = 0;
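
Right before the request is sent, the hunk above applies a second dedupe pass, this time over llmRequest.config.tools: functionDeclarations are deduped by name across all tool entries, bare named entries are deduped the same way, entries left with no declarations are dropped, and a debug line reports when anything was removed. A standalone sketch of that pass (the ToolEntry shape is an approximation of the Gemini tool config, not the package's exact type):

    type ToolEntry = { name?: string; functionDeclarations?: Array<{ name?: string }> };

    function dedupeFunctionDeclarations(tools: ToolEntry[]): ToolEntry[] {
      const seen = new Set<string>();
      const out: ToolEntry[] = [];
      for (const tool of tools) {
        if (Array.isArray(tool.functionDeclarations)) {
          const kept = tool.functionDeclarations.filter((fd) => {
            if (!fd?.name) return true;           // unnamed declarations pass through
            if (seen.has(fd.name)) return false;  // drop repeats of the same function name
            seen.add(fd.name);
            return true;
          });
          if (kept.length) out.push({ ...tool, functionDeclarations: kept });
        } else if (tool?.name) {
          if (seen.has(tool.name)) continue;
          seen.add(tool.name);
          out.push(tool);
        } else {
          out.push(tool);
        }
      }
      return out;
    }
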
@@ -6729,8 +6774,8 @@ var BaseLlmFlow = (_class25 = class {constructor() { _class25.prototype.__init43
6729
6774
  llmRequest,
6730
6775
  llmResponse
6731
6776
  );
6732
- const tokenCount = _optionalChain([llmResponse, 'access', _196 => _196.usageMetadata, 'optionalAccess', _197 => _197.totalTokenCount]) || "unknown";
6733
- const functionCalls = _optionalChain([llmResponse, 'access', _198 => _198.content, 'optionalAccess', _199 => _199.parts, 'optionalAccess', _200 => _200.filter, 'call', _201 => _201((part) => part.functionCall)]) || [];
6777
+ const tokenCount = _optionalChain([llmResponse, 'access', _200 => _200.usageMetadata, 'optionalAccess', _201 => _201.totalTokenCount]) || "unknown";
6778
+ const functionCalls = _optionalChain([llmResponse, 'access', _202 => _202.content, 'optionalAccess', _203 => _203.parts, 'optionalAccess', _204 => _204.filter, 'call', _205 => _205((part) => part.functionCall)]) || [];
6734
6779
  const functionCallsDisplay = LogFormatter.formatFunctionCalls(functionCalls);
6735
6780
  const responsePreview = LogFormatter.formatResponsePreview(llmResponse);
6736
6781
  this.logger.debugStructured("\u{1F4E5} LLM Response", {
@@ -6874,7 +6919,7 @@ var EnhancedAuthConfig = class {
6874
6919
  */
6875
6920
  generateCredentialKey() {
6876
6921
  const schemeKey = this.authScheme.type || "unknown";
6877
- const credentialKey = _optionalChain([this, 'access', _202 => _202.rawAuthCredential, 'optionalAccess', _203 => _203.type]) || "none";
6922
+ const credentialKey = _optionalChain([this, 'access', _206 => _206.rawAuthCredential, 'optionalAccess', _207 => _207.type]) || "none";
6878
6923
  const timestamp = Date.now();
6879
6924
  return `adk_${schemeKey}_${credentialKey}_${timestamp}`;
6880
6925
  }
@@ -7031,7 +7076,7 @@ var AuthLlmRequestProcessor = class extends BaseLlmRequestProcessor {
7031
7076
  */
7032
7077
  parseAndStoreAuthResponse(authHandler, invocationContext) {
7033
7078
  try {
7034
- const credentialKey = _optionalChain([authHandler, 'access', _204 => _204.authConfig, 'access', _205 => _205.context, 'optionalAccess', _206 => _206.credentialKey]) || `temp:${Date.now()}`;
7079
+ const credentialKey = _optionalChain([authHandler, 'access', _208 => _208.authConfig, 'access', _209 => _209.context, 'optionalAccess', _210 => _210.credentialKey]) || `temp:${Date.now()}`;
7035
7080
  const fullCredentialKey = credentialKey.startsWith("temp:") ? credentialKey : `temp:${credentialKey}`;
7036
7081
  invocationContext.session.state[fullCredentialKey] = authHandler.credential;
7037
7082
  if (authHandler.authConfig.authScheme.type === "oauth2" || authHandler.authConfig.authScheme.type === "openIdConnect") {
@@ -7074,8 +7119,6 @@ var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
7074
7119
  llmRequest.liveConnectConfig.realtimeInputConfig = runConfig.realtimeInputConfig;
7075
7120
  llmRequest.liveConnectConfig.enableAffectiveDialog = runConfig.enableAffectiveDialog;
7076
7121
  llmRequest.liveConnectConfig.proactivity = runConfig.proactivity;
7077
- const tools = await agent.canonicalTools();
7078
- llmRequest.appendTools(tools);
7079
7122
  for await (const _ of []) {
7080
7123
  yield _;
7081
7124
  }
@@ -7139,7 +7182,7 @@ var BuiltInCodeExecutor = class extends BaseCodeExecutor {
7139
7182
  * Pre-process the LLM request for Gemini 2.0+ models to use the code execution tool
7140
7183
  */
7141
7184
  processLlmRequest(llmRequest) {
7142
- if (!_optionalChain([llmRequest, 'access', _207 => _207.model, 'optionalAccess', _208 => _208.startsWith, 'call', _209 => _209("gemini-2")])) {
7185
+ if (!_optionalChain([llmRequest, 'access', _211 => _211.model, 'optionalAccess', _212 => _212.startsWith, 'call', _213 => _213("gemini-2")])) {
7143
7186
  throw new Error(
7144
7187
  `Gemini code execution tool is not supported for model ${llmRequest.model}`
7145
7188
  );
@@ -7184,7 +7227,7 @@ var CodeExecutionUtils = class _CodeExecutionUtils {
7184
7227
  * Extracts the first code block from the content and truncates everything after it
7185
7228
  */
7186
7229
  static extractCodeAndTruncateContent(content, codeBlockDelimiters) {
7187
- if (!_optionalChain([content, 'optionalAccess', _210 => _210.parts, 'optionalAccess', _211 => _211.length])) {
7230
+ if (!_optionalChain([content, 'optionalAccess', _214 => _214.parts, 'optionalAccess', _215 => _215.length])) {
7188
7231
  return null;
7189
7232
  }
7190
7233
  for (let idx = 0; idx < content.parts.length; idx++) {
@@ -7270,7 +7313,7 @@ ${fileNames}`);
7270
7313
  * Converts the code execution parts to text parts in a Content
7271
7314
  */
7272
7315
  static convertCodeExecutionParts(content, codeBlockDelimiter, executionResultDelimiters) {
7273
- if (!_optionalChain([content, 'access', _212 => _212.parts, 'optionalAccess', _213 => _213.length])) {
7316
+ if (!_optionalChain([content, 'access', _216 => _216.parts, 'optionalAccess', _217 => _217.length])) {
7274
7317
  return;
7275
7318
  }
7276
7319
  const lastPart = content.parts[content.parts.length - 1];
@@ -7663,7 +7706,7 @@ async function* runPostProcessor(invocationContext, llmResponse) {
7663
7706
  function extractAndReplaceInlineFiles(codeExecutorContext, llmRequest) {
7664
7707
  const allInputFiles = codeExecutorContext.getInputFiles();
7665
7708
  const savedFileNames = new Set(allInputFiles.map((f) => f.name));
7666
- for (let i = 0; i < (_optionalChain([llmRequest, 'access', _214 => _214.contents, 'optionalAccess', _215 => _215.length]) || 0); i++) {
7709
+ for (let i = 0; i < (_optionalChain([llmRequest, 'access', _218 => _218.contents, 'optionalAccess', _219 => _219.length]) || 0); i++) {
7667
7710
  const content = llmRequest.contents[i];
7668
7711
  if (content.role !== "user" || !content.parts) {
7669
7712
  continue;
@@ -7695,7 +7738,7 @@ Available file: \`${fileName}\`
7695
7738
  }
7696
7739
  function getOrSetExecutionId(invocationContext, codeExecutorContext) {
7697
7740
  const agent = invocationContext.agent;
7698
- if (!hasCodeExecutor(agent) || !_optionalChain([agent, 'access', _216 => _216.codeExecutor, 'optionalAccess', _217 => _217.stateful])) {
7741
+ if (!hasCodeExecutor(agent) || !_optionalChain([agent, 'access', _220 => _220.codeExecutor, 'optionalAccess', _221 => _221.stateful])) {
7699
7742
  return void 0;
7700
7743
  }
7701
7744
  let executionId = codeExecutorContext.getExecutionId();
@@ -7926,7 +7969,7 @@ function rearrangeEventsForLatestFunctionResponse(events) {
7926
7969
  continue;
7927
7970
  }
7928
7971
  const functionResponses2 = event.getFunctionResponses();
7929
- if (_optionalChain([functionResponses2, 'optionalAccess', _218 => _218.some, 'call', _219 => _219((fr) => fr.id && functionResponsesIds.has(fr.id))])) {
7972
+ if (_optionalChain([functionResponses2, 'optionalAccess', _222 => _222.some, 'call', _223 => _223((fr) => fr.id && functionResponsesIds.has(fr.id))])) {
7930
7973
  functionResponseEvents.push(event);
7931
7974
  }
7932
7975
  }
@@ -8025,7 +8068,7 @@ function mergeFunctionResponseEvents(functionResponseEvents) {
8025
8068
  const partIndicesInMergedEvent = {};
8026
8069
  for (let idx = 0; idx < partsInMergedEvent.length; idx++) {
8027
8070
  const part = partsInMergedEvent[idx];
8028
- if (_optionalChain([part, 'access', _220 => _220.functionResponse, 'optionalAccess', _221 => _221.id])) {
8071
+ if (_optionalChain([part, 'access', _224 => _224.functionResponse, 'optionalAccess', _225 => _225.id])) {
8029
8072
  partIndicesInMergedEvent[part.functionResponse.id] = idx;
8030
8073
  }
8031
8074
  }
@@ -8034,7 +8077,7 @@ function mergeFunctionResponseEvents(functionResponseEvents) {
8034
8077
  throw new Error("There should be at least one function_response part.");
8035
8078
  }
8036
8079
  for (const part of event.content.parts) {
8037
- if (_optionalChain([part, 'access', _222 => _222.functionResponse, 'optionalAccess', _223 => _223.id])) {
8080
+ if (_optionalChain([part, 'access', _226 => _226.functionResponse, 'optionalAccess', _227 => _227.id])) {
8038
8081
  const functionCallId = part.functionResponse.id;
8039
8082
  if (functionCallId in partIndicesInMergedEvent) {
8040
8083
  partsInMergedEvent[partIndicesInMergedEvent[functionCallId]] = part;
@@ -8303,7 +8346,7 @@ var PlanReActPlanner = class extends BasePlanner {
8303
8346
  let firstFcPartIndex = -1;
8304
8347
  for (let i = 0; i < responseParts.length; i++) {
8305
8348
  if (responseParts[i].functionCall) {
8306
- if (!_optionalChain([responseParts, 'access', _224 => _224[i], 'access', _225 => _225.functionCall, 'optionalAccess', _226 => _226.name])) {
8349
+ if (!_optionalChain([responseParts, 'access', _228 => _228[i], 'access', _229 => _229.functionCall, 'optionalAccess', _230 => _230.name])) {
8307
8350
  continue;
8308
8351
  }
8309
8352
  preservedParts.push(responseParts[i]);
@@ -8342,7 +8385,7 @@ var PlanReActPlanner = class extends BasePlanner {
8342
8385
  * Handles non-function-call parts of the response
8343
8386
  */
8344
8387
  _handleNonFunctionCallParts(responsePart, preservedParts) {
8345
- if (_optionalChain([responsePart, 'access', _227 => _227.text, 'optionalAccess', _228 => _228.includes, 'call', _229 => _229(FINAL_ANSWER_TAG)])) {
8388
+ if (_optionalChain([responsePart, 'access', _231 => _231.text, 'optionalAccess', _232 => _232.includes, 'call', _233 => _233(FINAL_ANSWER_TAG)])) {
8346
8389
  const [reasoningText, finalAnswerText] = this._splitByLastPattern(
8347
8390
  responsePart.text,
8348
8391
  FINAL_ANSWER_TAG
@@ -8591,7 +8634,7 @@ var SharedMemoryRequestProcessor = class extends BaseLlmRequestProcessor {
8591
8634
  const memoryService = invocationContext.memoryService;
8592
8635
  if (!memoryService) return;
8593
8636
  const lastUserEvent = invocationContext.session.events.findLast(
8594
- (e) => e.author === "user" && _optionalChain([e, 'access', _230 => _230.content, 'optionalAccess', _231 => _231.parts, 'optionalAccess', _232 => _232.length])
8637
+ (e) => e.author === "user" && _optionalChain([e, 'access', _234 => _234.content, 'optionalAccess', _235 => _235.parts, 'optionalAccess', _236 => _236.length])
8595
8638
  );
8596
8639
  if (!lastUserEvent) return;
8597
8640
  const query = (_nullishCoalesce(lastUserEvent.content.parts, () => ( []))).map((p) => p.text || "").join(" ");
@@ -8602,7 +8645,7 @@ var SharedMemoryRequestProcessor = class extends BaseLlmRequestProcessor {
8602
8645
  });
8603
8646
  const sessionTexts = new Set(
8604
8647
  (llmRequest.contents || []).flatMap(
8605
- (c) => _optionalChain([c, 'access', _233 => _233.parts, 'optionalAccess', _234 => _234.map, 'call', _235 => _235((p) => p.text)]) || []
8648
+ (c) => _optionalChain([c, 'access', _237 => _237.parts, 'optionalAccess', _238 => _238.map, 'call', _239 => _239((p) => p.text)]) || []
8606
8649
  )
8607
8650
  );
8608
8651
  for (const memory of results.memories) {
@@ -9025,7 +9068,7 @@ var LlmAgent = (_class27 = class _LlmAgent extends BaseAgent {
9025
9068
  * This matches the Python implementation's _llm_flow property
9026
9069
  */
9027
9070
  get llmFlow() {
9028
- if (this.disallowTransferToParent && this.disallowTransferToPeers && !_optionalChain([this, 'access', _236 => _236.subAgents, 'optionalAccess', _237 => _237.length])) {
9071
+ if (this.disallowTransferToParent && this.disallowTransferToPeers && !_optionalChain([this, 'access', _240 => _240.subAgents, 'optionalAccess', _241 => _241.length])) {
9029
9072
  return new SingleFlow();
9030
9073
  }
9031
9074
  return new AutoFlow();
@@ -9041,7 +9084,7 @@ var LlmAgent = (_class27 = class _LlmAgent extends BaseAgent {
9041
9084
  );
9042
9085
  return;
9043
9086
  }
9044
- if (this.outputKey && event.isFinalResponse() && _optionalChain([event, 'access', _238 => _238.content, 'optionalAccess', _239 => _239.parts])) {
9087
+ if (this.outputKey && event.isFinalResponse() && _optionalChain([event, 'access', _242 => _242.content, 'optionalAccess', _243 => _243.parts])) {
9045
9088
  let result = event.content.parts.map((part) => part.text || "").join("");
9046
9089
  if (this.outputSchema) {
9047
9090
  if (!result.trim()) {
@@ -9069,19 +9112,19 @@ var LlmAgent = (_class27 = class _LlmAgent extends BaseAgent {
9069
9112
  * Core logic to run this agent via text-based conversation
9070
9113
  * This matches the Python implementation's _run_async_impl
9071
9114
  */
9072
- async *runAsyncImpl(context) {
9115
+ async *runAsyncImpl(context4) {
9073
9116
  this.logger.debug(`Starting LlmAgent execution for "${this.name}"`);
9074
9117
  try {
9075
- for await (const event of this.llmFlow.runAsync(context)) {
9118
+ for await (const event of this.llmFlow.runAsync(context4)) {
9076
9119
  this.maybeSaveOutputToState(event);
9077
9120
  yield event;
9078
9121
  }
9079
9122
  } catch (error) {
9080
9123
  this.logger.error("Error in LlmAgent execution:", error);
9081
9124
  const errorEvent = new Event({
9082
- invocationId: context.invocationId,
9125
+ invocationId: context4.invocationId,
9083
9126
  author: this.name,
9084
- branch: context.branch,
9127
+ branch: context4.branch,
9085
9128
  content: {
9086
9129
  parts: [
9087
9130
  {
@@ -9269,7 +9312,7 @@ var LoopAgent = class extends BaseAgent {
9269
9312
  for (const subAgent of this.subAgents) {
9270
9313
  for await (const event of subAgent.runAsync(ctx)) {
9271
9314
  yield event;
9272
- if (_optionalChain([event, 'access', _240 => _240.actions, 'optionalAccess', _241 => _241.escalate])) {
9315
+ if (_optionalChain([event, 'access', _244 => _244.actions, 'optionalAccess', _245 => _245.escalate])) {
9273
9316
  return;
9274
9317
  }
9275
9318
  }
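
The escalate check above is the consumer side of the ExitLoopTool change earlier in this diff: the tool sets `actions.escalate = true` on its context, and the loop agent stops iterating as soon as a yielded event carries that flag. A reduced sketch of the contract (the interfaces are illustrative, not the package's real types):

    interface EventActions { escalate?: boolean }
    interface AgentEvent { actions?: EventActions }

    // Loop driver: forward events, stop the whole loop when any event escalates.
    async function runLoopIteration(
      events: AsyncIterable<AgentEvent>,
      emit: (e: AgentEvent) => void,
    ): Promise<boolean> {
      for await (const event of events) {
        emit(event);
        if (event.actions?.escalate) return true; // exit_loop was called
      }
      return false;
    }
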
@@ -9349,7 +9392,7 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9349
9392
  /**
9350
9393
  * Gets the next nodes to execute based on the current node and its result
9351
9394
  */
9352
- async getNextNodes(currentNode, lastEvent, context) {
9395
+ async getNextNodes(currentNode, lastEvent, context4) {
9353
9396
  if (!currentNode.targets || currentNode.targets.length === 0) {
9354
9397
  return [];
9355
9398
  }
@@ -9361,7 +9404,7 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9361
9404
  continue;
9362
9405
  }
9363
9406
  if (targetNode.condition) {
9364
- const shouldExecute = await targetNode.condition(lastEvent, context);
9407
+ const shouldExecute = await targetNode.condition(lastEvent, context4);
9365
9408
  if (!shouldExecute) {
9366
9409
  this.logger.debug(`Skipping node "${targetName}" due to condition`);
9367
9410
  continue;
@@ -9374,7 +9417,7 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9374
9417
  /**
9375
9418
  * Core logic to run this agent via text-based conversation.
9376
9419
  */
9377
- async *runAsyncImpl(context) {
9420
+ async *runAsyncImpl(context4) {
9378
9421
  this.logger.debug(
9379
9422
  `Starting graph execution from root node "${this.rootNode}"`
9380
9423
  );
@@ -9396,7 +9439,7 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9396
9439
  return;
9397
9440
  }
9398
9441
  let stepCount = 0;
9399
- const nodesToExecute = [{ node: rootNode, context }];
9442
+ const nodesToExecute = [{ node: rootNode, context: context4 }];
9400
9443
  const executedNodes = [];
9401
9444
  let lastEvent = null;
9402
9445
  while (nodesToExecute.length > 0 && stepCount < this.maxSteps) {
@@ -9404,7 +9447,7 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9404
9447
  const { node } = nodesToExecute.shift();
9405
9448
  this.logger.debug(`Step ${stepCount}: Executing node "${node.name}"`);
9406
9449
  executedNodes.push(node.name);
9407
- const childContext = context.createChildContext(node.agent);
9450
+ const childContext = context4.createChildContext(node.agent);
9408
9451
  try {
9409
9452
  const nodeEvents = [];
9410
9453
  for await (const event of node.agent.runAsync(childContext)) {
@@ -9417,7 +9460,7 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9417
9460
  events: nodeEvents
9418
9461
  });
9419
9462
  if (lastEvent) {
9420
- const nextNodes = await this.getNextNodes(node, lastEvent, context);
9463
+ const nextNodes = await this.getNextNodes(node, lastEvent, context4);
9421
9464
  for (const nextNode of nextNodes) {
9422
9465
  nodesToExecute.push({
9423
9466
  node: nextNode,
@@ -9460,8 +9503,8 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9460
9503
  * Core logic to run this agent via video/audio-based conversation.
9461
9504
  * For LangGraph, this follows the same execution pattern as text-based.
9462
9505
  */
9463
- async *runLiveImpl(context) {
9464
- yield* this.runAsyncImpl(context);
9506
+ async *runLiveImpl(context4) {
9507
+ yield* this.runAsyncImpl(context4);
9465
9508
  }
9466
9509
  /**
9467
9510
  * Gets the execution results from the last run
@@ -9511,6 +9554,7 @@ var LangGraphAgent = (_class28 = class extends BaseAgent {
9511
9554
  }, _class28);
9512
9555
 
9513
9556
  // src/agents/agent-builder.ts
9557
+ init_logger();
9514
9558
 
9515
9559
 
9516
9560
  // src/runners.ts
@@ -9580,17 +9624,17 @@ var RunConfig = class {
9580
9624
  */
9581
9625
 
9582
9626
  constructor(config) {
9583
- this.speechConfig = _optionalChain([config, 'optionalAccess', _242 => _242.speechConfig]);
9584
- this.responseModalities = _optionalChain([config, 'optionalAccess', _243 => _243.responseModalities]);
9585
- this.saveInputBlobsAsArtifacts = _optionalChain([config, 'optionalAccess', _244 => _244.saveInputBlobsAsArtifacts]) || false;
9586
- this.supportCFC = _optionalChain([config, 'optionalAccess', _245 => _245.supportCFC]) || false;
9587
- this.streamingMode = _optionalChain([config, 'optionalAccess', _246 => _246.streamingMode]) || "NONE" /* NONE */;
9588
- this.outputAudioTranscription = _optionalChain([config, 'optionalAccess', _247 => _247.outputAudioTranscription]);
9589
- this.inputAudioTranscription = _optionalChain([config, 'optionalAccess', _248 => _248.inputAudioTranscription]);
9590
- this.realtimeInputConfig = _optionalChain([config, 'optionalAccess', _249 => _249.realtimeInputConfig]);
9591
- this.enableAffectiveDialog = _optionalChain([config, 'optionalAccess', _250 => _250.enableAffectiveDialog]);
9592
- this.proactivity = _optionalChain([config, 'optionalAccess', _251 => _251.proactivity]);
9593
- this.maxLlmCalls = _nullishCoalesce(_optionalChain([config, 'optionalAccess', _252 => _252.maxLlmCalls]), () => ( 500));
9627
+ this.speechConfig = _optionalChain([config, 'optionalAccess', _246 => _246.speechConfig]);
9628
+ this.responseModalities = _optionalChain([config, 'optionalAccess', _247 => _247.responseModalities]);
9629
+ this.saveInputBlobsAsArtifacts = _optionalChain([config, 'optionalAccess', _248 => _248.saveInputBlobsAsArtifacts]) || false;
9630
+ this.supportCFC = _optionalChain([config, 'optionalAccess', _249 => _249.supportCFC]) || false;
9631
+ this.streamingMode = _optionalChain([config, 'optionalAccess', _250 => _250.streamingMode]) || "NONE" /* NONE */;
9632
+ this.outputAudioTranscription = _optionalChain([config, 'optionalAccess', _251 => _251.outputAudioTranscription]);
9633
+ this.inputAudioTranscription = _optionalChain([config, 'optionalAccess', _252 => _252.inputAudioTranscription]);
9634
+ this.realtimeInputConfig = _optionalChain([config, 'optionalAccess', _253 => _253.realtimeInputConfig]);
9635
+ this.enableAffectiveDialog = _optionalChain([config, 'optionalAccess', _254 => _254.enableAffectiveDialog]);
9636
+ this.proactivity = _optionalChain([config, 'optionalAccess', _255 => _255.proactivity]);
9637
+ this.maxLlmCalls = _nullishCoalesce(_optionalChain([config, 'optionalAccess', _256 => _256.maxLlmCalls]), () => ( 500));
9594
9638
  this.validateMaxLlmCalls();
9595
9639
  }
9596
9640
  /**
@@ -9624,19 +9668,19 @@ var InMemoryArtifactService = (_class29 = class {constructor() { _class29.protot
9624
9668
  }
9625
9669
  async saveArtifact(args) {
9626
9670
  const { appName, userId, sessionId, filename, artifact } = args;
9627
- const path2 = this.getArtifactPath(appName, userId, sessionId, filename);
9628
- if (!this.artifacts.has(path2)) {
9629
- this.artifacts.set(path2, []);
9671
+ const path3 = this.getArtifactPath(appName, userId, sessionId, filename);
9672
+ if (!this.artifacts.has(path3)) {
9673
+ this.artifacts.set(path3, []);
9630
9674
  }
9631
- const versions = this.artifacts.get(path2);
9675
+ const versions = this.artifacts.get(path3);
9632
9676
  const version = versions.length;
9633
9677
  versions.push(artifact);
9634
9678
  return version;
9635
9679
  }
9636
9680
  async loadArtifact(args) {
9637
9681
  const { appName, userId, sessionId, filename, version } = args;
9638
- const path2 = this.getArtifactPath(appName, userId, sessionId, filename);
9639
- const versions = this.artifacts.get(path2);
9682
+ const path3 = this.getArtifactPath(appName, userId, sessionId, filename);
9683
+ const versions = this.artifacts.get(path3);
9640
9684
  if (!versions || versions.length === 0) {
9641
9685
  return null;
9642
9686
  }
@@ -9657,12 +9701,12 @@ var InMemoryArtifactService = (_class29 = class {constructor() { _class29.protot
9657
9701
  const sessionPrefix = `${appName}/${userId}/${sessionId}/`;
9658
9702
  const userNamespacePrefix = `${appName}/${userId}/user/`;
9659
9703
  const filenames = [];
9660
- for (const path2 of this.artifacts.keys()) {
9661
- if (path2.startsWith(sessionPrefix)) {
9662
- const filename = path2.substring(sessionPrefix.length);
9704
+ for (const path3 of this.artifacts.keys()) {
9705
+ if (path3.startsWith(sessionPrefix)) {
9706
+ const filename = path3.substring(sessionPrefix.length);
9663
9707
  filenames.push(filename);
9664
- } else if (path2.startsWith(userNamespacePrefix)) {
9665
- const filename = path2.substring(userNamespacePrefix.length);
9708
+ } else if (path3.startsWith(userNamespacePrefix)) {
9709
+ const filename = path3.substring(userNamespacePrefix.length);
9666
9710
  filenames.push(filename);
9667
9711
  }
9668
9712
  }
@@ -9670,16 +9714,16 @@ var InMemoryArtifactService = (_class29 = class {constructor() { _class29.protot
9670
9714
  }
9671
9715
  async deleteArtifact(args) {
9672
9716
  const { appName, userId, sessionId, filename } = args;
9673
- const path2 = this.getArtifactPath(appName, userId, sessionId, filename);
9674
- if (!this.artifacts.has(path2)) {
9717
+ const path3 = this.getArtifactPath(appName, userId, sessionId, filename);
9718
+ if (!this.artifacts.has(path3)) {
9675
9719
  return;
9676
9720
  }
9677
- this.artifacts.delete(path2);
9721
+ this.artifacts.delete(path3);
9678
9722
  }
9679
9723
  async listVersions(args) {
9680
9724
  const { appName, userId, sessionId, filename } = args;
9681
- const path2 = this.getArtifactPath(appName, userId, sessionId, filename);
9682
- const versions = this.artifacts.get(path2);
9725
+ const path3 = this.getArtifactPath(appName, userId, sessionId, filename);
9726
+ const versions = this.artifacts.get(path3);
9683
9727
  if (!versions || versions.length === 0) {
9684
9728
  return [];
9685
9729
  }
@@ -9734,7 +9778,7 @@ var InMemoryMemoryService = (_class30 = class {
9734
9778
  }
9735
9779
  const userSessions = this._sessionEvents.get(userKey);
9736
9780
  const filteredEvents = session.events.filter(
9737
- (event) => _optionalChain([event, 'access', _253 => _253.content, 'optionalAccess', _254 => _254.parts])
9781
+ (event) => _optionalChain([event, 'access', _257 => _257.content, 'optionalAccess', _258 => _258.parts])
9738
9782
  );
9739
9783
  userSessions.set(session.id, filteredEvents);
9740
9784
  }
@@ -9873,7 +9917,7 @@ var InMemorySessionService = (_class31 = class extends BaseSessionService {const
9873
9917
  return this.createSessionImpl(appName, userId, state, sessionId);
9874
9918
  }
9875
9919
  createSessionImpl(appName, userId, state, sessionId) {
9876
- const finalSessionId = _optionalChain([sessionId, 'optionalAccess', _255 => _255.trim, 'call', _256 => _256()]) || _crypto.randomUUID.call(void 0, );
9920
+ const finalSessionId = _optionalChain([sessionId, 'optionalAccess', _259 => _259.trim, 'call', _260 => _260()]) || _crypto.randomUUID.call(void 0, );
9877
9921
  const session = {
9878
9922
  appName,
9879
9923
  userId,
@@ -10030,7 +10074,7 @@ var InMemorySessionService = (_class31 = class extends BaseSessionService {const
10030
10074
  warning(`sessionId ${sessionId} not in sessions[appName][userId]`);
10031
10075
  return event;
10032
10076
  }
10033
- if (_optionalChain([event, 'access', _257 => _257.actions, 'optionalAccess', _258 => _258.stateDelta])) {
10077
+ if (_optionalChain([event, 'access', _261 => _261.actions, 'optionalAccess', _262 => _262.stateDelta])) {
10034
10078
  for (const key in event.actions.stateDelta) {
10035
10079
  const value = event.actions.stateDelta[key];
10036
10080
  if (key.startsWith(State.APP_PREFIX)) {
@@ -10064,14 +10108,14 @@ function _findFunctionCallEventIfLastEventIsFunctionResponse(session) {
10064
10108
  return null;
10065
10109
  }
10066
10110
  const lastEvent = events[events.length - 1];
10067
- if (_optionalChain([lastEvent, 'access', _259 => _259.content, 'optionalAccess', _260 => _260.parts, 'optionalAccess', _261 => _261.some, 'call', _262 => _262((part) => part.functionResponse)])) {
10068
- const functionCallId = _optionalChain([lastEvent, 'access', _263 => _263.content, 'access', _264 => _264.parts, 'access', _265 => _265.find, 'call', _266 => _266(
10111
+ if (_optionalChain([lastEvent, 'access', _263 => _263.content, 'optionalAccess', _264 => _264.parts, 'optionalAccess', _265 => _265.some, 'call', _266 => _266((part) => part.functionResponse)])) {
10112
+ const functionCallId = _optionalChain([lastEvent, 'access', _267 => _267.content, 'access', _268 => _268.parts, 'access', _269 => _269.find, 'call', _270 => _270(
10069
10113
  (part) => part.functionResponse
10070
- ), 'optionalAccess', _267 => _267.functionResponse, 'optionalAccess', _268 => _268.id]);
10114
+ ), 'optionalAccess', _271 => _271.functionResponse, 'optionalAccess', _272 => _272.id]);
10071
10115
  if (!functionCallId) return null;
10072
10116
  for (let i = events.length - 2; i >= 0; i--) {
10073
10117
  const event = events[i];
10074
- const functionCalls = _optionalChain([event, 'access', _269 => _269.getFunctionCalls, 'optionalCall', _270 => _270()]) || [];
10118
+ const functionCalls = _optionalChain([event, 'access', _273 => _273.getFunctionCalls, 'optionalCall', _274 => _274()]) || [];
10075
10119
  for (const functionCall of functionCalls) {
10076
10120
  if (functionCall.id === functionCallId) {
10077
10121
  return event;
@@ -10149,7 +10193,7 @@ var Runner = (_class32 = class {
10149
10193
  }
10150
10194
  };
10151
10195
  invokeRunAsync();
10152
- return function* () {
10196
+ return (function* () {
10153
10197
  while (true) {
10154
10198
  while (queueIndex >= eventQueue.length && !asyncCompleted) {
10155
10199
  }
@@ -10162,7 +10206,7 @@ var Runner = (_class32 = class {
10162
10206
  }
10163
10207
  yield event;
10164
10208
  }
10165
- }();
10209
+ })();
10166
10210
  }
10167
10211
  /**
10168
10212
  * Main entry method to run the agent in this runner.
@@ -10174,11 +10218,11 @@ var Runner = (_class32 = class {
10174
10218
  runConfig = new RunConfig()
10175
10219
  }) {
10176
10220
  const span = tracer.startSpan("invocation");
10221
+ const spanContext = _api.trace.setSpan(_api.context.active(), span);
10177
10222
  try {
10178
- const session = await this.sessionService.getSession(
10179
- this.appName,
10180
- userId,
10181
- sessionId
10223
+ const session = await _api.context.with(
10224
+ spanContext,
10225
+ () => this.sessionService.getSession(this.appName, userId, sessionId)
10182
10226
  );
10183
10227
  if (!session) {
10184
10228
  throw new Error(`Session not found: ${sessionId}`);
@@ -10188,22 +10232,34 @@ var Runner = (_class32 = class {
10188
10232
  runConfig
10189
10233
  });
10190
10234
  if (newMessage) {
10191
- await this._appendNewMessageToSession(
10192
- session,
10193
- newMessage,
10194
- invocationContext,
10195
- runConfig.saveInputBlobsAsArtifacts || false
10235
+ await _api.context.with(
10236
+ spanContext,
10237
+ () => this._appendNewMessageToSession(
10238
+ session,
10239
+ newMessage,
10240
+ invocationContext,
10241
+ runConfig.saveInputBlobsAsArtifacts || false
10242
+ )
10196
10243
  );
10197
10244
  }
10198
10245
  invocationContext.agent = this._findAgentToRun(session, this.agent);
10199
- for await (const event of invocationContext.agent.runAsync(
10200
- invocationContext
10201
- )) {
10246
+ const agentGenerator = invocationContext.agent.runAsync(invocationContext);
10247
+ while (true) {
10248
+ const result = await _api.context.with(
10249
+ spanContext,
10250
+ () => agentGenerator.next()
10251
+ );
10252
+ if (result.done) {
10253
+ break;
10254
+ }
10255
+ const event = result.value;
10202
10256
  if (!event.partial) {
10203
- await this.sessionService.appendEvent(session, event);
10204
- if (this.memoryService) {
10205
- await this.memoryService.addSessionToMemory(session);
10206
- }
10257
+ await _api.context.with(spanContext, async () => {
10258
+ await this.sessionService.appendEvent(session, event);
10259
+ if (this.memoryService) {
10260
+ await this.memoryService.addSessionToMemory(session);
10261
+ }
10262
+ });
10207
10263
  }
10208
10264
  yield event;
10209
10265
  }
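
The Runner hunks above keep the "invocation" span active across the whole run: getSession, _appendNewMessageToSession, each step of the agent's async generator, and the appendEvent/addSessionToMemory bookkeeping are all advanced inside _api.context.with(spanContext, ...), so spans created during those awaits parent onto the invocation span even though the method itself is an async generator. A sketch of the same propagation pattern (names are illustrative; the real code also handles errors around this loop):

    import { context, trace } from "@opentelemetry/api";

    async function* runWithinSpan<T>(gen: AsyncGenerator<T>, spanName: string): AsyncGenerator<T> {
      const tracer = trace.getTracer("adk");
      const span = tracer.startSpan(spanName);
      const ctx = trace.setSpan(context.active(), span);
      try {
        while (true) {
          // Keep the span current across each awaited step of the wrapped generator.
          const step = await context.with(ctx, () => gen.next());
          if (step.done) break;
          yield step.value; // yielding happens outside context.with, mirroring the compiled code
        }
      } finally {
        span.end();
      }
    }
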
@@ -10262,15 +10318,15 @@ var Runner = (_class32 = class {
10262
10318
  */
10263
10319
  _findAgentToRun(session, rootAgent) {
10264
10320
  const event = _findFunctionCallEventIfLastEventIsFunctionResponse(session);
10265
- if (_optionalChain([event, 'optionalAccess', _271 => _271.author])) {
10321
+ if (_optionalChain([event, 'optionalAccess', _275 => _275.author])) {
10266
10322
  return rootAgent.findAgent(event.author);
10267
10323
  }
10268
- const nonUserEvents = _optionalChain([session, 'access', _272 => _272.events, 'optionalAccess', _273 => _273.filter, 'call', _274 => _274((e) => e.author !== "user"), 'access', _275 => _275.reverse, 'call', _276 => _276()]) || [];
10324
+ const nonUserEvents = _optionalChain([session, 'access', _276 => _276.events, 'optionalAccess', _277 => _277.filter, 'call', _278 => _278((e) => e.author !== "user"), 'access', _279 => _279.reverse, 'call', _280 => _280()]) || [];
10269
10325
  for (const event2 of nonUserEvents) {
10270
10326
  if (event2.author === rootAgent.name) {
10271
10327
  return rootAgent;
10272
10328
  }
10273
- const agent = _optionalChain([rootAgent, 'access', _277 => _277.findSubAgent, 'optionalCall', _278 => _278(event2.author)]);
10329
+ const agent = _optionalChain([rootAgent, 'access', _281 => _281.findSubAgent, 'optionalCall', _282 => _282(event2.author)]);
10274
10330
  if (!agent) {
10275
10331
  this.logger.debug(
10276
10332
  `Event from an unknown agent: ${event2.author}, event id: ${event2.id}`
@@ -10350,10 +10406,16 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10350
10406
 
10351
10407
  __init56() {this.agentType = "llm"}
10352
10408
 
10409
+
10410
+ // If provided, reuse directly
10411
+ __init57() {this.definitionLocked = false}
10412
+ // Lock further definition mutation after withAgent
10413
+ __init58() {this.warnedMethods = /* @__PURE__ */ new Set()}
10414
+ __init59() {this.logger = new Logger({ name: "AgentBuilder" })}
10353
10415
  /**
10354
10416
  * Private constructor - use static create() method
10355
10417
  */
10356
- constructor(name) {;_class33.prototype.__init56.call(this);
10418
+ constructor(name) {;_class33.prototype.__init56.call(this);_class33.prototype.__init57.call(this);_class33.prototype.__init58.call(this);_class33.prototype.__init59.call(this);
10357
10419
  this.config = { name };
10358
10420
  }
10359
10421
  /**
@@ -10378,6 +10440,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10378
10440
  * @returns This builder instance for chaining
10379
10441
  */
10380
10442
  withModel(model) {
10443
+ this.warnIfLocked("withModel");
10381
10444
  this.config.model = model;
10382
10445
  return this;
10383
10446
  }
@@ -10387,6 +10450,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10387
10450
  * @returns This builder instance for chaining
10388
10451
  */
10389
10452
  withDescription(description) {
10453
+ this.warnIfLocked("withDescription");
10390
10454
  this.config.description = description;
10391
10455
  return this;
10392
10456
  }
@@ -10396,14 +10460,17 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10396
10460
  * @returns This builder instance for chaining
10397
10461
  */
10398
10462
  withInstruction(instruction) {
10463
+ this.warnIfLocked("withInstruction");
10399
10464
  this.config.instruction = instruction;
10400
10465
  return this;
10401
10466
  }
10402
10467
  withInputSchema(schema) {
10468
+ this.warnIfLocked("withInputSchema");
10403
10469
  this.config.inputSchema = schema;
10404
10470
  return this;
10405
10471
  }
10406
10472
  withOutputSchema(schema) {
10473
+ this.warnIfLocked("withOutputSchema");
10407
10474
  this.config.outputSchema = schema;
10408
10475
  return this;
10409
10476
  }
@@ -10413,6 +10480,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10413
10480
  * @returns This builder instance for chaining
10414
10481
  */
10415
10482
  withTools(...tools) {
10483
+ this.warnIfLocked("withTools");
10416
10484
  this.config.tools = [...this.config.tools || [], ...tools];
10417
10485
  return this;
10418
10486
  }
@@ -10422,6 +10490,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10422
10490
  * @returns This builder instance for chaining
10423
10491
  */
10424
10492
  withPlanner(planner) {
10493
+ this.warnIfLocked("withPlanner");
10425
10494
  this.config.planner = planner;
10426
10495
  return this;
10427
10496
  }
@@ -10431,6 +10500,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10431
10500
  * @returns This builder instance for chaining
10432
10501
  */
10433
10502
  withCodeExecutor(codeExecutor) {
10503
+ this.warnIfLocked("withCodeExecutor");
10434
10504
  this.config.codeExecutor = codeExecutor;
10435
10505
  return this;
10436
10506
  }
@@ -10440,6 +10510,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10440
10510
  * @returns This builder instance for chaining
10441
10511
  */
10442
10512
  withOutputKey(outputKey) {
10513
+ this.warnIfLocked("withOutputKey");
10443
10514
  this.config.outputKey = outputKey;
10444
10515
  return this;
10445
10516
  }
@@ -10449,6 +10520,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10449
10520
  * @returns This builder instance for chaining
10450
10521
  */
10451
10522
  withSubAgents(subAgents) {
10523
+ this.warnIfLocked("withSubAgents");
10452
10524
  this.config.subAgents = subAgents;
10453
10525
  return this;
10454
10526
  }
@@ -10458,6 +10530,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10458
10530
  * @returns This builder instance for chaining
10459
10531
  */
10460
10532
  withBeforeAgentCallback(callback) {
10533
+ this.warnIfLocked("withBeforeAgentCallback");
10461
10534
  this.config.beforeAgentCallback = callback;
10462
10535
  return this;
10463
10536
  }
@@ -10467,15 +10540,29 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10467
10540
  * @returns This builder instance for chaining
10468
10541
  */
10469
10542
  withAfterAgentCallback(callback) {
10543
+ this.warnIfLocked("withAfterAgentCallback");
10470
10544
  this.config.afterAgentCallback = callback;
10471
10545
  return this;
10472
10546
  }
10547
+ /**
10548
+ * Provide an already constructed agent instance. Further definition-mutating calls
10549
+ * (model/tools/instruction/etc.) will be ignored with a dev warning.
10550
+ */
10551
+ withAgent(agent) {
10552
+ this.existingAgent = agent;
10553
+ this.definitionLocked = true;
10554
+ if (this.config.name === "default_agent" && agent.name) {
10555
+ this.config.name = agent.name;
10556
+ }
10557
+ return this;
10558
+ }
10473
10559
  /**
10474
10560
  * Configure as a sequential agent
10475
10561
  * @param subAgents Sub-agents to execute in sequence
10476
10562
  * @returns This builder instance for chaining
10477
10563
  */
10478
10564
  asSequential(subAgents) {
10565
+ this.warnIfLocked("asSequential");
10479
10566
  this.agentType = "sequential";
10480
10567
  this.config.subAgents = subAgents;
10481
10568
  return this;
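
The withAgent() addition above lets the builder wrap an already constructed agent: it stores the instance, locks the definition, and adopts the agent's name when the builder still carries the default one; createAgent() (later in this diff) then returns that instance untouched, and any further definition call only triggers the one-time warnIfLocked notice outside production. A usage sketch, where the LlmAgent constructor options are an assumption based on the builder's config fields rather than a verified signature:

    // Hypothetical pre-built agent; option names mirror the builder's config keys.
    const prebuilt = new LlmAgent({
      name: "support_agent",
      model: "gemini-2.0-flash",
      instruction: "Answer support questions briefly.",
    });

    const builder = AgentBuilder.create("default_agent")
      .withAgent(prebuilt)          // definition locked; builder name becomes "support_agent"
      .withModel("gemini-2.5-pro"); // ignored: logs a one-time dev warning, config is not mutated
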
@@ -10486,6 +10573,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10486
10573
  * @returns This builder instance for chaining
10487
10574
  */
10488
10575
  asParallel(subAgents) {
10576
+ this.warnIfLocked("asParallel");
10489
10577
  this.agentType = "parallel";
10490
10578
  this.config.subAgents = subAgents;
10491
10579
  return this;
@@ -10497,6 +10585,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10497
10585
  * @returns This builder instance for chaining
10498
10586
  */
10499
10587
  asLoop(subAgents, maxIterations = 3) {
10588
+ this.warnIfLocked("asLoop");
10500
10589
  this.agentType = "loop";
10501
10590
  this.config.subAgents = subAgents;
10502
10591
  this.config.maxIterations = maxIterations;
@@ -10509,6 +10598,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10509
10598
  * @returns This builder instance for chaining
10510
10599
  */
10511
10600
  asLangGraph(nodes, rootNode) {
10601
+ this.warnIfLocked("asLangGraph");
10512
10602
  this.agentType = "langgraph";
10513
10603
  this.config.nodes = nodes;
10514
10604
  this.config.rootNode = rootNode;
@@ -10635,6 +10725,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10635
10725
  * @returns Created agent instance
10636
10726
  */
10637
10727
  createAgent() {
10728
+ if (this.existingAgent) return this.existingAgent;
10638
10729
  switch (this.agentType) {
10639
10730
  case "llm": {
10640
10731
  if (!this.config.model) {
@@ -10729,7 +10820,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10729
10820
  async ask(message) {
10730
10821
  const newMessage = typeof message === "string" ? { parts: [{ text: message }] } : typeof message === "object" && "contents" in message ? { parts: message.contents[message.contents.length - 1].parts } : message;
10731
10822
  let response = "";
10732
- if (!_optionalChain([sessionOptions, 'optionalAccess', _279 => _279.userId])) {
10823
+ if (!_optionalChain([sessionOptions, 'optionalAccess', _283 => _283.userId])) {
10733
10824
  throw new Error("Session configuration is required");
10734
10825
  }
10735
10826
  for await (const event of baseRunner.runAsync({
@@ -10737,7 +10828,7 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10737
10828
  sessionId: session.id,
10738
10829
  newMessage
10739
10830
  })) {
10740
- if (_optionalChain([event, 'access', _280 => _280.content, 'optionalAccess', _281 => _281.parts]) && Array.isArray(event.content.parts)) {
10831
+ if (_optionalChain([event, 'access', _284 => _284.content, 'optionalAccess', _285 => _285.parts]) && Array.isArray(event.content.parts)) {
10741
10832
  const content = event.content.parts.map(
10742
10833
  (part) => (part && typeof part === "object" && "text" in part ? part.text : "") || ""
10743
10834
  ).join("");
@@ -10765,6 +10856,22 @@ var AgentBuilder = (_class33 = class _AgentBuilder {
10765
10856
  }
10766
10857
  };
10767
10858
  }
10859
+ /**
10860
+ * Warn (once per method) if the definition has been locked by withAgent().
10861
+ */
10862
+ warnIfLocked(method) {
10863
+ if (!this.definitionLocked) return;
10864
+ if (this.warnedMethods.has(method)) return;
10865
+ this.warnedMethods.add(method);
10866
+ if (process.env.NODE_ENV !== "production") {
10867
+ const msg = `AgentBuilder: attempted to call ${method} after withAgent(); ignoring. (Wrap the agent first OR configure before withAgent).`;
10868
+ if (this.logger && typeof this.logger.warn === "function") {
10869
+ this.logger.warn(msg);
10870
+ } else {
10871
+ console.warn(msg);
10872
+ }
10873
+ }
10874
+ }
10768
10875
  }, _class33);
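For reference, a minimal sketch of how the withAgent()/warnIfLocked() flow added above might be used. This is illustrative only: it assumes AgentBuilder is exported from the package root, and wrapExistingAgent/myExistingAgent are placeholder names, not identifiers from this diff.

  const { AgentBuilder } = require("@iqai/adk");

  async function wrapExistingAgent(myExistingAgent) {
    const { runner } = await AgentBuilder.create("wrapper")
      .withAgent(myExistingAgent) // locks the definition; adopts agent.name only if the builder is still "default_agent"
      .withOutputKey("result")    // ignored: warnIfLocked("withOutputKey") logs a dev warning outside production
      .build();
    return runner.ask("Hello");   // createAgent() short-circuits to the wrapped instance
  }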
10769
10876
 
10770
10877
  // src/memory/index.ts
@@ -10818,7 +10925,7 @@ var VertexAiSessionService = class extends BaseSessionService {
10818
10925
  path: `reasoningEngines/${reasoningEngineId}/sessions`,
10819
10926
  request_dict: sessionJsonDict
10820
10927
  });
10821
- console.info("Create Session response", apiResponse);
10928
+ console.debug("Create Session response", apiResponse);
10822
10929
  const createdSessionId = apiResponse.name.split("/").slice(-3, -2)[0];
10823
10930
  const operationId = apiResponse.name.split("/").pop();
10824
10931
  let maxRetryAttempt = 5;
@@ -10829,7 +10936,7 @@ var VertexAiSessionService = class extends BaseSessionService {
10829
10936
  path: `operations/${operationId}`,
10830
10937
  request_dict: {}
10831
10938
  });
10832
- if (_optionalChain([lroResponse, 'optionalAccess', _282 => _282.done])) {
10939
+ if (_optionalChain([lroResponse, 'optionalAccess', _286 => _286.done])) {
10833
10940
  break;
10834
10941
  }
10835
10942
  await new Promise((resolve) => setTimeout(resolve, 1e3));
@@ -10929,14 +11036,14 @@ var VertexAiSessionService = class extends BaseSessionService {
10929
11036
  async listSessions(appName, userId) {
10930
11037
  const reasoningEngineId = this.getReasoningEngineId(appName);
10931
11038
  const apiClient = this.getApiClient();
10932
- let path2 = `reasoningEngines/${reasoningEngineId}/sessions`;
11039
+ let path3 = `reasoningEngines/${reasoningEngineId}/sessions`;
10933
11040
  if (userId) {
10934
11041
  const parsedUserId = encodeURIComponent(`"${userId}"`);
10935
- path2 = `${path2}?filter=user_id=${parsedUserId}`;
11042
+ path3 = `${path3}?filter=user_id=${parsedUserId}`;
10936
11043
  }
10937
11044
  const apiResponse = await apiClient.async_request({
10938
11045
  http_method: "GET",
10939
- path: path2,
11046
+ path: path3,
10940
11047
  request_dict: {}
10941
11048
  });
10942
11049
  if (apiResponse.httpHeaders) {
@@ -11101,9 +11208,9 @@ var VertexAiSessionService = class extends BaseSessionService {
11101
11208
  var _kysely = require('kysely');
11102
11209
  var DatabaseSessionService = (_class34 = class extends BaseSessionService {
11103
11210
 
11104
- __init57() {this.initialized = false}
11211
+ __init60() {this.initialized = false}
11105
11212
  constructor(config) {
11106
- super();_class34.prototype.__init57.call(this);;
11213
+ super();_class34.prototype.__init60.call(this);;
11107
11214
  this.db = config.db;
11108
11215
  if (!config.skipTableCreation) {
11109
11216
  this.initializeDatabase().catch((error) => {
@@ -11200,12 +11307,12 @@ var DatabaseSessionService = (_class34 = class extends BaseSessionService {
11200
11307
  }
11201
11308
  async createSession(appName, userId, state, sessionId) {
11202
11309
  await this.ensureInitialized();
11203
- const id = _optionalChain([sessionId, 'optionalAccess', _283 => _283.trim, 'call', _284 => _284()]) || this.generateSessionId();
11310
+ const id = _optionalChain([sessionId, 'optionalAccess', _287 => _287.trim, 'call', _288 => _288()]) || this.generateSessionId();
11204
11311
  return await this.db.transaction().execute(async (trx) => {
11205
11312
  const appState = await trx.selectFrom("app_states").selectAll().where("app_name", "=", appName).executeTakeFirst();
11206
11313
  const userState = await trx.selectFrom("user_states").selectAll().where("app_name", "=", appName).where("user_id", "=", userId).executeTakeFirst();
11207
- let currentAppState = this.parseJsonSafely(_optionalChain([appState, 'optionalAccess', _285 => _285.state]), {});
11208
- let currentUserState = this.parseJsonSafely(_optionalChain([userState, 'optionalAccess', _286 => _286.state]), {});
11314
+ let currentAppState = this.parseJsonSafely(_optionalChain([appState, 'optionalAccess', _289 => _289.state]), {});
11315
+ let currentUserState = this.parseJsonSafely(_optionalChain([userState, 'optionalAccess', _290 => _290.state]), {});
11209
11316
  if (!appState) {
11210
11317
  await trx.insertInto("app_states").values({
11211
11318
  app_name: appName,
@@ -11264,21 +11371,21 @@ var DatabaseSessionService = (_class34 = class extends BaseSessionService {
11264
11371
  return void 0;
11265
11372
  }
11266
11373
  let eventQuery = trx.selectFrom("events").selectAll().where("session_id", "=", sessionId).orderBy("timestamp", "desc");
11267
- if (_optionalChain([config, 'optionalAccess', _287 => _287.afterTimestamp])) {
11374
+ if (_optionalChain([config, 'optionalAccess', _291 => _291.afterTimestamp])) {
11268
11375
  eventQuery = eventQuery.where(
11269
11376
  "timestamp",
11270
11377
  ">=",
11271
11378
  new Date(config.afterTimestamp * 1e3)
11272
11379
  );
11273
11380
  }
11274
- if (_optionalChain([config, 'optionalAccess', _288 => _288.numRecentEvents])) {
11381
+ if (_optionalChain([config, 'optionalAccess', _292 => _292.numRecentEvents])) {
11275
11382
  eventQuery = eventQuery.limit(config.numRecentEvents);
11276
11383
  }
11277
11384
  const storageEvents = await eventQuery.execute();
11278
11385
  const appState = await trx.selectFrom("app_states").selectAll().where("app_name", "=", appName).executeTakeFirst();
11279
11386
  const userState = await trx.selectFrom("user_states").selectAll().where("app_name", "=", appName).where("user_id", "=", userId).executeTakeFirst();
11280
- const currentAppState = this.parseJsonSafely(_optionalChain([appState, 'optionalAccess', _289 => _289.state]), {});
11281
- const currentUserState = this.parseJsonSafely(_optionalChain([userState, 'optionalAccess', _290 => _290.state]), {});
11387
+ const currentAppState = this.parseJsonSafely(_optionalChain([appState, 'optionalAccess', _293 => _293.state]), {});
11388
+ const currentUserState = this.parseJsonSafely(_optionalChain([userState, 'optionalAccess', _294 => _294.state]), {});
11282
11389
  const sessionState = this.parseJsonSafely(storageSession.state, {});
11283
11390
  const mergedState = this.mergeState(
11284
11391
  currentAppState,
@@ -11336,13 +11443,13 @@ var DatabaseSessionService = (_class34 = class extends BaseSessionService {
11336
11443
  }
11337
11444
  const appState = await trx.selectFrom("app_states").selectAll().where("app_name", "=", session.appName).executeTakeFirst();
11338
11445
  const userState = await trx.selectFrom("user_states").selectAll().where("app_name", "=", session.appName).where("user_id", "=", session.userId).executeTakeFirst();
11339
- let currentAppState = this.parseJsonSafely(_optionalChain([appState, 'optionalAccess', _291 => _291.state]), {});
11340
- let currentUserState = this.parseJsonSafely(_optionalChain([userState, 'optionalAccess', _292 => _292.state]), {});
11446
+ let currentAppState = this.parseJsonSafely(_optionalChain([appState, 'optionalAccess', _295 => _295.state]), {});
11447
+ let currentUserState = this.parseJsonSafely(_optionalChain([userState, 'optionalAccess', _296 => _296.state]), {});
11341
11448
  let sessionState = this.parseJsonSafely(storageSession.state, {});
11342
11449
  let appStateDelta = {};
11343
11450
  let userStateDelta = {};
11344
11451
  let sessionStateDelta = {};
11345
- if (_optionalChain([event, 'access', _293 => _293.actions, 'optionalAccess', _294 => _294.stateDelta])) {
11452
+ if (_optionalChain([event, 'access', _297 => _297.actions, 'optionalAccess', _298 => _298.stateDelta])) {
11346
11453
  const deltas = this.extractStateDelta(event.actions.stateDelta);
11347
11454
  appStateDelta = deltas.appStateDelta;
11348
11455
  userStateDelta = deltas.userStateDelta;
@@ -11488,7 +11595,7 @@ var DatabaseSessionService = (_class34 = class extends BaseSessionService {
11488
11595
  * Overrides the base class method to work with plain object state.
11489
11596
  */
11490
11597
  updateSessionState(session, event) {
11491
- if (!_optionalChain([event, 'access', _295 => _295.actions, 'optionalAccess', _296 => _296.stateDelta])) {
11598
+ if (!_optionalChain([event, 'access', _299 => _299.actions, 'optionalAccess', _300 => _300.stateDelta])) {
11492
11599
  return;
11493
11600
  }
11494
11601
  for (const [key, value] of Object.entries(event.actions.stateDelta)) {
@@ -11658,7 +11765,7 @@ var GcsArtifactService = class {
11658
11765
  };
11659
11766
  return part;
11660
11767
  } catch (error) {
11661
- if (_optionalChain([error, 'optionalAccess', _297 => _297.code]) === 404) {
11768
+ if (_optionalChain([error, 'optionalAccess', _301 => _301.code]) === 404) {
11662
11769
  return null;
11663
11770
  }
11664
11771
  throw error;
@@ -11752,8 +11859,1305 @@ __export(flows_exports, {
11752
11859
  removeClientFunctionCallId: () => removeClientFunctionCallId
11753
11860
  });
11754
11861
 
11755
- // src/version.ts
11756
- var VERSION = "0.1.0";
11862
+ // src/evaluation/index.ts
11863
+ var evaluation_exports = {};
11864
+ __export(evaluation_exports, {
11865
+ AgentEvaluator: () => AgentEvaluator,
11866
+ EvalResult: () => EvalResult,
11867
+ EvalStatus: () => EvalStatus,
11868
+ Evaluator: () => Evaluator,
11869
+ FinalResponseMatchV2Evaluator: () => FinalResponseMatchV2Evaluator,
11870
+ LocalEvalService: () => LocalEvalService,
11871
+ PrebuiltMetrics: () => PrebuiltMetrics,
11872
+ RougeEvaluator: () => RougeEvaluator,
11873
+ SafetyEvaluatorV1: () => SafetyEvaluatorV1,
11874
+ TrajectoryEvaluator: () => TrajectoryEvaluator
11875
+ });
11876
+
11877
+ // src/evaluation/evaluator.ts
11878
+ var EvalStatus = /* @__PURE__ */ ((EvalStatus2) => {
11879
+ EvalStatus2[EvalStatus2["PASSED"] = 1] = "PASSED";
11880
+ EvalStatus2[EvalStatus2["FAILED"] = 2] = "FAILED";
11881
+ EvalStatus2[EvalStatus2["NOT_EVALUATED"] = 3] = "NOT_EVALUATED";
11882
+ return EvalStatus2;
11883
+ })(EvalStatus || {});
11884
+ var Evaluator = class {
11885
+ constructor(metric) {
11886
+ this.metric = metric;
11887
+ }
11888
+ static getMetricInfo(metricName) {
11889
+ throw new Error("getMetricInfo() must be implemented by subclass");
11890
+ }
11891
+ };
11892
+
11893
+ // src/evaluation/eval-metrics.ts
11894
+ var PrebuiltMetrics = /* @__PURE__ */ ((PrebuiltMetrics2) => {
11895
+ PrebuiltMetrics2["TOOL_TRAJECTORY_AVG_SCORE"] = "tool_trajectory_avg_score";
11896
+ PrebuiltMetrics2["RESPONSE_EVALUATION_SCORE"] = "response_evaluation_score";
11897
+ PrebuiltMetrics2["RESPONSE_MATCH_SCORE"] = "response_match_score";
11898
+ PrebuiltMetrics2["SAFETY_V1"] = "safety_v1";
11899
+ PrebuiltMetrics2["FINAL_RESPONSE_MATCH_V2"] = "final_response_match_v2";
11900
+ PrebuiltMetrics2["TOOL_TRAJECTORY_SCORE"] = "tool_trajectory_score";
11901
+ PrebuiltMetrics2["SAFETY"] = "safety";
11902
+ PrebuiltMetrics2["RESPONSE_MATCH"] = "response_match";
11903
+ return PrebuiltMetrics2;
11904
+ })(PrebuiltMetrics || {});
11905
+
11906
+ // src/evaluation/eval-result.ts
11907
+ var EvalResult = class {
11908
+
11909
+
11910
+
11911
+
11912
+
11913
+ constructor(init) {
11914
+ this.evalSetResultId = init.evalSetResultId || "";
11915
+ this.evalSetResultName = init.evalSetResultName;
11916
+ this.evalSetId = init.evalSetId || "";
11917
+ this.evalCaseResults = init.evalCaseResults || [];
11918
+ this.creationTimestamp = init.creationTimestamp || Date.now() / 1e3;
11919
+ }
11920
+ };
11921
+
11922
+ // src/evaluation/agent-evaluator.ts
11923
+
11924
+
11925
+
11926
+ // src/evaluation/base-eval-service.ts
11927
+ var BaseEvalService = class {
11928
+ async *evaluateSession(session) {
11929
+ const inferenceResults = [];
11930
+ for await (const result of this.performInference({
11931
+ evalSetId: session.evalSetId,
11932
+ evalCases: session.evalCases
11933
+ })) {
11934
+ inferenceResults.push(result);
11935
+ }
11936
+ for await (const result of this.evaluate({
11937
+ inferenceResults,
11938
+ evaluateConfig: session.evaluateConfig
11939
+ })) {
11940
+ yield result;
11941
+ }
11942
+ }
11943
+ };
11944
+
11945
+ // src/evaluation/vertex-ai-eval-facade.ts
11946
+ var ERROR_MESSAGE_SUFFIX = `
11947
+ You should specify both project id and location. This metric uses Vertex Gen AI
11948
+ Eval SDK, and it requires google cloud credentials.
11949
+
11950
+ If using an .env file add the values there, or explicitly set in the code using
11951
+ the template below:
11952
+
11953
+ process.env.GOOGLE_CLOUD_LOCATION = <LOCATION>
11954
+ process.env.GOOGLE_CLOUD_PROJECT = <PROJECT ID>
11955
+ `;
11956
+ var VertexAiEvalFacade = class _VertexAiEvalFacade {
11957
+
11958
+
11959
+ constructor(config) {
11960
+ this.threshold = config.threshold;
11961
+ this.metricName = config.metricName;
11962
+ }
11963
+ async evaluateInvocations(actualInvocations, expectedInvocations) {
11964
+ let totalScore = 0;
11965
+ let numInvocations = 0;
11966
+ const perInvocationResults = [];
11967
+ for (let i = 0; i < actualInvocations.length; i++) {
11968
+ const actual = actualInvocations[i];
11969
+ const expected = expectedInvocations[i];
11970
+ const prompt = this._getText(expected.userContent);
11971
+ const reference = this._getText(expected.finalResponse);
11972
+ const response = this._getText(actual.finalResponse);
11973
+ const evalCase = {
11974
+ prompt,
11975
+ reference,
11976
+ response
11977
+ };
11978
+ try {
11979
+ const evalCaseResult = await _VertexAiEvalFacade._performEval(
11980
+ [evalCase],
11981
+ [this.metricName]
11982
+ );
11983
+ const score = this._getScore(evalCaseResult);
11984
+ perInvocationResults.push({
11985
+ actualInvocation: actual,
11986
+ expectedInvocation: expected,
11987
+ score,
11988
+ evalStatus: this._getEvalStatus(score)
11989
+ });
11990
+ if (score !== null && score !== void 0) {
11991
+ totalScore += score;
11992
+ numInvocations++;
11993
+ }
11994
+ } catch (error) {
11995
+ console.error("Error evaluating invocation:", error);
11996
+ perInvocationResults.push({
11997
+ actualInvocation: actual,
11998
+ expectedInvocation: expected,
11999
+ score: void 0,
12000
+ evalStatus: 3 /* NOT_EVALUATED */
12001
+ });
12002
+ }
12003
+ }
12004
+ if (perInvocationResults.length > 0) {
12005
+ const overallScore = numInvocations > 0 ? totalScore / numInvocations : void 0;
12006
+ return {
12007
+ overallScore,
12008
+ overallEvalStatus: this._getEvalStatus(overallScore),
12009
+ perInvocationResults
12010
+ };
12011
+ }
12012
+ return {
12013
+ overallScore: void 0,
12014
+ overallEvalStatus: 3 /* NOT_EVALUATED */,
12015
+ perInvocationResults: []
12016
+ };
12017
+ }
12018
+ _getText(content) {
12019
+ if (_optionalChain([content, 'optionalAccess', _302 => _302.parts])) {
12020
+ return content.parts.map((p) => p.text || "").filter((text) => text.length > 0).join("\n");
12021
+ }
12022
+ return "";
12023
+ }
12024
+ _getScore(evalResult) {
12025
+ if (_optionalChain([evalResult, 'optionalAccess', _303 => _303.summaryMetrics, 'optionalAccess', _304 => _304[0], 'optionalAccess', _305 => _305.meanScore]) !== void 0 && typeof evalResult.summaryMetrics[0].meanScore === "number" && !Number.isNaN(evalResult.summaryMetrics[0].meanScore)) {
12026
+ return evalResult.summaryMetrics[0].meanScore;
12027
+ }
12028
+ return void 0;
12029
+ }
12030
+ _getEvalStatus(score) {
12031
+ if (score !== null && score !== void 0) {
12032
+ return score >= this.threshold ? 1 /* PASSED */ : 2 /* FAILED */;
12033
+ }
12034
+ return 3 /* NOT_EVALUATED */;
12035
+ }
12036
+ static async _performEval(dataset, metrics) {
12037
+ const projectId = process.env.GOOGLE_CLOUD_PROJECT;
12038
+ const location = process.env.GOOGLE_CLOUD_LOCATION;
12039
+ if (!projectId) {
12040
+ throw new Error(`Missing project id. ${ERROR_MESSAGE_SUFFIX}`);
12041
+ }
12042
+ if (!location) {
12043
+ throw new Error(`Missing location. ${ERROR_MESSAGE_SUFFIX}`);
12044
+ }
12045
+ console.warn(
12046
+ "Vertex AI evaluation is not fully implemented. Using mock response."
12047
+ );
12048
+ return {
12049
+ summaryMetrics: [
12050
+ {
12051
+ meanScore: Math.random() * 0.5 + 0.5
12052
+ }
12053
+ ]
12054
+ };
12055
+ }
12056
+ };
12057
+
12058
+ // src/evaluation/response-evaluator.ts
12059
+ var ResponseEvaluator = class extends Evaluator {
12060
+
12061
+
12062
+ constructor(evalMetric) {
12063
+ super(evalMetric);
12064
+ if (evalMetric.metricName === "response_evaluation_score" /* RESPONSE_EVALUATION_SCORE */) {
12065
+ this.metricName = "response_evaluation_score" /* RESPONSE_EVALUATION_SCORE */;
12066
+ } else if (evalMetric.metricName === "response_match_score" /* RESPONSE_MATCH_SCORE */) {
12067
+ this.metricName = "response_match_score" /* RESPONSE_MATCH_SCORE */;
12068
+ } else {
12069
+ throw new Error(`Metric ${evalMetric.metricName} is not supported.`);
12070
+ }
12071
+ this.threshold = evalMetric.threshold;
12072
+ }
12073
+ static getMetricInfo(metricName) {
12074
+ if (metricName === "response_evaluation_score" /* RESPONSE_EVALUATION_SCORE */) {
12075
+ return {
12076
+ metricName: "response_evaluation_score" /* RESPONSE_EVALUATION_SCORE */,
12077
+ description: "This metric evaluates how coherent agent's response was. Value range of this metric is [1,5], with values closer to 5 more desirable.",
12078
+ metricValueInfo: {
12079
+ interval: {
12080
+ minValue: 1,
12081
+ maxValue: 5,
12082
+ openAtMin: false,
12083
+ openAtMax: false
12084
+ }
12085
+ }
12086
+ };
12087
+ }
12088
+ if (metricName === "response_match_score" /* RESPONSE_MATCH_SCORE */) {
12089
+ return {
12090
+ metricName: "response_match_score" /* RESPONSE_MATCH_SCORE */,
12091
+ description: "This metric evaluates if agent's final response matches a golden/expected final response using Rouge_1 metric. Value range for this metric is [0,1], with values closer to 1 more desirable.",
12092
+ metricValueInfo: {
12093
+ interval: {
12094
+ minValue: 0,
12095
+ maxValue: 1,
12096
+ openAtMin: false,
12097
+ openAtMax: false
12098
+ }
12099
+ }
12100
+ };
12101
+ }
12102
+ throw new Error(`Metric ${metricName} is not supported.`);
12103
+ }
12104
+ async evaluateInvocations(actualInvocations, expectedInvocations) {
12105
+ if (this.metricName === "response_match_score" /* RESPONSE_MATCH_SCORE */) {
12106
+ return this.evaluateRougeScore(actualInvocations, expectedInvocations);
12107
+ }
12108
+ const vertexAiFacade = new VertexAiEvalFacade({
12109
+ threshold: this.threshold,
12110
+ metricName: this.metricName
12111
+ });
12112
+ return vertexAiFacade.evaluateInvocations(
12113
+ actualInvocations,
12114
+ expectedInvocations
12115
+ );
12116
+ }
12117
+ async evaluateRougeScore(actualInvocations, expectedInvocations) {
12118
+ if (actualInvocations.length !== expectedInvocations.length) {
12119
+ throw new Error("Number of actual and expected invocations must match");
12120
+ }
12121
+ const results = [];
12122
+ for (let i = 0; i < actualInvocations.length; i++) {
12123
+ const actual = actualInvocations[i];
12124
+ const expected = expectedInvocations[i];
12125
+ const result = await this.evaluateInvocation(actual, expected);
12126
+ results.push(result);
12127
+ }
12128
+ const scores = results.map((r) => r.score).filter((s) => s !== void 0);
12129
+ const overallScore = scores.length > 0 ? scores.reduce((a, b) => a + b, 0) / scores.length : void 0;
12130
+ const overallStatus = overallScore !== void 0 && overallScore >= this.threshold ? 1 /* PASSED */ : 2 /* FAILED */;
12131
+ return {
12132
+ overallScore,
12133
+ overallEvalStatus: overallStatus,
12134
+ perInvocationResults: results
12135
+ };
12136
+ }
12137
+ async evaluateInvocation(actual, expected) {
12138
+ if (!actual.finalResponse || !expected.finalResponse) {
12139
+ return {
12140
+ actualInvocation: actual,
12141
+ expectedInvocation: expected,
12142
+ evalStatus: 3 /* NOT_EVALUATED */
12143
+ };
12144
+ }
12145
+ const score = await this.computeRougeScore(
12146
+ actual.finalResponse,
12147
+ expected.finalResponse
12148
+ );
12149
+ return {
12150
+ actualInvocation: actual,
12151
+ expectedInvocation: expected,
12152
+ score,
12153
+ evalStatus: score >= this.threshold ? 1 /* PASSED */ : 2 /* FAILED */
12154
+ };
12155
+ }
12156
+ async computeRougeScore(actual, expected) {
12157
+ const actualText = this.extractText(actual);
12158
+ const expectedText = this.extractText(expected);
12159
+ if (!actualText.trim() || !expectedText.trim()) {
12160
+ return 0;
12161
+ }
12162
+ const actualTokens = this.tokenizeText(actualText);
12163
+ const expectedTokens = this.tokenizeText(expectedText);
12164
+ const actualUnigrams = new Set(actualTokens);
12165
+ const expectedUnigrams = new Set(expectedTokens);
12166
+ const commonUnigrams = new Set(
12167
+ [...actualUnigrams].filter((token) => expectedUnigrams.has(token))
12168
+ );
12169
+ const precision = actualUnigrams.size > 0 ? commonUnigrams.size / actualUnigrams.size : 0;
12170
+ const recall = expectedUnigrams.size > 0 ? commonUnigrams.size / expectedUnigrams.size : 0;
12171
+ const fmeasure = precision + recall > 0 ? 2 * precision * recall / (precision + recall) : 0;
12172
+ return fmeasure;
12173
+ }
12174
+ extractText(content) {
12175
+ if (_optionalChain([content, 'optionalAccess', _306 => _306.parts])) {
12176
+ return content.parts.map((p) => p.text || "").filter((text) => text.length > 0).join(" ");
12177
+ }
12178
+ return "";
12179
+ }
12180
+ tokenizeText(text) {
12181
+ return text.toLowerCase().replace(/[^\w\s]/g, " ").split(/\s+/).filter((token) => token.length > 0);
12182
+ }
12183
+ };
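A worked example of the ROUGE-1 scoring implemented in computeRougeScore() above. The sentences are invented for illustration; 0.8 is the default response_match_score threshold used by DEFAULT_CRITERIA later in this file.

  // actual   "The cat sat."      -> tokens [the, cat, sat]
  // expected "The cat sat down." -> tokens [the, cat, sat, down]
  // common unigrams = 3
  // precision = 3 / 3 = 1.0, recall = 3 / 4 = 0.75
  // F-measure = (2 * 1.0 * 0.75) / (1.0 + 0.75) ≈ 0.857 -> PASSED against a 0.8 threshold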
12184
+
12185
+ // src/evaluation/trajectory-evaluator.ts
12186
+ var TrajectoryEvaluator = class extends Evaluator {
12187
+ static getMetricInfo() {
12188
+ return {
12189
+ metricName: "tool_trajectory_avg_score" /* TOOL_TRAJECTORY_AVG_SCORE */,
12190
+ description: "This metric compares two tool call trajectories (expected vs. actual) for the same user interaction. It performs an exact match on the tool name and arguments for each step in the trajectory. A score of 1.0 indicates a perfect match, while 0.0 indicates a mismatch. Higher values are better.",
12191
+ metricValueInfo: {
12192
+ interval: {
12193
+ minValue: 0,
12194
+ maxValue: 1,
12195
+ openAtMin: false,
12196
+ openAtMax: false
12197
+ }
12198
+ }
12199
+ };
12200
+ }
12201
+ async evaluateInvocations(actualInvocations, expectedInvocations) {
12202
+ let totalToolUseAccuracy = 0;
12203
+ let numInvocations = 0;
12204
+ const perInvocationResults = [];
12205
+ for (let i = 0; i < actualInvocations.length; i++) {
12206
+ const actual = actualInvocations[i];
12207
+ const expected = expectedInvocations[i];
12208
+ if (!_optionalChain([actual, 'access', _307 => _307.intermediateData, 'optionalAccess', _308 => _308.toolUses]) || !_optionalChain([expected, 'access', _309 => _309.intermediateData, 'optionalAccess', _310 => _310.toolUses])) {
12209
+ perInvocationResults.push({
12210
+ actualInvocation: actual,
12211
+ expectedInvocation: expected,
12212
+ evalStatus: 3 /* NOT_EVALUATED */
12213
+ });
12214
+ continue;
12215
+ }
12216
+ const toolUseAccuracy = this.areToolCallsEqual(
12217
+ actual.intermediateData.toolUses,
12218
+ expected.intermediateData.toolUses
12219
+ ) ? 1 : 0;
12220
+ perInvocationResults.push({
12221
+ actualInvocation: actual,
12222
+ expectedInvocation: expected,
12223
+ score: toolUseAccuracy,
12224
+ evalStatus: toolUseAccuracy >= this.metric.threshold ? 1 /* PASSED */ : 2 /* FAILED */
12225
+ });
12226
+ totalToolUseAccuracy += toolUseAccuracy;
12227
+ numInvocations++;
12228
+ }
12229
+ const overallScore = numInvocations > 0 ? totalToolUseAccuracy / numInvocations : 0;
12230
+ return {
12231
+ overallScore,
12232
+ overallEvalStatus: overallScore >= this.metric.threshold ? 1 /* PASSED */ : 2 /* FAILED */,
12233
+ perInvocationResults
12234
+ };
12235
+ }
12236
+ areToolCallsEqual(actual, expected) {
12237
+ if (actual.length !== expected.length) {
12238
+ return false;
12239
+ }
12240
+ return actual.every((actualCall, index) => {
12241
+ const expectedCall = expected[index];
12242
+ return this.isToolCallEqual(actualCall, expectedCall);
12243
+ });
12244
+ }
12245
+ isToolCallEqual(actual, expected) {
12246
+ if (actual.name !== expected.name) {
12247
+ return false;
12248
+ }
12249
+ const actualArgs = actual.args || {};
12250
+ const expectedArgs = expected.args || {};
12251
+ const actualKeys = Object.keys(actualArgs).sort();
12252
+ const expectedKeys = Object.keys(expectedArgs).sort();
12253
+ if (actualKeys.length !== expectedKeys.length) {
12254
+ return false;
12255
+ }
12256
+ return actualKeys.every((key, index) => {
12257
+ const expectedKey = expectedKeys[index];
12258
+ if (key !== expectedKey) {
12259
+ return false;
12260
+ }
12261
+ return JSON.stringify(actualArgs[key]) === JSON.stringify(expectedArgs[key]);
12262
+ });
12263
+ }
12264
+ };
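A short illustration of the exact-match rule implemented by areToolCallsEqual()/isToolCallEqual() above; the tool name and arguments are invented.

  // expected toolUses: [{ name: "get_weather", args: { city: "Paris" } }]
  // actual   toolUses: [{ name: "get_weather", args: { city: "Paris" } }] -> score 1 (PASSED)
  // actual   toolUses: [{ name: "get_weather", args: { city: "paris" } }] -> score 0 (FAILED)
  // Top-level argument keys are sorted before comparison; values are compared via
  // JSON.stringify, so any difference in a value (including string case) is a mismatch.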
12265
+
12266
+ // src/evaluation/safety-evaluator.ts
12267
+ var SafetyEvaluatorV1 = class extends Evaluator {
12268
+ static getMetricInfo() {
12269
+ return {
12270
+ metricName: "safety_v1" /* SAFETY_V1 */,
12271
+ description: "This metric evaluates the safety (harmlessness) of an Agent's Response. Value range of the metric is [0, 1], with values closer to 1 to be more desirable (safe).",
12272
+ metricValueInfo: {
12273
+ interval: {
12274
+ minValue: 0,
12275
+ maxValue: 1,
12276
+ openAtMin: false,
12277
+ openAtMax: false
12278
+ }
12279
+ }
12280
+ };
12281
+ }
12282
+ async evaluateInvocations(actualInvocations, expectedInvocations) {
12283
+ const facade = new VertexAiEvalFacade({
12284
+ threshold: this.metric.threshold,
12285
+ metricName: "safety_v1" /* SAFETY_V1 */
12286
+ });
12287
+ return await facade.evaluateInvocations(
12288
+ actualInvocations,
12289
+ expectedInvocations
12290
+ );
12291
+ }
12292
+ };
12293
+
12294
+ // src/evaluation/llm-as-judge-utils.ts
12295
+ function getTextFromContent(content) {
12296
+ if (_optionalChain([content, 'optionalAccess', _311 => _311.parts])) {
12297
+ return content.parts.map((part) => part.text).filter(Boolean).join("\n");
12298
+ }
12299
+ return "";
12300
+ }
12301
+ function getEvalStatus(score, threshold) {
12302
+ return score >= threshold ? 1 /* PASSED */ : 2 /* FAILED */;
12303
+ }
12304
+
12305
+ // src/evaluation/llm-as-judge.ts
12306
+ var LlmAsJudge = class {
12307
+ async sampleJudge(prompt, numSamples, critiqueParser, judgeModelOptions) {
12308
+ const modelName = _optionalChain([judgeModelOptions, 'optionalAccess', _312 => _312.judgeModel]) || "gemini-2.5-flash";
12309
+ const model = LLMRegistry.getModelOrCreate(modelName);
12310
+ const config = _optionalChain([judgeModelOptions, 'optionalAccess', _313 => _313.judgeModelConfig]) || {};
12311
+ const samples = [];
12312
+ for (let i = 0; i < numSamples; i++) {
12313
+ try {
12314
+ const response = await model.generateContent({
12315
+ prompt,
12316
+ ...config
12317
+ });
12318
+ const label = critiqueParser(response.text);
12319
+ if (label !== "not_found" /* NOT_FOUND */) {
12320
+ samples.push(label);
12321
+ }
12322
+ } catch (error) {
12323
+ console.error("Error sampling judge model:", error);
12324
+ }
12325
+ }
12326
+ return samples;
12327
+ }
12328
+ };
12329
+
12330
+ // src/evaluation/final-response-match-v2.ts
12331
+ var FINAL_RESPONSE_MATCH_V2_PROMPT = `You are an expert rater for an AI agent. The AI agent is going to call an API to answer the user query and generate API tool use code based on the choice of the API and API arguments. The ideal model response should be a function call that fulfills the user query, or a natural language response that hedges or asks users for further clarification if a function call does not apply.
12332
+ The primary focus of this rating task is to check correctness of the model responses.
12333
+
12334
+ The data consists of:
12335
+ - A user query.
12336
+ - A model generated response for the prompt. The responses can consist of:
12337
+ - Natural language, when the model is asking for clarification, or tells the user it does not possess the requested functionality / option.
12338
+ - Code, in the form of one or multiple python function calls, and additional code as needed, for when the model is fulfilling the user request.
12339
+ You can use the help from a reference response annotated by a human rater. This reference response is of high quality. You can compare the agent's response with the reference response and decide if the agent's response is valid.
12340
+ Note sometimes the reference response only contains the key entities of the correct answer and you need to be flexible to allow the agent response to contain more information than the reference response, or to present the key entities in a different format or structure or in shorter or longer format.
12341
+ When the agent response is provided in the form of tables/dataframes or should be best provided in the form of tables/dataframes: focus on the key entities and main components requested in the user query and check whether you can retrieve those from the agent response. Likewise, if you have the reference response, then find out the key entities and main components in them and check whether you can retrieve those from the agent response. If the prompt does not specify any format instructions and the main items/components are included in the response then tolerate the differences in the formatting of those tables/dataframes.
12342
+
12343
+ You should follow the constitutions below very carefully to rate the model response:
12344
+ - Allow flexibility of format even when reference code only uses one of the possible format, unless API spec or user prompt has explicit format requirement
12345
+ - e.g. For state name, allow both abbreviation and full name unless API spec has explicit requirement. e.g. both 'tx' and 'Texas' should be allowed in the agent response even when reference code only uses one of them.
12346
+ - e.g. If a reference response list outputs in a list format, the agent response is allowed to use sentence format and vice versa unless user prompt explicitly asks for a specific format.
12347
+ - e.g. For numbers, allow flexibility of formatting, e.g. 1000000 vs 1,000,000.
12348
+ - The model shouldn't assume that it doesn't have access to according data or incapable of answering the question if reference response is able to find a legit answer.
12349
+ - If the model response contains the correct final answer, rate it as valid even when the model response contains more information than the reference response.
12350
+ - If the user prompt has csv or other table format data, don't read it yourself. Trust the reference response final answer instead.
12351
+ - When the validation needs maths, date calculations, do not use your own calculator. Trust the reference response final answer instead.
12352
+ - Be mindful about unit of numbers. For example, if the reference response says 100 miles, but the model response says 100 km, it is invalid.
12353
+ - When the agent response or the reference response is provided in the form of tables/dataframes: focus on the key entities and main components requested in the user query and check whether you can retrieve those from the agent response and whether those match the reference response. If the user query does not specify any format instructions and the main items/components are included in the response then tolerate the differences in the formatting of those tables/dataframes.
12354
+ - When the answer is in numeric format, check whether there are any format requirements in the numeric format, rounding, precision, number of decimals, etc. specified in the user query and the prompt. If there are no such instructions, then tolerate different numerical formats.
12355
+ - When the answer is in numeric format and there are rounding or precision differences between the agent response and the reference response, if no further instructions are provided evaluate if the rounding strategy or precision in the agent response follows the standards for that entity. For instance, model accuracy scores must be reported with at least two decimal places (e.g., 0.798 \u2192 0.80 is acceptable, but 0.7 is not).
12356
+
12357
+ Below are the inputs:
12358
+ {{
12359
+ "User prompt": {prompt},
12360
+ "Agent response": {response},
12361
+ "Reference response": {golden_response},
12362
+ }}
12363
+
12364
+ The answer should be a json alone which follows the json structure below:
12365
+ {{
12366
+ "reasoning": [reasoning],
12367
+ "is_the_agent_response_valid": [valid or invalid],
12368
+ }}
12369
+ Answer with assertiveness:
12370
+ `;
12371
+ var DEFAULT_NUM_SAMPLES = 5;
12372
+ function parseCritique(response) {
12373
+ const labelMatchIsResponseValid = response.match(
12374
+ /"is_the_agent_response_valid":\s*\[*[\n\s]*"*([^"^\]^\s]*)"*[\n\s]*\]*\s*[,\n\}]/
12375
+ );
12376
+ if (_optionalChain([labelMatchIsResponseValid, 'optionalAccess', _314 => _314[1]])) {
12377
+ const label = labelMatchIsResponseValid[1].toLowerCase();
12378
+ return label === "valid" ? "valid" /* VALID */ : "invalid" /* INVALID */;
12379
+ }
12380
+ return "not_found" /* NOT_FOUND */;
12381
+ }
12382
+ var FinalResponseMatchV2Evaluator = class extends Evaluator {
12383
+ constructor(evalMetric, llmAsJudge = new LlmAsJudge()) {
12384
+ super(evalMetric);
12385
+ this.llmAsJudge = llmAsJudge;
12386
+ }
12387
+ static getMetricInfo() {
12388
+ return {
12389
+ metricName: "final_response_match_v2" /* FINAL_RESPONSE_MATCH_V2 */,
12390
+ description: "This metric evaluates if the agent's final response matches a golden/expected final response using an LLM judge. Value range for this metric is [0,1], with values closer to 1 more desirable.",
12391
+ metricValueInfo: {
12392
+ interval: {
12393
+ minValue: 0,
12394
+ maxValue: 1,
12395
+ openAtMin: false,
12396
+ openAtMax: false
12397
+ }
12398
+ }
12399
+ };
12400
+ }
12401
+ async evaluateInvocations(actualInvocations, expectedInvocations) {
12402
+ const perInvocationResults = [];
12403
+ let totalScore = 0;
12404
+ let numInvocations = 0;
12405
+ if (!actualInvocations.length) {
12406
+ return {
12407
+ overallEvalStatus: 3 /* NOT_EVALUATED */,
12408
+ perInvocationResults: []
12409
+ };
12410
+ }
12411
+ for (let i = 0; i < actualInvocations.length; i++) {
12412
+ const actual = actualInvocations[i];
12413
+ const expected = expectedInvocations[i];
12414
+ const prompt = getTextFromContent(expected.userContent);
12415
+ const response = getTextFromContent(actual.finalResponse);
12416
+ const goldenResponse = getTextFromContent(expected.finalResponse);
12417
+ const formattedPrompt = FINAL_RESPONSE_MATCH_V2_PROMPT.replace(
12418
+ "{prompt}",
12419
+ prompt
12420
+ ).replace("{response}", response).replace("{golden_response}", goldenResponse);
12421
+ const numSamples = _nullishCoalesce(_optionalChain([this, 'access', _315 => _315.metric, 'access', _316 => _316.judgeModelOptions, 'optionalAccess', _317 => _317.numSamples]), () => ( DEFAULT_NUM_SAMPLES));
12422
+ const labels = await this.llmAsJudge.sampleJudge(
12423
+ formattedPrompt,
12424
+ numSamples,
12425
+ parseCritique,
12426
+ this.metric.judgeModelOptions
12427
+ );
12428
+ const score = labels.filter((l) => l === "valid" /* VALID */).length / labels.length;
12429
+ perInvocationResults.push({
12430
+ actualInvocation: actual,
12431
+ expectedInvocation: expected,
12432
+ score,
12433
+ evalStatus: getEvalStatus(score, this.metric.threshold)
12434
+ });
12435
+ totalScore += score;
12436
+ numInvocations++;
12437
+ }
12438
+ const overallScore = totalScore / numInvocations;
12439
+ return {
12440
+ overallScore,
12441
+ overallEvalStatus: getEvalStatus(overallScore, this.metric.threshold),
12442
+ perInvocationResults
12443
+ };
12444
+ }
12445
+ };
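How the per-invocation score above is derived, with illustrative numbers (DEFAULT_NUM_SAMPLES is 5 unless judgeModelOptions.numSamples overrides it):

  // The judge model is sampled 5 times; parseCritique() keeps only responses where
  // "is_the_agent_response_valid" could be extracted, dropping NOT_FOUND samples.
  // e.g. labels = [valid, valid, invalid, valid, valid]
  //      score = 4 / 5 = 0.8 -> PASSED whenever the metric threshold is <= 0.8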
12446
+
12447
+ // src/evaluation/metric-evaluator-registry.ts
12448
+ var MetricEvaluatorRegistry = (_class35 = class {constructor() { _class35.prototype.__init61.call(this); }
12449
+ __init61() {this.registry = /* @__PURE__ */ new Map()}
12450
+ getEvaluator(evalMetric) {
12451
+ const entry = this.registry.get(evalMetric.metricName);
12452
+ if (!entry) {
12453
+ throw new Error(`${evalMetric.metricName} not found in registry.`);
12454
+ }
12455
+ return new entry.evaluator(evalMetric);
12456
+ }
12457
+ registerEvaluator(metricInfo, evaluator) {
12458
+ const metricName = metricInfo.metricName;
12459
+ if (this.registry.has(metricName)) {
12460
+ console.info(
12461
+ `Updating Evaluator class for ${metricName} from ${_optionalChain([this, 'access', _318 => _318.registry, 'access', _319 => _319.get, 'call', _320 => _320(metricName), 'optionalAccess', _321 => _321.evaluator, 'access', _322 => _322.name])} to ${evaluator.name}`
12462
+ );
12463
+ }
12464
+ this.registry.set(metricName, {
12465
+ evaluator,
12466
+ metricInfo: { ...metricInfo }
12467
+ });
12468
+ }
12469
+ getRegisteredMetrics() {
12470
+ return Array.from(this.registry.values()).map((entry) => ({
12471
+ ...entry.metricInfo
12472
+ }));
12473
+ }
12474
+ }, _class35);
12475
+ function getDefaultMetricEvaluatorRegistry() {
12476
+ const registry = new MetricEvaluatorRegistry();
12477
+ registry.registerEvaluator(
12478
+ TrajectoryEvaluator.getMetricInfo(),
12479
+ TrajectoryEvaluator
12480
+ );
12481
+ registry.registerEvaluator(
12482
+ ResponseEvaluator.getMetricInfo("response_evaluation_score" /* RESPONSE_EVALUATION_SCORE */),
12483
+ ResponseEvaluator
12484
+ );
12485
+ registry.registerEvaluator(
12486
+ ResponseEvaluator.getMetricInfo("response_match_score" /* RESPONSE_MATCH_SCORE */),
12487
+ ResponseEvaluator
12488
+ );
12489
+ registry.registerEvaluator(
12490
+ SafetyEvaluatorV1.getMetricInfo(),
12491
+ SafetyEvaluatorV1
12492
+ );
12493
+ registry.registerEvaluator(
12494
+ FinalResponseMatchV2Evaluator.getMetricInfo(),
12495
+ FinalResponseMatchV2Evaluator
12496
+ );
12497
+ return registry;
12498
+ }
12499
+ var DEFAULT_METRIC_EVALUATOR_REGISTRY = getDefaultMetricEvaluatorRegistry();
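A sketch of how the default registry above resolves a metric internally. The registry instance is module-local in this bundle, so this is not a public API call; the metric name and threshold are illustrative.

  // const evaluator = DEFAULT_METRIC_EVALUATOR_REGISTRY.getEvaluator({
  //   metricName: "response_match_score",
  //   threshold: 0.8
  // }); // -> a ResponseEvaluator instance
  // Unregistered metric names throw: "<metricName> not found in registry."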
12500
+
12501
+ // src/evaluation/local-eval-service.ts
12502
+ var LocalEvalService = class extends BaseEvalService {
12503
+ constructor(agent, parallelism = 4) {
12504
+ super();
12505
+ this.agent = agent;
12506
+ this.parallelism = parallelism;
12507
+ this.initializeRunner();
12508
+ }
12509
+
12510
+ async initializeRunner() {
12511
+ if ("ask" in this.agent) {
12512
+ this.runner = this.agent;
12513
+ } else {
12514
+ try {
12515
+ const { runner } = await AgentBuilder.create("eval_agent").withModel("gemini-2.5-flash").withDescription("Agent for evaluation purposes").build();
12516
+ this.runner = {
12517
+ ask: async (message) => {
12518
+ return await runner.ask(message);
12519
+ }
12520
+ };
12521
+ } catch (error) {
12522
+ console.warn(
12523
+ "Failed to create AgentBuilder runner, falling back to mock:",
12524
+ error
12525
+ );
12526
+ this.runner = {
12527
+ ask: async (message) => {
12528
+ return `Mock response to: ${message}`;
12529
+ }
12530
+ };
12531
+ }
12532
+ }
12533
+ }
12534
+ async *performInference(request) {
12535
+ for (const evalSet of request.evalCases) {
12536
+ for (const evalCase of evalSet.evalCases) {
12537
+ const expected = [];
12538
+ for (const convo of evalCase.conversation) {
12539
+ if (convo.finalResponse) {
12540
+ expected.push({
12541
+ invocationId: `${evalCase.evalId}-expected-${expected.length}`,
12542
+ userContent: convo.userContent,
12543
+ finalResponse: convo.finalResponse,
12544
+ intermediateData: convo.intermediateData,
12545
+ creationTimestamp: convo.creationTimestamp
12546
+ });
12547
+ }
12548
+ }
12549
+ const actual = await this.runInference(evalCase);
12550
+ yield [...expected, ...actual];
12551
+ }
12552
+ }
12553
+ }
12554
+ async *evaluate(request) {
12555
+ const { inferenceResults, evaluateConfig } = request;
12556
+ const resultsByCase = /* @__PURE__ */ new Map();
12557
+ for (const result of inferenceResults) {
12558
+ const invocationId = result[0].invocationId;
12559
+ if (!invocationId) continue;
12560
+ const lastHyphenIndex = invocationId.lastIndexOf("-");
12561
+ const evalId = lastHyphenIndex !== -1 ? invocationId.substring(0, lastHyphenIndex) : invocationId;
12562
+ const existing = resultsByCase.get(evalId) || [];
12563
+ resultsByCase.set(evalId, [...existing, ...result]);
12564
+ }
12565
+ for (const [evalId, results] of resultsByCase) {
12566
+ const evalResult = {
12567
+ evalSetResultId: `${evalId}-result-${Date.now()}`,
12568
+ evalSetId: evalId,
12569
+ evalCaseResults: [],
12570
+ creationTimestamp: Date.now()
12571
+ };
12572
+ for (const evalMetric of evaluateConfig.evalMetrics) {
12573
+ const evaluator = DEFAULT_METRIC_EVALUATOR_REGISTRY.getEvaluator(evalMetric);
12574
+ const actual = results.filter(
12575
+ (r) => !_optionalChain([r, 'access', _323 => _323.invocationId, 'optionalAccess', _324 => _324.includes, 'call', _325 => _325("expected")])
12576
+ );
12577
+ const expected = results.filter(
12578
+ (r) => _optionalChain([r, 'access', _326 => _326.invocationId, 'optionalAccess', _327 => _327.includes, 'call', _328 => _328("expected")])
12579
+ );
12580
+ const result = await evaluator.evaluateInvocations(actual, expected);
12581
+ evalResult.evalCaseResults.push({
12582
+ evalSetId: evalId,
12583
+ evalId,
12584
+ finalEvalStatus: result.perInvocationResults.length > 0 ? result.perInvocationResults[0].evalStatus : 3 /* NOT_EVALUATED */,
12585
+ overallEvalMetricResults: [],
12586
+ sessionId: evalId,
12587
+ evalMetricResultPerInvocation: result.perInvocationResults.map(
12588
+ (r) => ({
12589
+ actualInvocation: r.actualInvocation,
12590
+ expectedInvocation: r.expectedInvocation,
12591
+ evalMetricResults: [
12592
+ {
12593
+ metricName: evalMetric.metricName,
12594
+ threshold: evalMetric.threshold,
12595
+ score: r.score,
12596
+ evalStatus: r.evalStatus
12597
+ }
12598
+ ]
12599
+ })
12600
+ )
12601
+ });
12602
+ }
12603
+ yield evalResult;
12604
+ }
12605
+ }
12606
+ async runInference(evalCase) {
12607
+ const results = [];
12608
+ if (!this.runner) {
12609
+ await this.initializeRunner();
12610
+ }
12611
+ if (evalCase.sessionInput) {
12612
+ try {
12613
+ if (this.runner.initializeSession) {
12614
+ await this.runner.initializeSession(evalCase.sessionInput);
12615
+ } else if (this.runner.setSessionState) {
12616
+ await this.runner.setSessionState(evalCase.sessionInput);
12617
+ } else {
12618
+ console.log(
12619
+ `Session input provided for ${evalCase.evalId}:`,
12620
+ evalCase.sessionInput
12621
+ );
12622
+ }
12623
+ } catch (error) {
12624
+ console.warn(
12625
+ `Failed to initialize session for ${evalCase.evalId}:`,
12626
+ error
12627
+ );
12628
+ }
12629
+ }
12630
+ for (const invocation of evalCase.conversation) {
12631
+ try {
12632
+ const response = await this.runner.ask(invocation.userContent);
12633
+ results.push({
12634
+ invocationId: `${evalCase.evalId}-${results.length}`,
12635
+ userContent: invocation.userContent,
12636
+ finalResponse: {
12637
+ role: "model",
12638
+ parts: [{ text: response || "" }]
12639
+ },
12640
+ intermediateData: {
12641
+ toolUses: [],
12642
+ intermediateResponses: []
12643
+ },
12644
+ creationTimestamp: Date.now()
12645
+ });
12646
+ } catch (error) {
12647
+ console.error(`Error running inference for ${evalCase.evalId}:`, error);
12648
+ results.push({
12649
+ invocationId: `${evalCase.evalId}-${results.length}`,
12650
+ userContent: invocation.userContent,
12651
+ finalResponse: {
12652
+ role: "model",
12653
+ parts: [
12654
+ {
12655
+ text: `Error: ${error instanceof Error ? error.message : "Unknown error"}`
12656
+ }
12657
+ ]
12658
+ },
12659
+ intermediateData: {
12660
+ toolUses: [],
12661
+ intermediateResponses: []
12662
+ },
12663
+ creationTimestamp: Date.now()
12664
+ });
12665
+ }
12666
+ }
12667
+ return results;
12668
+ }
12669
+ };
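A minimal usage sketch for LocalEvalService, assuming the evaluation exports above are re-exported from the package root. myAgent and evalSet are placeholders whose shapes follow the code above (an agent, or anything exposing ask(); an EvalSet with evalSetId and evalCases).

  const { LocalEvalService } = require("@iqai/adk");

  async function runEval(myAgent, evalSet) {
    const service = new LocalEvalService(myAgent);
    for await (const result of service.evaluateSession({
      evalSetId: evalSet.evalSetId,
      evalCases: [evalSet],
      evaluateConfig: {
        evalMetrics: [{ metricName: "response_match_score", threshold: 0.8 }]
      }
    })) {
      console.log(result.evalCaseResults); // per-case metric results grouped by eval id
    }
  }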
12670
+
12671
+ // src/evaluation/agent-evaluator.ts
12672
+ var NUM_RUNS = 2;
12673
+ var TOOL_TRAJECTORY_SCORE_KEY = "tool_trajectory_avg_score" /* TOOL_TRAJECTORY_AVG_SCORE */;
12674
+ var RESPONSE_EVALUATION_SCORE_KEY = "response_evaluation_score" /* RESPONSE_EVALUATION_SCORE */;
12675
+ var RESPONSE_MATCH_SCORE_KEY = "response_match_score" /* RESPONSE_MATCH_SCORE */;
12676
+ var SAFETY_V1_KEY = "safety_v1" /* SAFETY_V1 */;
12677
+ var ALLOWED_CRITERIA = [
12678
+ TOOL_TRAJECTORY_SCORE_KEY,
12679
+ RESPONSE_EVALUATION_SCORE_KEY,
12680
+ RESPONSE_MATCH_SCORE_KEY,
12681
+ SAFETY_V1_KEY
12682
+ ];
12683
+ var QUERY_COLUMN = "query";
12684
+ var REFERENCE_COLUMN = "reference";
12685
+ var EXPECTED_TOOL_USE_COLUMN = "expected_tool_use";
12686
+ var DEFAULT_CRITERIA = {
12687
+ [TOOL_TRAJECTORY_SCORE_KEY]: 1,
12688
+ [RESPONSE_MATCH_SCORE_KEY]: 0.8
12689
+ };
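For reference, the shape of a test_config.json file that findConfigForTestFile() below looks for alongside a *.test.json file. The thresholds simply mirror DEFAULT_CRITERIA and are not required values.

  {
    "criteria": {
      "tool_trajectory_avg_score": 1,
      "response_match_score": 0.8
    }
  }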
12690
+ var loadJson = async (filePath) => {
12691
+ try {
12692
+ const fileContent = await fs2.readFile(filePath, "utf-8");
12693
+ return JSON.parse(fileContent);
12694
+ } catch (error) {
12695
+ throw new Error(`Failed to load JSON from ${filePath}: ${error}`);
12696
+ }
12697
+ };
12698
+ var AgentEvaluator = class _AgentEvaluator {
12699
+ static async findConfigForTestFile(testFile) {
12700
+ const testFolder = path2.dirname(testFile);
12701
+ const configPath = path2.join(testFolder, "test_config.json");
12702
+ try {
12703
+ await fs2.access(configPath);
12704
+ const configData = await loadJson(configPath);
12705
+ if ("criteria" in configData && typeof configData.criteria === "object") {
12706
+ return configData.criteria;
12707
+ }
12708
+ throw new Error(
12709
+ `Invalid format for test_config.json at ${configPath}. Expected a 'criteria' dictionary.`
12710
+ );
12711
+ } catch (error) {
12712
+ return DEFAULT_CRITERIA;
12713
+ }
12714
+ }
12715
+ static async evaluateEvalSet(agent, evalSet, criteria, numRuns = NUM_RUNS, printDetailedResults = false) {
12716
+ const evalMetrics = Object.entries(criteria).map(
12717
+ ([metricName, threshold]) => ({
12718
+ metricName,
12719
+ threshold
12720
+ })
12721
+ );
12722
+ const evalResultsByEvalId = await _AgentEvaluator._getEvalResultsByEvalId(
12723
+ agent,
12724
+ evalSet,
12725
+ evalMetrics,
12726
+ numRuns
12727
+ );
12728
+ const failures = [];
12729
+ for (const [_, evalResultsPerEvalId] of evalResultsByEvalId) {
12730
+ const evalMetricResults = _AgentEvaluator._getEvalMetricResultsWithInvocation(
12731
+ evalResultsPerEvalId
12732
+ );
12733
+ const failuresPerEvalCase = _AgentEvaluator._processMetricsAndGetFailures(
12734
+ evalMetricResults,
12735
+ printDetailedResults,
12736
+ agent.name || "Unknown Agent"
12737
+ );
12738
+ failures.push(...failuresPerEvalCase);
12739
+ }
12740
+ if (failures.length > 0) {
12741
+ throw new Error(
12742
+ `Following are all the test failures. If you are looking to get more details on the failures, then please re-run this test with \`printDetailedResults\` set to \`true\`.
12743
+ ${failures.join(
12744
+ "\n"
12745
+ )}`
12746
+ );
12747
+ }
12748
+ }
12749
+ static async evaluate(agent, evalDatasetFilePathOrDir, numRuns = NUM_RUNS, initialSessionFile) {
12750
+ const testFiles = [];
12751
+ try {
12752
+ const stat2 = await fs2.stat(evalDatasetFilePathOrDir);
12753
+ if (stat2.isDirectory()) {
12754
+ const files = await this._findTestFilesRecursively(
12755
+ evalDatasetFilePathOrDir
12756
+ );
12757
+ testFiles.push(...files);
12758
+ } else {
12759
+ testFiles.push(evalDatasetFilePathOrDir);
12760
+ }
12761
+ } catch (error) {
12762
+ throw new Error(`Invalid path: ${evalDatasetFilePathOrDir}`);
12763
+ }
12764
+ const initialSession = await _AgentEvaluator._getInitialSession(initialSessionFile);
12765
+ for (const testFile of testFiles) {
12766
+ const criteria = await _AgentEvaluator.findConfigForTestFile(testFile);
12767
+ const evalSet = await _AgentEvaluator._loadEvalSetFromFile(
12768
+ testFile,
12769
+ criteria,
12770
+ initialSession
12771
+ );
12772
+ await _AgentEvaluator.evaluateEvalSet(agent, evalSet, criteria, numRuns);
12773
+ }
12774
+ }
12775
+ static async migrateEvalDataToNewSchema(oldEvalDataFile, newEvalDataFile, initialSessionFile) {
12776
+ if (!oldEvalDataFile || !newEvalDataFile) {
12777
+ throw new Error("One of oldEvalDataFile or newEvalDataFile is empty.");
12778
+ }
12779
+ const criteria = await _AgentEvaluator.findConfigForTestFile(oldEvalDataFile);
12780
+ const initialSession = await _AgentEvaluator._getInitialSession(initialSessionFile);
12781
+ const evalSet = await _AgentEvaluator._getEvalSetFromOldFormat(
12782
+ oldEvalDataFile,
12783
+ criteria,
12784
+ initialSession
12785
+ );
12786
+ await fs2.writeFile(newEvalDataFile, JSON.stringify(evalSet, null, 2));
12787
+ }
12788
+ static async _findTestFilesRecursively(dir) {
12789
+ const testFiles = [];
12790
+ async function walk(currentDir) {
12791
+ const entries = await fs2.readdir(currentDir, { withFileTypes: true });
12792
+ for (const entry of entries) {
12793
+ const fullPath = path2.join(currentDir, entry.name);
12794
+ if (entry.isDirectory()) {
12795
+ await walk(fullPath);
12796
+ } else if (entry.name.endsWith(".test.json")) {
12797
+ testFiles.push(fullPath);
12798
+ }
12799
+ }
12800
+ }
12801
+ await walk(dir);
12802
+ return testFiles;
12803
+ }
12804
+ static async _loadEvalSetFromFile(evalSetFile, criteria, initialSession) {
12805
+ try {
12806
+ const content = await fs2.readFile(evalSetFile, "utf-8");
12807
+ try {
12808
+ const evalSet = JSON.parse(content);
12809
+ if (evalSet.evalSetId && evalSet.evalCases) {
12810
+ if (Object.keys(initialSession).length > 0) {
12811
+ throw new Error(
12812
+ "Initial session should be specified as a part of EvalSet file. Explicit initial session is only needed, when specifying data in the older schema."
12813
+ );
12814
+ }
12815
+ return evalSet;
12816
+ }
12817
+ } catch (parseError) {
12818
+ throw new Error(`Failed to parse eval set data: ${parseError}`);
12819
+ }
12820
+ } catch (error) {
12821
+ throw new Error(`Failed to process eval set file: ${error}`);
12822
+ }
12823
+ console.warn(
12824
+ `Contents of ${evalSetFile} appear to be in older format. To avoid this warning, please update your test files to contain data in EvalSet schema. You can use 'migrateEvalDataToNewSchema' for migrating your old test files.`
12825
+ );
12826
+ return _AgentEvaluator._getEvalSetFromOldFormat(
12827
+ evalSetFile,
12828
+ criteria,
12829
+ initialSession
12830
+ );
12831
+ }
12832
+ static async _getEvalSetFromOldFormat(evalSetFile, criteria, initialSession) {
12833
+ const data = await _AgentEvaluator._loadDataset(evalSetFile);
12834
+ _AgentEvaluator._validateInput(data, criteria);
12835
+ return {
12836
+ evalSetId: `eval-set-${Date.now()}`,
12837
+ name: evalSetFile,
12838
+ evalCases: data[0].map(
12839
+ (item, index) => ({
12840
+ evalId: `eval-${index}`,
12841
+ conversation: [
12842
+ {
12843
+ invocationId: `invocation-${index}`,
12844
+ userContent: {
12845
+ role: "user",
12846
+ parts: [{ text: item[QUERY_COLUMN] || "" }]
12847
+ },
12848
+ finalResponse: item[REFERENCE_COLUMN] ? {
12849
+ role: "model",
12850
+ parts: [{ text: item[REFERENCE_COLUMN] }]
12851
+ } : void 0,
12852
+ intermediateData: item[EXPECTED_TOOL_USE_COLUMN] ? {
12853
+ toolUses: item[EXPECTED_TOOL_USE_COLUMN],
12854
+ intermediateResponses: []
12855
+ } : void 0,
12856
+ creationTimestamp: Date.now()
12857
+ }
12858
+ ],
12859
+ sessionInput: Object.keys(initialSession).length > 0 ? {
12860
+ appName: "test-app",
12861
+ userId: "test-user",
12862
+ state: initialSession
12863
+ } : void 0
12864
+ })
12865
+ ),
12866
+ creationTimestamp: Date.now()
12867
+ };
12868
+ }
12869
+ static async _getInitialSession(initialSessionFile) {
12870
+ if (!initialSessionFile) {
12871
+ return {};
12872
+ }
12873
+ try {
12874
+ const content = await fs2.readFile(initialSessionFile, "utf-8");
12875
+ return JSON.parse(content);
12876
+ } catch (error) {
12877
+ throw new Error(
12878
+ `Failed to load initial session from ${initialSessionFile}: ${error}`
12879
+ );
12880
+ }
12881
+ }
12882
+ static async _loadDataset(inputData) {
12883
+ const stat2 = await fs2.stat(inputData);
12884
+ if (stat2.isDirectory()) {
12885
+ const testFiles = await this._findTestFilesRecursively(inputData);
12886
+ const results = await Promise.all(testFiles.map((f) => loadJson(f)));
12887
+ return results.map((r) => Array.isArray(r) ? r : [r]);
12888
+ }
12889
+ if (stat2.isFile()) {
12890
+ const data = await loadJson(inputData);
12891
+ return [Array.isArray(data) ? data : [data]];
12892
+ }
12893
+ throw new Error(`Invalid input path: ${inputData}`);
12894
+ }
12895
+ static _validateInput(evalDataset, criteria) {
12896
+ if (!evalDataset || evalDataset.length === 0) {
12897
+ throw new Error("The evaluation dataset is None or empty.");
12898
+ }
12899
+ for (const key of Object.keys(criteria)) {
12900
+ if (!ALLOWED_CRITERIA.includes(key)) {
12901
+ throw new Error(
12902
+ `Invalid criteria key: ${key}. Expected one of ${ALLOWED_CRITERIA.join(
12903
+ ", "
12904
+ )}.`
12905
+ );
12906
+ }
12907
+ }
12908
+ const sample = evalDataset[0];
12909
+ if (!Array.isArray(sample) || sample.length === 0) {
12910
+ throw new Error("The evaluation dataset is empty.");
12911
+ }
12912
+ const firstQuery = sample[0];
12913
+ if (typeof firstQuery !== "object") {
12914
+ throw new Error(
12915
+ `Each evaluation dataset sample must be a list of dictionaries. But it's ${JSON.stringify(
12916
+ evalDataset
12917
+ )}`
12918
+ );
12919
+ }
12920
+ if (TOOL_TRAJECTORY_SCORE_KEY in criteria) {
12921
+ if (!(QUERY_COLUMN in firstQuery) || !(EXPECTED_TOOL_USE_COLUMN in firstQuery)) {
12922
+ throw new Error(
12923
+ `Samples for ${TOOL_TRAJECTORY_SCORE_KEY} must include '${QUERY_COLUMN}' and '${EXPECTED_TOOL_USE_COLUMN}' keys. The sample is ${JSON.stringify(sample)}.`
12924
+ );
12925
+ }
12926
+ }
12927
+ if (RESPONSE_EVALUATION_SCORE_KEY in criteria) {
12928
+ if (!(QUERY_COLUMN in firstQuery)) {
12929
+ throw new Error(
12930
+ `Samples for ${RESPONSE_EVALUATION_SCORE_KEY} must include '${QUERY_COLUMN}' key. The sample is ${JSON.stringify(sample)}.`
12931
+ );
12932
+ }
12933
+ }
12934
+ if (RESPONSE_MATCH_SCORE_KEY in criteria) {
12935
+ if (!(QUERY_COLUMN in firstQuery) || !(REFERENCE_COLUMN in firstQuery)) {
12936
+ throw new Error(
12937
+ `Samples for ${RESPONSE_MATCH_SCORE_KEY} must include '${QUERY_COLUMN}' and '${REFERENCE_COLUMN}' keys. The sample is ${JSON.stringify(sample)}.`
12938
+ );
12939
+ }
12940
+ }
12941
+ }
12942
+ static _printDetails(evalMetricResultWithInvocations, overallEvalStatus, overallScore, metricName = "", threshold = 0) {
12943
+ console.log(
12944
+ `Summary: \`${overallEvalStatus}\` for Metric: \`${metricName}\`. Expected threshold: \`${threshold}\`, actual value: \`${overallScore}\`.`
12945
+ );
12946
+ const data = evalMetricResultWithInvocations.map((per) => ({
12947
+ evalStatus: per.evalMetricResult.evalStatus,
12948
+ score: per.evalMetricResult.score,
12949
+ threshold,
12950
+ prompt: _AgentEvaluator._convertContentToText(
12951
+ per.expectedInvocation.userContent
12952
+ ),
12953
+ expectedResponse: _AgentEvaluator._convertContentToText(
12954
+ per.expectedInvocation.finalResponse
12955
+ ),
12956
+ actualResponse: _AgentEvaluator._convertContentToText(
12957
+ per.actualInvocation.finalResponse
12958
+ ),
12959
+ expectedToolCalls: _AgentEvaluator._convertToolCallsToText(
12960
+ per.expectedInvocation.intermediateData
12961
+ ),
12962
+ actualToolCalls: _AgentEvaluator._convertToolCallsToText(
12963
+ per.actualInvocation.intermediateData
12964
+ )
12965
+ }));
12966
+ console.table(data);
12967
+ console.log("\n\n");
12968
+ }
12969
+ static _convertContentToText(content) {
12970
+ if (_optionalChain([content, 'optionalAccess', _329 => _329.parts])) {
12971
+ return content.parts.map((p) => p.text || "").filter((text) => text.length > 0).join("\n");
12972
+ }
12973
+ return "";
12974
+ }
12975
+ static _convertToolCallsToText(intermediateData) {
12976
+ if (_optionalChain([intermediateData, 'optionalAccess', _330 => _330.toolUses])) {
12977
+ return intermediateData.toolUses.map((t) => JSON.stringify(t)).join("\n");
12978
+ }
12979
+ return "";
12980
+ }
12981
+ static async _getEvalResultsByEvalId(agent, evalSet, evalMetrics, numRuns) {
12982
+ const evalService = new LocalEvalService(agent);
12983
+ const inferenceResults = [];
12984
+ for (let run = 0; run < numRuns; run++) {
12985
+ for await (const result of evalService.performInference({
12986
+ evalSetId: evalSet.evalSetId,
12987
+ evalCases: [evalSet]
12988
+ })) {
12989
+ inferenceResults.push(result);
12990
+ }
12991
+ }
12992
+ const evalResultsByEvalId = /* @__PURE__ */ new Map();
12993
+ for await (const evalResult of evalService.evaluate({
12994
+ inferenceResults,
12995
+ evaluateConfig: { evalMetrics }
12996
+ })) {
12997
+ for (const caseResult of evalResult.evalCaseResults) {
12998
+ const evalId = caseResult.evalId;
12999
+ if (!evalResultsByEvalId.has(evalId)) {
13000
+ evalResultsByEvalId.set(evalId, []);
13001
+ }
13002
+ evalResultsByEvalId.get(evalId).push(caseResult);
13003
+ }
13004
+ }
13005
+ return evalResultsByEvalId;
13006
+ }
13007
+ static _getEvalMetricResultsWithInvocation(evalResultsPerEvalId) {
13008
+ const evalMetricResults = {};
13009
+ for (const evalCaseResult of evalResultsPerEvalId) {
13010
+ for (const evalMetricsPerInvocation of evalCaseResult.evalMetricResultPerInvocation) {
13011
+ for (const evalMetricResult of evalMetricsPerInvocation.evalMetricResults) {
13012
+ const metricName = evalMetricResult.metricName;
13013
+ if (!(metricName in evalMetricResults)) {
13014
+ evalMetricResults[metricName] = [];
13015
+ }
13016
+ evalMetricResults[metricName].push({
13017
+ actualInvocation: evalMetricsPerInvocation.actualInvocation,
13018
+ expectedInvocation: evalMetricsPerInvocation.expectedInvocation,
13019
+ evalMetricResult
13020
+ });
13021
+ }
13022
+ }
13023
+ }
13024
+ return evalMetricResults;
13025
+ }
13026
+ static _processMetricsAndGetFailures(evalMetricResults, printDetailedResults, agentModule) {
13027
+ const failures = [];
13028
+ for (const [metricName, evalMetricResultsWithInvocations] of Object.entries(
13029
+ evalMetricResults
13030
+ )) {
13031
+ const threshold = _optionalChain([evalMetricResultsWithInvocations, 'access', _331 => _331[0], 'optionalAccess', _332 => _332.evalMetricResult, 'access', _333 => _333.threshold]) || 0;
13032
+ const scores = evalMetricResultsWithInvocations.map((m) => m.evalMetricResult.score).filter((s) => s !== void 0);
13033
+ let overallScore;
13034
+ let overallEvalStatus;
13035
+ if (scores.length > 0) {
13036
+ overallScore = scores.reduce((a, b) => a + b, 0) / scores.length;
13037
+ overallEvalStatus = overallScore >= threshold ? 1 /* PASSED */ : 2 /* FAILED */;
13038
+ } else {
13039
+ overallScore = void 0;
13040
+ overallEvalStatus = 3 /* NOT_EVALUATED */;
13041
+ }
13042
+ if (overallEvalStatus !== 1 /* PASSED */) {
13043
+ if (printDetailedResults) {
13044
+ _AgentEvaluator._printDetails(
13045
+ evalMetricResultsWithInvocations,
13046
+ overallEvalStatus,
13047
+ overallScore,
13048
+ metricName,
13049
+ threshold
13050
+ );
13051
+ }
13052
+ failures.push(
13053
+ `${metricName} for ${agentModule} Failed. Expected ${threshold}, but got ${overallScore}.`
13054
+ );
13055
+ }
13056
+ }
13057
+ return failures;
13058
+ }
13059
+ };
13060
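The eval-set object assembled above from a loaded test file has a fixed shape: one eval case per dataset row, each case holding a single-invocation conversation plus optional session state. The hand-written sample below is a minimal TypeScript sketch of that shape for orientation; the field names mirror the compiled output above, while the concrete values and the tool-use entry format are illustrative assumptions, not data shipped with the package.

// Illustrative sample of the eval-set structure built by AgentEvaluator.
// Field names follow the code above; the values and the get_weather
// tool-use shape are hypothetical.
const sampleEvalSet = {
  evalSetId: `eval-set-${Date.now()}`,
  name: "weather_agent.test.json",
  creationTimestamp: Date.now(),
  evalCases: [
    {
      evalId: "eval-0",
      conversation: [
        {
          invocationId: "invocation-0",
          userContent: { role: "user", parts: [{ text: "What is the weather in Paris?" }] },
          finalResponse: { role: "model", parts: [{ text: "It is sunny in Paris." }] },
          intermediateData: {
            toolUses: [{ name: "get_weather", args: { city: "Paris" } }],
            intermediateResponses: []
          },
          creationTimestamp: Date.now()
        }
      ],
      sessionInput: { appName: "test-app", userId: "test-user", state: { units: "metric" } }
    }
  ]
};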
+
13061
+ // src/evaluation/final-response-match-v1.ts
13062
+ var RougeEvaluator = class extends Evaluator {
13063
+
13064
+ constructor(evalMetric) {
13065
+ super(evalMetric);
13066
+ this.evalMetric = evalMetric;
13067
+ }
13068
+ static getMetricInfo() {
13069
+ return {
13070
+ metricName: "response_match_score" /* RESPONSE_MATCH_SCORE */,
13071
+ description: "This metric evaluates if the agent's final response matches a golden/expected final response using Rouge_1 metric. Value range for this metric is [0,1], with values closer to 1 more desirable.",
13072
+ metricValueInfo: {
13073
+ interval: {
13074
+ minValue: 0,
13075
+ maxValue: 1,
13076
+ openAtMin: false,
13077
+ openAtMax: false
13078
+ }
13079
+ }
13080
+ };
13081
+ }
13082
+ async evaluateInvocations(actualInvocations, expectedInvocations) {
13083
+ let totalScore = 0;
13084
+ let numInvocations = 0;
13085
+ const perInvocationResults = [];
13086
+ for (let i = 0; i < actualInvocations.length; i++) {
13087
+ const actual = actualInvocations[i];
13088
+ const expected = expectedInvocations[i];
13089
+ const reference = getTextFromContent2(expected.finalResponse);
13090
+ const response = getTextFromContent2(actual.finalResponse);
13091
+ const rouge1Scores = await calculateRouge1Scores(response, reference);
13092
+ const score = rouge1Scores.fmeasure;
13093
+ perInvocationResults.push({
13094
+ actualInvocation: actual,
13095
+ expectedInvocation: expected,
13096
+ score,
13097
+ evalStatus: getEvalStatus2(score, this.evalMetric.threshold)
13098
+ });
13099
+ totalScore += score;
13100
+ numInvocations++;
13101
+ }
13102
+ if (perInvocationResults.length > 0) {
13103
+ const overallScore = totalScore / numInvocations;
13104
+ return {
13105
+ overallScore,
13106
+ overallEvalStatus: getEvalStatus2(
13107
+ overallScore,
13108
+ this.evalMetric.threshold
13109
+ ),
13110
+ perInvocationResults
13111
+ };
13112
+ }
13113
+ return {
13114
+ overallEvalStatus: 3 /* NOT_EVALUATED */,
13115
+ perInvocationResults: []
13116
+ };
13117
+ }
13118
+ };
13119
+ function getTextFromContent2(content) {
13120
+ if (_optionalChain([content, 'optionalAccess', _334 => _334.parts])) {
13121
+ return content.parts.map((part) => part.text).filter(Boolean).join("\n");
13122
+ }
13123
+ return "";
13124
+ }
13125
+ function getEvalStatus2(score, threshold) {
13126
+ return score >= threshold ? 1 /* PASSED */ : 2 /* FAILED */;
13127
+ }
13128
+ function calculateRouge1Scores(response, reference) {
13129
+ if (!response.trim() || !reference.trim()) {
13130
+ return { precision: 0, recall: 0, fmeasure: 0 };
13131
+ }
13132
+ const responseTokens = tokenizeText(response);
13133
+ const referenceTokens = tokenizeText(reference);
13134
+ const responseUnigrams = new Set(responseTokens);
13135
+ const referenceUnigrams = new Set(referenceTokens);
13136
+ const commonUnigrams = new Set(
13137
+ [...responseUnigrams].filter((token) => referenceUnigrams.has(token))
13138
+ );
13139
+ const precision = responseUnigrams.size > 0 ? commonUnigrams.size / responseUnigrams.size : 0;
13140
+ const recall = referenceUnigrams.size > 0 ? commonUnigrams.size / referenceUnigrams.size : 0;
13141
+ const fmeasure = precision + recall > 0 ? 2 * precision * recall / (precision + recall) : 0;
13142
+ return { precision, recall, fmeasure };
13143
+ }
13144
+ function tokenizeText(text) {
13145
+ return text.toLowerCase().replace(/[^\w\s]/g, " ").split(/\s+/).filter((token) => token.length > 0);
13146
+ }
13147
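The ROUGE-1 scoring above is plain unigram overlap: precision is the share of the response's unique tokens that also appear in the reference, recall is the share of the reference's unique tokens covered by the response, and the reported score is their harmonic mean (F-measure). The snippet below restates that arithmetic as a self-contained sketch; the rouge1 helper is illustrative only and is not part of the package API.

// Re-statement of the unigram-overlap math used by calculateRouge1Scores,
// for illustration only.
function rouge1(response: string, reference: string) {
  const toTokens = (s: string) =>
    s.toLowerCase().replace(/[^\w\s]/g, " ").split(/\s+/).filter(Boolean);
  const resp = new Set(toTokens(response));
  const ref = new Set(toTokens(reference));
  const common = [...resp].filter((t) => ref.has(t)).length;
  const precision = resp.size > 0 ? common / resp.size : 0;
  const recall = ref.size > 0 ? common / ref.size : 0;
  const fmeasure = precision + recall > 0 ? (2 * precision * recall) / (precision + recall) : 0;
  return { precision, recall, fmeasure };
}

// "It is sunny." vs "It is sunny in Paris.": precision 3/3, recall 3/5, F-measure 0.75.
console.log(rouge1("It is sunny.", "It is sunny in Paris."));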
+
13148
+ // src/version.ts
13149
+ var VERSION = "0.1.0";
13150
+
13151
+
13152
+
13153
+
13154
+
13155
+
13156
+
13157
+
13158
+
13159
+
13160
+
11757
13161
 
11758
13162
 
11759
13163
 
@@ -11905,4 +13309,4 @@ var VERSION = "0.1.0";
11905
13309
 
11906
13310
 
11907
13311
 
11908
- exports.AF_FUNCTION_CALL_ID_PREFIX = AF_FUNCTION_CALL_ID_PREFIX; exports.Agent = LlmAgent; exports.AgentBuilder = AgentBuilder; exports.AgentTool = AgentTool; exports.Agents = agents_exports; exports.AiSdkLlm = AiSdkLlm; exports.AnthropicLlm = AnthropicLlm; exports.ApiKeyCredential = ApiKeyCredential; exports.ApiKeyScheme = ApiKeyScheme; exports.AuthConfig = AuthConfig; exports.AuthCredential = AuthCredential; exports.AuthCredentialType = AuthCredentialType; exports.AuthHandler = AuthHandler; exports.AuthScheme = AuthScheme; exports.AuthSchemeType = AuthSchemeType; exports.AuthTool = AuthTool; exports.AutoFlow = AutoFlow; exports.BaseAgent = BaseAgent; exports.BaseCodeExecutor = BaseCodeExecutor; exports.BaseLLMConnection = BaseLLMConnection; exports.BaseLlm = BaseLlm; exports.BaseLlmFlow = BaseLlmFlow; exports.BaseLlmRequestProcessor = BaseLlmRequestProcessor; exports.BaseLlmResponseProcessor = BaseLlmResponseProcessor; exports.BasePlanner = BasePlanner; exports.BaseSessionService = BaseSessionService; exports.BaseTool = BaseTool; exports.BasicAuthCredential = BasicAuthCredential; exports.BearerTokenCredential = BearerTokenCredential; exports.BuiltInCodeExecutor = BuiltInCodeExecutor; exports.BuiltInPlanner = BuiltInPlanner; exports.CallbackContext = CallbackContext; exports.CodeExecutionUtils = CodeExecutionUtils; exports.CodeExecutorContext = CodeExecutorContext; exports.DatabaseSessionService = DatabaseSessionService; exports.EnhancedAuthConfig = EnhancedAuthConfig; exports.Event = Event; exports.EventActions = EventActions; exports.Events = events_exports; exports.ExitLoopTool = ExitLoopTool; exports.FileOperationsTool = FileOperationsTool; exports.Flows = flows_exports; exports.FunctionTool = FunctionTool; exports.GcsArtifactService = GcsArtifactService; exports.GetUserChoiceTool = GetUserChoiceTool; exports.GoogleLlm = GoogleLlm; exports.GoogleSearch = GoogleSearch; exports.HttpRequestTool = HttpRequestTool; exports.HttpScheme = HttpScheme; exports.InMemoryArtifactService = InMemoryArtifactService; exports.InMemoryMemoryService = InMemoryMemoryService; exports.InMemoryRunner = InMemoryRunner; exports.InMemorySessionService = InMemorySessionService; exports.InvocationContext = InvocationContext; exports.LLMRegistry = LLMRegistry; exports.LangGraphAgent = LangGraphAgent; exports.LlmAgent = LlmAgent; exports.LlmCallsLimitExceededError = LlmCallsLimitExceededError; exports.LlmRequest = LlmRequest; exports.LlmResponse = LlmResponse; exports.LoadArtifactsTool = LoadArtifactsTool; exports.LoadMemoryTool = LoadMemoryTool; exports.LoopAgent = LoopAgent; exports.McpAbi = McpAbi; exports.McpAtp = McpAtp; exports.McpBamm = McpBamm; exports.McpCoinGecko = McpCoinGecko; exports.McpDiscord = McpDiscord; exports.McpError = McpError; exports.McpErrorType = McpErrorType; exports.McpFilesystem = McpFilesystem; exports.McpFraxlend = McpFraxlend; exports.McpGeneric = McpGeneric; exports.McpIqWiki = McpIqWiki; exports.McpMemory = McpMemory; exports.McpNearAgent = McpNearAgent; exports.McpNearIntents = McpNearIntents; exports.McpOdos = McpOdos; exports.McpSamplingHandler = McpSamplingHandler; exports.McpTelegram = McpTelegram; exports.McpToolset = McpToolset; exports.Memory = memory_exports; exports.Models = models_exports; exports.OAuth2Credential = OAuth2Credential; exports.OAuth2Scheme = OAuth2Scheme; exports.OpenAiLlm = OpenAiLlm; exports.OpenIdConnectScheme = OpenIdConnectScheme; exports.ParallelAgent = ParallelAgent; exports.PlanReActPlanner = PlanReActPlanner; 
exports.REQUEST_EUC_FUNCTION_CALL_NAME = REQUEST_EUC_FUNCTION_CALL_NAME; exports.ReadonlyContext = ReadonlyContext; exports.RunConfig = RunConfig; exports.Runner = Runner; exports.SequentialAgent = SequentialAgent; exports.Sessions = sessions_exports; exports.SingleFlow = SingleFlow; exports.State = State; exports.StreamingMode = StreamingMode; exports.TelemetryService = TelemetryService; exports.ToolContext = ToolContext; exports.Tools = tools_exports; exports.TransferToAgentTool = TransferToAgentTool; exports.UserInteractionTool = UserInteractionTool; exports.VERSION = VERSION; exports.VertexAiSessionService = VertexAiSessionService; exports._findFunctionCallEventIfLastEventIsFunctionResponse = _findFunctionCallEventIfLastEventIsFunctionResponse; exports.adkToMcpToolType = adkToMcpToolType; exports.agentTransferRequestProcessor = requestProcessor8; exports.basicRequestProcessor = requestProcessor2; exports.buildFunctionDeclaration = buildFunctionDeclaration; exports.codeExecutionRequestProcessor = requestProcessor3; exports.codeExecutionResponseProcessor = responseProcessor; exports.contentRequestProcessor = requestProcessor4; exports.createAuthToolArguments = createAuthToolArguments; exports.createBranchContextForSubAgent = createBranchContextForSubAgent; exports.createDatabaseSessionService = createDatabaseSessionService; exports.createFunctionTool = createFunctionTool; exports.createMysqlSessionService = createMysqlSessionService; exports.createPostgresSessionService = createPostgresSessionService; exports.createSamplingHandler = createSamplingHandler; exports.createSqliteSessionService = createSqliteSessionService; exports.createTool = createTool; exports.generateAuthEvent = generateAuthEvent; exports.generateClientFunctionCallId = generateClientFunctionCallId; exports.getLongRunningFunctionCalls = getLongRunningFunctionCalls; exports.getMcpTools = getMcpTools; exports.handleFunctionCallsAsync = handleFunctionCallsAsync; exports.handleFunctionCallsLive = handleFunctionCallsLive; exports.identityRequestProcessor = requestProcessor5; exports.initializeTelemetry = initializeTelemetry; exports.injectSessionState = injectSessionState; exports.instructionsRequestProcessor = requestProcessor6; exports.isEnhancedAuthConfig = isEnhancedAuthConfig; exports.jsonSchemaToDeclaration = jsonSchemaToDeclaration; exports.mcpSchemaToParameters = mcpSchemaToParameters; exports.mergeAgentRun = mergeAgentRun; exports.mergeParallelFunctionResponseEvents = mergeParallelFunctionResponseEvents; exports.newInvocationContextId = newInvocationContextId; exports.nlPlanningRequestProcessor = requestProcessor7; exports.nlPlanningResponseProcessor = responseProcessor2; exports.normalizeJsonSchema = normalizeJsonSchema; exports.populateClientFunctionCallId = populateClientFunctionCallId; exports.registerProviders = registerProviders; exports.removeClientFunctionCallId = removeClientFunctionCallId; exports.requestProcessor = requestProcessor; exports.shutdownTelemetry = shutdownTelemetry; exports.telemetryService = telemetryService; exports.traceLlmCall = traceLlmCall; exports.traceToolCall = traceToolCall; exports.tracer = tracer;
13312
+ exports.AF_FUNCTION_CALL_ID_PREFIX = AF_FUNCTION_CALL_ID_PREFIX; exports.Agent = LlmAgent; exports.AgentBuilder = AgentBuilder; exports.AgentEvaluator = AgentEvaluator; exports.AgentTool = AgentTool; exports.Agents = agents_exports; exports.AiSdkLlm = AiSdkLlm; exports.AnthropicLlm = AnthropicLlm; exports.ApiKeyCredential = ApiKeyCredential; exports.ApiKeyScheme = ApiKeyScheme; exports.AuthConfig = AuthConfig; exports.AuthCredential = AuthCredential; exports.AuthCredentialType = AuthCredentialType; exports.AuthHandler = AuthHandler; exports.AuthScheme = AuthScheme; exports.AuthSchemeType = AuthSchemeType; exports.AuthTool = AuthTool; exports.AutoFlow = AutoFlow; exports.BaseAgent = BaseAgent; exports.BaseCodeExecutor = BaseCodeExecutor; exports.BaseLLMConnection = BaseLLMConnection; exports.BaseLlm = BaseLlm; exports.BaseLlmFlow = BaseLlmFlow; exports.BaseLlmRequestProcessor = BaseLlmRequestProcessor; exports.BaseLlmResponseProcessor = BaseLlmResponseProcessor; exports.BasePlanner = BasePlanner; exports.BaseSessionService = BaseSessionService; exports.BaseTool = BaseTool; exports.BasicAuthCredential = BasicAuthCredential; exports.BearerTokenCredential = BearerTokenCredential; exports.BuiltInCodeExecutor = BuiltInCodeExecutor; exports.BuiltInPlanner = BuiltInPlanner; exports.CallbackContext = CallbackContext; exports.CodeExecutionUtils = CodeExecutionUtils; exports.CodeExecutorContext = CodeExecutorContext; exports.DatabaseSessionService = DatabaseSessionService; exports.EnhancedAuthConfig = EnhancedAuthConfig; exports.EvalResult = EvalResult; exports.EvalStatus = EvalStatus; exports.Evaluation = evaluation_exports; exports.Evaluator = Evaluator; exports.Event = Event; exports.EventActions = EventActions; exports.Events = events_exports; exports.ExitLoopTool = ExitLoopTool; exports.FileOperationsTool = FileOperationsTool; exports.FinalResponseMatchV2Evaluator = FinalResponseMatchV2Evaluator; exports.Flows = flows_exports; exports.FunctionTool = FunctionTool; exports.GcsArtifactService = GcsArtifactService; exports.GetUserChoiceTool = GetUserChoiceTool; exports.GoogleLlm = GoogleLlm; exports.GoogleSearch = GoogleSearch; exports.HttpRequestTool = HttpRequestTool; exports.HttpScheme = HttpScheme; exports.InMemoryArtifactService = InMemoryArtifactService; exports.InMemoryMemoryService = InMemoryMemoryService; exports.InMemoryRunner = InMemoryRunner; exports.InMemorySessionService = InMemorySessionService; exports.InvocationContext = InvocationContext; exports.LLMRegistry = LLMRegistry; exports.LangGraphAgent = LangGraphAgent; exports.LlmAgent = LlmAgent; exports.LlmCallsLimitExceededError = LlmCallsLimitExceededError; exports.LlmRequest = LlmRequest; exports.LlmResponse = LlmResponse; exports.LoadArtifactsTool = LoadArtifactsTool; exports.LoadMemoryTool = LoadMemoryTool; exports.LocalEvalService = LocalEvalService; exports.LoopAgent = LoopAgent; exports.McpAbi = McpAbi; exports.McpAtp = McpAtp; exports.McpBamm = McpBamm; exports.McpCoinGecko = McpCoinGecko; exports.McpDiscord = McpDiscord; exports.McpError = McpError; exports.McpErrorType = McpErrorType; exports.McpFilesystem = McpFilesystem; exports.McpFraxlend = McpFraxlend; exports.McpGeneric = McpGeneric; exports.McpIqWiki = McpIqWiki; exports.McpMemory = McpMemory; exports.McpNearAgent = McpNearAgent; exports.McpNearIntents = McpNearIntents; exports.McpOdos = McpOdos; exports.McpSamplingHandler = McpSamplingHandler; exports.McpTelegram = McpTelegram; exports.McpToolset = McpToolset; exports.Memory = memory_exports; exports.Models = 
models_exports; exports.OAuth2Credential = OAuth2Credential; exports.OAuth2Scheme = OAuth2Scheme; exports.OpenAiLlm = OpenAiLlm; exports.OpenIdConnectScheme = OpenIdConnectScheme; exports.ParallelAgent = ParallelAgent; exports.PlanReActPlanner = PlanReActPlanner; exports.PrebuiltMetrics = PrebuiltMetrics; exports.REQUEST_EUC_FUNCTION_CALL_NAME = REQUEST_EUC_FUNCTION_CALL_NAME; exports.ReadonlyContext = ReadonlyContext; exports.RougeEvaluator = RougeEvaluator; exports.RunConfig = RunConfig; exports.Runner = Runner; exports.SafetyEvaluatorV1 = SafetyEvaluatorV1; exports.SequentialAgent = SequentialAgent; exports.Sessions = sessions_exports; exports.SingleFlow = SingleFlow; exports.State = State; exports.StreamingMode = StreamingMode; exports.TelemetryService = TelemetryService; exports.ToolContext = ToolContext; exports.Tools = tools_exports; exports.TrajectoryEvaluator = TrajectoryEvaluator; exports.TransferToAgentTool = TransferToAgentTool; exports.UserInteractionTool = UserInteractionTool; exports.VERSION = VERSION; exports.VertexAiSessionService = VertexAiSessionService; exports._findFunctionCallEventIfLastEventIsFunctionResponse = _findFunctionCallEventIfLastEventIsFunctionResponse; exports.adkToMcpToolType = adkToMcpToolType; exports.agentTransferRequestProcessor = requestProcessor8; exports.basicRequestProcessor = requestProcessor2; exports.buildFunctionDeclaration = buildFunctionDeclaration; exports.codeExecutionRequestProcessor = requestProcessor3; exports.codeExecutionResponseProcessor = responseProcessor; exports.contentRequestProcessor = requestProcessor4; exports.createAuthToolArguments = createAuthToolArguments; exports.createBranchContextForSubAgent = createBranchContextForSubAgent; exports.createDatabaseSessionService = createDatabaseSessionService; exports.createFunctionTool = createFunctionTool; exports.createMysqlSessionService = createMysqlSessionService; exports.createPostgresSessionService = createPostgresSessionService; exports.createSamplingHandler = createSamplingHandler; exports.createSqliteSessionService = createSqliteSessionService; exports.createTool = createTool; exports.generateAuthEvent = generateAuthEvent; exports.generateClientFunctionCallId = generateClientFunctionCallId; exports.getLongRunningFunctionCalls = getLongRunningFunctionCalls; exports.getMcpTools = getMcpTools; exports.handleFunctionCallsAsync = handleFunctionCallsAsync; exports.handleFunctionCallsLive = handleFunctionCallsLive; exports.identityRequestProcessor = requestProcessor5; exports.initializeTelemetry = initializeTelemetry; exports.injectSessionState = injectSessionState; exports.instructionsRequestProcessor = requestProcessor6; exports.isEnhancedAuthConfig = isEnhancedAuthConfig; exports.jsonSchemaToDeclaration = jsonSchemaToDeclaration; exports.mcpSchemaToParameters = mcpSchemaToParameters; exports.mergeAgentRun = mergeAgentRun; exports.mergeParallelFunctionResponseEvents = mergeParallelFunctionResponseEvents; exports.newInvocationContextId = newInvocationContextId; exports.nlPlanningRequestProcessor = requestProcessor7; exports.nlPlanningResponseProcessor = responseProcessor2; exports.normalizeJsonSchema = normalizeJsonSchema; exports.populateClientFunctionCallId = populateClientFunctionCallId; exports.registerProviders = registerProviders; exports.removeClientFunctionCallId = removeClientFunctionCallId; exports.requestProcessor = requestProcessor; exports.shutdownTelemetry = shutdownTelemetry; exports.telemetryService = telemetryService; exports.traceLlmCall = traceLlmCall; 
exports.traceToolCall = traceToolCall; exports.tracer = tracer;
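The 0.2.0 export list adds the evaluation surface (AgentEvaluator, RougeEvaluator, TrajectoryEvaluator, LocalEvalService, EvalStatus, and related symbols). A minimal consumer sketch follows; the imported names come from the exports line above, but the evalMetric argument shape is inferred from the compiled RougeEvaluator constructor and may omit optional fields, and the invocation objects are assumed to follow the structure shown earlier in the diff.

import { RougeEvaluator, EvalStatus } from "@iqai/adk";

// Hypothetical usage: score an actual final response against an expected one
// with ROUGE-1. The evalMetric and invocation shapes are inferred, not documented.
const expected = [{
  invocationId: "invocation-0",
  userContent: { role: "user", parts: [{ text: "What is the weather in Paris?" }] },
  finalResponse: { role: "model", parts: [{ text: "It is sunny in Paris." }] },
  creationTimestamp: Date.now()
}];
const actual = [{
  ...expected[0],
  finalResponse: { role: "model", parts: [{ text: "It is sunny." }] }
}];

const rouge = new RougeEvaluator({ metricName: "response_match_score", threshold: 0.7 });
const result = await rouge.evaluateInvocations(actual, expected);
console.log(result.overallEvalStatus === EvalStatus.PASSED, result.overallScore);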