@iqai/adk 0.1.21 → 0.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -53,7 +53,7 @@ var init_logger = __esm({
  }
  info(message, ...args) {
  const time = (/* @__PURE__ */ new Date()).toLocaleTimeString();
- console.info(
+ console.debug(
  this.colorize(`[${time}] \u2139\uFE0F [${this.name}] ${message}`),
  ...args
  );
@@ -229,7 +229,7 @@ var init_base_tool = __esm({
  * @param context The context of the tool
  * @returns The result of running the tool
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  throw new Error(`${this.constructor.name} runAsync is not implemented`);
  }
  /**
@@ -253,6 +253,12 @@ var init_base_tool = __esm({
  if (!toolWithFunctionDeclarations.functionDeclarations) {
  toolWithFunctionDeclarations.functionDeclarations = [];
  }
+ const alreadyExists = toolWithFunctionDeclarations.functionDeclarations.some(
+ (fd) => fd?.name === functionDeclaration.name
+ );
+ if (alreadyExists) {
+ return;
+ }
  toolWithFunctionDeclarations.functionDeclarations.push(
  functionDeclaration
  );
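
Note on the hunk above: the added guard makes appending a function declaration idempotent, so registering the same tool twice can no longer produce duplicate entries in functionDeclarations. A minimal sketch of the pattern in isolation (the FunctionDeclaration shape and mergeFunctionDeclaration name are illustrative, not the package's API):

// Dedupe-before-push: merging the same declaration twice is a no-op.
interface FunctionDeclaration {
  name: string;
  description?: string;
}

function mergeFunctionDeclaration(
  declarations: FunctionDeclaration[],
  incoming: FunctionDeclaration,
): void {
  // Skip the push when a declaration with the same name is already present.
  const alreadyExists = declarations.some((fd) => fd?.name === incoming.name);
  if (alreadyExists) {
    return;
  }
  declarations.push(incoming);
}
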
@@ -281,7 +287,7 @@ var init_base_tool = __esm({
  * @param context Tool execution context
  * @returns Result of the tool execution or error information
  */
- async safeExecute(args, context) {
+ async safeExecute(args, context4) {
  if (!this.validateArguments(args)) {
  return {
  error: "Invalid arguments",
@@ -302,7 +308,7 @@ var init_base_tool = __esm({
  );
  await new Promise((resolve) => setTimeout(resolve, delay));
  }
- const result = await this.runAsync(args, context);
+ const result = await this.runAsync(args, context4);
  return { result };
  } catch (error) {
  lastError = error instanceof Error ? error : new Error(String(error));
@@ -500,7 +506,7 @@ var init_function_tool = __esm({
  /**
  * Executes the wrapped function with the provided arguments.
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  try {
  const missingArgs = this.getMissingMandatoryArgs(args);
  if (missingArgs.length > 0) {
@@ -513,13 +519,13 @@ You could retry calling this tool, but it is IMPORTANT for you to provide all th
  }
  const argsToCall = { ...args };
  if (this.functionAcceptsToolContext()) {
- argsToCall.toolContext = context;
+ argsToCall.toolContext = context4;
  }
  const funcParams = this.getFunctionParameters();
  const argValues = [];
  for (const paramName of funcParams) {
  if (paramName === "toolContext" && this.functionAcceptsToolContext()) {
- argValues.push(context);
+ argValues.push(context4);
  } else if (paramName in argsToCall) {
  const convertedValue = this.convertArgumentType(
  argsToCall[paramName],
@@ -954,6 +960,7 @@ init_logger();
  import {
  DiagConsoleLogger,
  DiagLogLevel,
+ context,
  diag,
  trace
  } from "@opentelemetry/api";
@@ -994,13 +1001,24 @@ var TelemetryService = class {
  this.sdk = new NodeSDK({
  resource,
  traceExporter,
- instrumentations: [getNodeAutoInstrumentations()]
+ instrumentations: [
+ getNodeAutoInstrumentations({
+ // Follow Python ADK approach: let all HTTP instrumentation through.
+ // This provides transparency and aligns with standard OpenTelemetry behavior.
+ // High-level LLM tracing is provided through dedicated ADK spans.
+ "@opentelemetry/instrumentation-http": {
+ ignoreIncomingRequestHook: (req) => {
+ return true;
+ }
+ }
+ })
+ ]
  });
  try {
  this.sdk.start();
  this.isInitialized = true;
  this.tracer = trace.getTracer("iqai-adk", config.appVersion || "0.1.0");
- diag.info("OpenTelemetry SDK started successfully.");
+ diag.debug("OpenTelemetry SDK started successfully.");
  } catch (error) {
  diag.error("Error starting OpenTelemetry SDK:", error);
  throw error;
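
Note on the hunk above: in @opentelemetry/instrumentation-http, an ignoreIncomingRequestHook that returns true tells the instrumentation to skip creating a span for that incoming request; outgoing-request spans are unaffected, so the unconditional `return true` disables incoming-request spans only. A standalone sketch of the same configuration, assuming @opentelemetry/auto-instrumentations-node is installed (SDK wiring elided):

import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
import type { IncomingMessage } from "node:http";

const instrumentations = getNodeAutoInstrumentations({
  "@opentelemetry/instrumentation-http": {
    // Returning true skips the span for that incoming request; returning
    // true unconditionally turns off all incoming-request spans.
    ignoreIncomingRequestHook: (_req: IncomingMessage) => true,
  },
});
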
@@ -1043,7 +1061,7 @@ var TelemetryService = class {
  });
  await Promise.race([this.sdk.shutdown(), timeoutPromise]);
  this.isInitialized = false;
- diag.info("Telemetry terminated successfully.");
+ diag.debug("Telemetry terminated successfully.");
  } catch (error) {
  if (error instanceof Error && error.message.includes("timeout")) {
  diag.warn("Telemetry shutdown timed out, some traces may be lost");
@@ -1071,7 +1089,7 @@ var TelemetryService = class {
  }
  }
  span.setAttributes({
- "gen_ai.system.name": "iqai-adk",
+ "gen_ai.system": "iqai-adk",
  "gen_ai.operation.name": "execute_tool",
  "gen_ai.tool.name": tool.name,
  "gen_ai.tool.description": tool.description,
@@ -1085,7 +1103,7 @@ var TelemetryService = class {
  ...process.env.NODE_ENV && {
  "deployment.environment.name": process.env.NODE_ENV
  },
- // Tool-specific data
+ // ADK-specific attributes (matching Python namespace pattern)
  "adk.tool_call_args": this._safeJsonStringify(args),
  "adk.event_id": functionResponseEvent.invocationId,
  "adk.tool_response": this._safeJsonStringify(toolResponse),
@@ -1101,9 +1119,8 @@ var TelemetryService = class {
  if (!span) return;
  const requestData = this._buildLlmRequestForTrace(llmRequest);
  span.setAttributes({
- // Standard OpenTelemetry attributes
- "gen_ai.system.name": "iqai-adk",
- "gen_ai.operation.name": "generate",
+ // Standard OpenTelemetry attributes (following Python pattern)
+ "gen_ai.system": "iqai-adk",
  "gen_ai.request.model": llmRequest.model,
  // Session and user tracking (maps to Langfuse sessionId, userId)
  "session.id": invocationContext.session.id,
@@ -1116,15 +1133,21 @@ var TelemetryService = class {
  "gen_ai.request.max_tokens": llmRequest.config.maxOutputTokens || 0,
  "gen_ai.request.temperature": llmRequest.config.temperature || 0,
  "gen_ai.request.top_p": llmRequest.config.topP || 0,
- // Legacy ADK attributes (keep for backward compatibility)
  "adk.system_name": "iqai-adk",
  "adk.request_model": llmRequest.model,
- "adk.invocation_id": invocationContext.session.id,
+ // ADK-specific attributes (matching Python namespace pattern)
+ "adk.invocation_id": invocationContext.invocationId,
  "adk.session_id": invocationContext.session.id,
  "adk.event_id": eventId,
  "adk.llm_request": this._safeJsonStringify(requestData),
  "adk.llm_response": this._safeJsonStringify(llmResponse)
  });
+ if (llmResponse.usageMetadata) {
+ span.setAttributes({
+ "gen_ai.usage.input_tokens": llmResponse.usageMetadata.promptTokenCount || 0,
+ "gen_ai.usage.output_tokens": llmResponse.usageMetadata.candidatesTokenCount || 0
+ });
+ }
  span.addEvent("gen_ai.content.prompt", {
  "gen_ai.prompt": this._safeJsonStringify(requestData.messages)
  });
@@ -1137,9 +1160,14 @@ var TelemetryService = class {
  */
  async *traceAsyncGenerator(spanName, generator) {
  const span = this.tracer.startSpan(spanName);
+ const spanContext = trace.setSpan(context.active(), span);
  try {
- for await (const item of generator) {
- yield item;
+ while (true) {
+ const result = await context.with(spanContext, () => generator.next());
+ if (result.done) {
+ break;
+ }
+ yield result.value;
  }
  } catch (error) {
  span.recordException(error);
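
Note on the hunk above: a plain `for await` resumes the generator outside the span's context, so spans started inside the generator body are recorded as detached roots rather than children of `span`. Driving the generator manually and wrapping each next() call in context.with keeps the span active while the generator body runs. A minimal sketch of the technique, assuming only @opentelemetry/api (the tracer name is illustrative):

import { context, trace } from "@opentelemetry/api";

async function* withActiveSpan<T>(
  spanName: string,
  generator: AsyncGenerator<T>,
): AsyncGenerator<T> {
  const tracer = trace.getTracer("example-tracer");
  const span = tracer.startSpan(spanName);
  const spanContext = trace.setSpan(context.active(), span);
  try {
    while (true) {
      // Resume the generator with `span` active so nested spans parent to it.
      const result = await context.with(spanContext, () => generator.next());
      if (result.done) break;
      yield result.value;
    }
  } catch (error) {
    span.recordException(error as Error);
    throw error;
  } finally {
    span.end();
  }
}
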
@@ -1226,7 +1254,7 @@ var traceLlmCall = (invocationContext, eventId, llmRequest, llmResponse) => tele
  // src/models/base-llm.ts
  var BaseLlm = class {
  /**
- * The name of the LLM, e.g. gemini-1.5-flash or gemini-1.5-flash-001.
+ * The name of the LLM, e.g. gemini-2.5-flash or gemini-2.5-flash-001.
  */
  model;
  logger = new Logger({ name: "BaseLlm" });
@@ -1915,7 +1943,7 @@ var GoogleLlm = class extends BaseLlm {
  /**
  * Constructor for Gemini
  */
- constructor(model = "gemini-1.5-flash") {
+ constructor(model = "gemini-2.5-flash") {
  super(model);
  }
  /**
@@ -3954,10 +3982,10 @@ var CreatedTool = class extends BaseTool {
  /**
  * Executes the tool function with validation
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  try {
  const validatedArgs = this.schema.parse(args);
- const result = await Promise.resolve(this.func(validatedArgs, context));
+ const result = await Promise.resolve(this.func(validatedArgs, context4));
  return result ?? {};
  } catch (error) {
  if (error instanceof z.ZodError) {
@@ -4215,7 +4243,7 @@ var AgentTool = class extends BaseTool {
  /**
  * Execute the tool by running the agent with the provided input
  */
- async runAsync(params, context) {
+ async runAsync(params, context4) {
  try {
  const input = params.input || Object.values(params)[0];
  if (!isLlmAgent(this.agent)) {
@@ -4223,7 +4251,7 @@ var AgentTool = class extends BaseTool {
  `Agent ${this.name} does not support running as a tool`
  );
  }
- const parentInvocation = context._invocationContext;
+ const parentInvocation = context4._invocationContext;
  const childInvocationContext = new InvocationContext({
  invocationId: uuidv42(),
  agent: this.agent,
@@ -4260,8 +4288,8 @@ var AgentTool = class extends BaseTool {
  } catch {
  toolResult = mergedText;
  }
- if (this.outputKey && context?.state) {
- context.state[this.outputKey] = toolResult;
+ if (this.outputKey && context4?.state) {
+ context4.state[this.outputKey] = toolResult;
  }
  return toolResult;
  } catch (error) {
@@ -4809,9 +4837,9 @@ var UserInteractionTool = class extends BaseTool {
  /**
  * Execute the user interaction
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  try {
- const actions = context.actions;
+ const actions = context4.actions;
  if (!actions || !actions.promptUser) {
  return {
  success: false,
@@ -4859,9 +4887,9 @@ var ExitLoopTool = class extends BaseTool {
  /**
  * Execute the exit loop action
  */
- async runAsync(_args, context) {
+ async runAsync(_args, context4) {
  this.logger.debug("Executing exit loop tool");
- context.actions.escalate = true;
+ context4.actions.escalate = true;
  }
  };

@@ -4912,14 +4940,14 @@ var GetUserChoiceTool = class extends BaseTool {
  * This is a long running operation that will return null initially
  * and the actual choice will be provided asynchronously
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  this.logger.debug(
  `Executing get_user_choice with options: ${args.options.join(", ")}`
  );
  if (args.question) {
  this.logger.debug(`Question: ${args.question}`);
  }
- context.actions.skipSummarization = true;
+ context4.actions.skipSummarization = true;
  return null;
  }
  };
@@ -4961,9 +4989,9 @@ var TransferToAgentTool = class extends BaseTool {
  /**
  * Execute the transfer to agent action
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  this.logger.debug(`Executing transfer to agent: ${args.agent_name}`);
- context.actions.transferToAgent = args.agent_name;
+ context4.actions.transferToAgent = args.agent_name;
  }
  };

@@ -5004,10 +5032,10 @@ var LoadMemoryTool = class extends BaseTool {
  /**
  * Execute the memory loading action
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  this.logger.debug(`Executing load_memory with query: ${args.query}`);
  try {
- const searchResult = await context.searchMemory(args.query);
+ const searchResult = await context4.searchMemory(args.query);
  return {
  memories: searchResult.memories || [],
  count: searchResult.memories?.length || 0
@@ -5057,7 +5085,7 @@ var LoadArtifactsTool = class extends BaseTool {
  /**
  * Execute the load artifacts operation
  */
- async runAsync(args, context) {
+ async runAsync(args, context4) {
  const artifactNames = args.artifact_names || [];
  return { artifact_names: artifactNames };
  }
@@ -6088,12 +6116,12 @@ var McpToolset = class {
  * Checks if a tool should be included based on the tool filter.
  * Similar to Python's _is_selected method.
  */
- isSelected(tool, context) {
+ isSelected(tool, context4) {
  if (!this.toolFilter) {
  return true;
  }
  if (typeof this.toolFilter === "function") {
- return this.toolFilter(tool, context);
+ return this.toolFilter(tool, context4);
  }
  if (Array.isArray(this.toolFilter)) {
  return this.toolFilter.includes(tool.name);
@@ -6146,7 +6174,7 @@ var McpToolset = class {
  * Retrieves tools from the MCP server and converts them to BaseTool instances.
  * Similar to Python's get_tools method.
  */
- async getTools(context) {
+ async getTools(context4) {
  try {
  if (this.isClosing) {
  throw new McpError(
@@ -6168,7 +6196,7 @@ var McpToolset = class {
  }
  const tools = [];
  for (const mcpTool of toolsResponse.tools) {
- if (this.isSelected(mcpTool, context)) {
+ if (this.isSelected(mcpTool, context4)) {
  try {
  const tool = await createTool2(mcpTool, client);
  tools.push(tool);
@@ -6205,9 +6233,9 @@ var McpToolset = class {
  /**
  * Refreshes the tool cache by clearing it and fetching tools again
  */
- async refreshTools(context) {
+ async refreshTools(context4) {
  this.tools = [];
- return this.getTools(context);
+ return this.getTools(context4);
  }
  /**
  * Closes the connection to the MCP server.
@@ -6251,6 +6279,7 @@ async function getMcpTools(config, toolFilter) {
  }

  // src/flows/llm-flows/functions.ts
+ import { context as context2, trace as trace2 } from "@opentelemetry/api";
  var AF_FUNCTION_CALL_ID_PREFIX = "adk-";
  var REQUEST_EUC_FUNCTION_CALL_NAME = "adk_request_credential";
  function generateClientFunctionCallId() {
@@ -6340,23 +6369,40 @@ async function handleFunctionCallsAsync(invocationContext, functionCallEvent, to
  toolsDict
  );
  const functionArgs = functionCall.args || {};
- const functionResponse = await callToolAsync(
- tool,
- functionArgs,
- toolContext
- );
- if (tool.isLongRunning) {
+ const tracer2 = telemetryService.getTracer();
+ const span = tracer2.startSpan(`execute_tool ${tool.name}`);
+ const spanContext = trace2.setSpan(context2.active(), span);
+ try {
+ const functionResponse = await context2.with(spanContext, async () => {
+ const result = await callToolAsync(tool, functionArgs, toolContext);
+ if (tool.isLongRunning && !result) {
+ return null;
+ }
+ const functionResponseEvent = buildResponseEvent(
+ tool,
+ result,
+ toolContext,
+ invocationContext
+ );
+ telemetryService.traceToolCall(
+ tool,
+ functionArgs,
+ functionResponseEvent
+ );
+ return { result, event: functionResponseEvent };
+ });
  if (!functionResponse) {
  continue;
  }
+ functionResponseEvents.push(functionResponse.event);
+ span.setStatus({ code: 1 });
+ } catch (error) {
+ span.recordException(error);
+ span.setStatus({ code: 2, message: error.message });
+ throw error;
+ } finally {
+ span.end();
  }
- const functionResponseEvent = buildResponseEvent(
- tool,
- functionResponse,
- toolContext,
- invocationContext
- );
- functionResponseEvents.push(functionResponseEvent);
  }
  if (!functionResponseEvents.length) {
  return null;
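
Note on the hunk above: the numeric statuses are the SpanStatusCode enum values from @opentelemetry/api (UNSET = 0, OK = 1, ERROR = 2), so `span.setStatus({ code: 1 })` marks the span OK and `{ code: 2, message }` marks it failed. The same pattern with the named enum (sketch only; the tool invocation is elided):

import { SpanStatusCode, trace } from "@opentelemetry/api";

const span = trace.getTracer("example-tracer").startSpan("execute_tool example");
try {
  // ... run the tool with the span's context active ...
  span.setStatus({ code: SpanStatusCode.OK }); // same as { code: 1 }
} catch (error) {
  span.recordException(error as Error);
  span.setStatus({
    code: SpanStatusCode.ERROR, // same as { code: 2 }
    message: (error as Error).message,
  });
  throw error;
} finally {
  span.end();
}
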
@@ -6456,7 +6502,7 @@ var BaseLlmFlow = class {
  responseProcessors = [];
  logger = new Logger({ name: "BaseLlmFlow" });
  async *runAsync(invocationContext) {
- this.logger.info(`Agent '${invocationContext.agent.name}' started.`);
+ this.logger.debug(`Agent '${invocationContext.agent.name}' started.`);
  let stepCount = 0;
  while (true) {
  stepCount++;
@@ -6466,7 +6512,7 @@ var BaseLlmFlow = class {
  yield event;
  }
  if (!lastEvent || lastEvent.isFinalResponse()) {
- this.logger.info(
+ this.logger.debug(
  `Agent '${invocationContext.agent.name}' finished after ${stepCount} steps.`
  );
  break;
@@ -6496,7 +6542,7 @@ var BaseLlmFlow = class {
  yield event;
  }
  if (invocationContext.endInvocation) {
- this.logger.info("Invocation ended during preprocessing.");
+ this.logger.debug("Invocation ended during preprocessing.");
  return;
  }
  const modelResponseEvent = new Event({
@@ -6611,7 +6657,7 @@ var BaseLlmFlow = class {
  yield functionResponseEvent;
  const transferToAgent = functionResponseEvent.actions?.transferToAgent;
  if (transferToAgent) {
- this.logger.info(`\u{1F504} Live transfer to agent '${transferToAgent}'`);
+ this.logger.debug(`\u{1F504} Live transfer to agent '${transferToAgent}'`);
  const agentToRun = this._getAgentToRun(
  invocationContext,
  transferToAgent
@@ -6650,7 +6696,7 @@ var BaseLlmFlow = class {
  yield functionResponseEvent;
  const transferToAgent = functionResponseEvent.actions?.transferToAgent;
  if (transferToAgent) {
- this.logger.info(`\u{1F504} Transferring to agent '${transferToAgent}'`);
+ this.logger.debug(`\u{1F504} Transferring to agent '${transferToAgent}'`);
  const agentToRun = this._getAgentToRun(
  invocationContext,
  transferToAgent
@@ -7074,8 +7120,6 @@ var BasicLlmRequestProcessor = class extends BaseLlmRequestProcessor {
  llmRequest.liveConnectConfig.realtimeInputConfig = runConfig.realtimeInputConfig;
  llmRequest.liveConnectConfig.enableAffectiveDialog = runConfig.enableAffectiveDialog;
  llmRequest.liveConnectConfig.proactivity = runConfig.proactivity;
- const tools = await agent.canonicalTools();
- llmRequest.appendTools(tools);
  for await (const _ of []) {
  yield _;
  }
@@ -9069,19 +9113,19 @@ var LlmAgent = class _LlmAgent extends BaseAgent {
  * Core logic to run this agent via text-based conversation
  * This matches the Python implementation's _run_async_impl
  */
- async *runAsyncImpl(context) {
+ async *runAsyncImpl(context4) {
  this.logger.debug(`Starting LlmAgent execution for "${this.name}"`);
  try {
- for await (const event of this.llmFlow.runAsync(context)) {
+ for await (const event of this.llmFlow.runAsync(context4)) {
  this.maybeSaveOutputToState(event);
  yield event;
  }
  } catch (error) {
  this.logger.error("Error in LlmAgent execution:", error);
  const errorEvent = new Event({
- invocationId: context.invocationId,
+ invocationId: context4.invocationId,
  author: this.name,
- branch: context.branch,
+ branch: context4.branch,
  content: {
  parts: [
  {
@@ -9349,7 +9393,7 @@ var LangGraphAgent = class extends BaseAgent {
  /**
  * Gets the next nodes to execute based on the current node and its result
  */
- async getNextNodes(currentNode, lastEvent, context) {
+ async getNextNodes(currentNode, lastEvent, context4) {
  if (!currentNode.targets || currentNode.targets.length === 0) {
  return [];
  }
@@ -9361,7 +9405,7 @@ var LangGraphAgent = class extends BaseAgent {
  continue;
  }
  if (targetNode.condition) {
- const shouldExecute = await targetNode.condition(lastEvent, context);
+ const shouldExecute = await targetNode.condition(lastEvent, context4);
  if (!shouldExecute) {
  this.logger.debug(`Skipping node "${targetName}" due to condition`);
  continue;
@@ -9374,7 +9418,7 @@ var LangGraphAgent = class extends BaseAgent {
  /**
  * Core logic to run this agent via text-based conversation.
  */
- async *runAsyncImpl(context) {
+ async *runAsyncImpl(context4) {
  this.logger.debug(
  `Starting graph execution from root node "${this.rootNode}"`
  );
@@ -9396,7 +9440,7 @@ var LangGraphAgent = class extends BaseAgent {
  return;
  }
  let stepCount = 0;
- const nodesToExecute = [{ node: rootNode, context }];
+ const nodesToExecute = [{ node: rootNode, context: context4 }];
  const executedNodes = [];
  let lastEvent = null;
  while (nodesToExecute.length > 0 && stepCount < this.maxSteps) {
@@ -9404,7 +9448,7 @@ var LangGraphAgent = class extends BaseAgent {
  const { node } = nodesToExecute.shift();
  this.logger.debug(`Step ${stepCount}: Executing node "${node.name}"`);
  executedNodes.push(node.name);
- const childContext = context.createChildContext(node.agent);
+ const childContext = context4.createChildContext(node.agent);
  try {
  const nodeEvents = [];
  for await (const event of node.agent.runAsync(childContext)) {
@@ -9417,7 +9461,7 @@ var LangGraphAgent = class extends BaseAgent {
  events: nodeEvents
  });
  if (lastEvent) {
- const nextNodes = await this.getNextNodes(node, lastEvent, context);
+ const nextNodes = await this.getNextNodes(node, lastEvent, context4);
  for (const nextNode of nextNodes) {
  nodesToExecute.push({
  node: nextNode,
@@ -9460,8 +9504,8 @@ var LangGraphAgent = class extends BaseAgent {
  * Core logic to run this agent via video/audio-based conversation.
  * For LangGraph, this follows the same execution pattern as text-based.
  */
- async *runLiveImpl(context) {
- yield* this.runAsyncImpl(context);
+ async *runLiveImpl(context4) {
+ yield* this.runAsyncImpl(context4);
  }
  /**
  * Gets the execution results from the last run
@@ -9514,7 +9558,7 @@ var LangGraphAgent = class extends BaseAgent {
  import { generateId } from "ai";

  // src/runners.ts
- import { SpanStatusCode } from "@opentelemetry/api";
+ import { SpanStatusCode, context as context3, trace as trace3 } from "@opentelemetry/api";

  // src/agents/run-config.ts
  var StreamingMode = /* @__PURE__ */ ((StreamingMode2) => {
@@ -10174,11 +10218,11 @@ var Runner = class {
  runConfig = new RunConfig()
  }) {
  const span = tracer.startSpan("invocation");
+ const spanContext = trace3.setSpan(context3.active(), span);
  try {
- const session = await this.sessionService.getSession(
- this.appName,
- userId,
- sessionId
+ const session = await context3.with(
+ spanContext,
+ () => this.sessionService.getSession(this.appName, userId, sessionId)
  );
  if (!session) {
  throw new Error(`Session not found: ${sessionId}`);
@@ -10188,22 +10232,34 @@ var Runner = class {
  runConfig
  });
  if (newMessage) {
- await this._appendNewMessageToSession(
- session,
- newMessage,
- invocationContext,
- runConfig.saveInputBlobsAsArtifacts || false
+ await context3.with(
+ spanContext,
+ () => this._appendNewMessageToSession(
+ session,
+ newMessage,
+ invocationContext,
+ runConfig.saveInputBlobsAsArtifacts || false
+ )
  );
  }
  invocationContext.agent = this._findAgentToRun(session, this.agent);
- for await (const event of invocationContext.agent.runAsync(
- invocationContext
- )) {
+ const agentGenerator = invocationContext.agent.runAsync(invocationContext);
+ while (true) {
+ const result = await context3.with(
+ spanContext,
+ () => agentGenerator.next()
+ );
+ if (result.done) {
+ break;
+ }
+ const event = result.value;
  if (!event.partial) {
- await this.sessionService.appendEvent(session, event);
- if (this.memoryService) {
- await this.memoryService.addSessionToMemory(session);
- }
+ await context3.with(spanContext, async () => {
+ await this.sessionService.appendEvent(session, event);
+ if (this.memoryService) {
+ await this.memoryService.addSessionToMemory(session);
+ }
+ });
  }
  yield event;
  }
@@ -10818,7 +10874,7 @@ var VertexAiSessionService = class extends BaseSessionService {
  path: `reasoningEngines/${reasoningEngineId}/sessions`,
  request_dict: sessionJsonDict
  });
- console.info("Create Session response", apiResponse);
+ console.debug("Create Session response", apiResponse);
  const createdSessionId = apiResponse.name.split("/").slice(-3, -2)[0];
  const operationId = apiResponse.name.split("/").pop();
  let maxRetryAttempt = 5;
package/package.json CHANGED
@@ -1,9 +1,12 @@
  {
  "name": "@iqai/adk",
- "version": "0.1.21",
+ "version": "0.1.22",
  "description": "Agent Development Kit for TypeScript with multi-provider LLM support",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
+ "bin": {
+ "adk": "./dist/cli/index.js"
+ },
  "repository": {
  "type": "git",
  "url": "https://github.com/IQAIcom/adk-ts.git"
@@ -21,6 +24,7 @@
  "license": "MIT",
  "dependencies": {
  "@anthropic-ai/sdk": "^0.39.0",
+ "@clack/prompts": "^0.11.0",
  "@electric-sql/pglite": "^0.3.2",
  "@google-cloud/storage": "^7.16.0",
  "@google-cloud/vertexai": "^0.5.0",
@@ -34,15 +38,21 @@
  "@opentelemetry/sdk-trace-base": "^2.0.1",
  "@opentelemetry/sdk-trace-node": "^2.0.1",
  "@opentelemetry/semantic-conventions": "^1.34.0",
+ "@types/cors": "^2.8.19",
+ "@types/express": "^4.17.21",
  "ai": "^4.3.16",
  "axios": "^1.6.2",
  "chalk": "^5.4.1",
+ "cors": "^2.8.5",
  "dedent": "^1.6.0",
  "dockerode": "^4.0.7",
  "dotenv": "^16.4.7",
  "drizzle-orm": "^0.43.1",
+ "express": "^4.19.2",
  "kysely": "^0.28.2",
  "openai": "^4.93.0",
+ "socket.io": "^4.8.1",
+ "ts-node": "^10.9.2",
  "uuid": "^11.1.0",
  "zod": "^3.25.67",
  "zod-to-json-schema": "^3.24.6"