@google/adk 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,7 +7,7 @@ import { createPartFromText } from "@google/genai";
 import { trace } from "@opentelemetry/api";
 import { InvocationContext, newInvocationContextId } from "../agents/invocation_context.js";
 import { LlmAgent } from "../agents/llm_agent.js";
-import { RunConfig } from "../agents/run_config.js";
+import { createRunConfig } from "../agents/run_config.js";
 import { createEvent, getFunctionCalls } from "../events/event.js";
 import { createEventActions } from "../events/event_actions.js";
 import { PluginManager } from "../plugins/plugin_manager.js";
@@ -40,9 +40,10 @@ class Runner {
     sessionId,
     newMessage,
     stateDelta,
-    runConfig = new RunConfig()
+    runConfig
   }) {
     var _a;
+    runConfig = createRunConfig(runConfig);
     const span = trace.getTracer("gcp.vertex.agent").startSpan("invocation");
     try {
       const session = await this.sessionService.getSession({ appName: this.appName, userId, sessionId });
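Note: because `runAsync` now normalizes its config through `createRunConfig`, callers can pass a plain partial object (or omit `runConfig` entirely) and still get the documented defaults. A minimal sketch of the new call shape, assuming a constructed `runner` and the usual async-generator return of `runAsync`:

    // runConfig may be omitted or passed as a plain partial object;
    // createRunConfig() fills in the remaining defaults inside runAsync().
    for await (const event of runner.runAsync({
      userId: "user-1",
      sessionId: "session-1",
      newMessage: { role: "user", parts: [{ text: "Hello" }] },
      runConfig: { maxLlmCalls: 10 },
    })) {
      // handle each event
    }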
@@ -15,7 +15,7 @@ export declare enum StreamingMode {
 /**
  * Configs for runtime behavior of agents.
  */
-export declare class RunConfig {
+export interface RunConfig {
     /**
      * Speech configuration for the live agent.
      */
@@ -27,7 +27,7 @@ export declare class RunConfig {
     /**
      * Whether or not to save the input blobs as artifacts.
      */
-    saveInputBlobsAsArtifacts: boolean;
+    saveInputBlobsAsArtifacts?: boolean;
     /**
      * Whether to support CFC (Compositional Function Calling). Only applicable
      * for StreamingMode.SSE. If it's true. the LIVE API will be invoked. Since
@@ -36,11 +36,11 @@ export declare class RunConfig {
     * WARNING: This feature is **experimental** and its API or behavior may
     * change in future releases.
     */
-    supportCfc: boolean;
+    supportCfc?: boolean;
    /**
     * Streaming mode, None or StreamingMode.SSE or StreamingMode.BIDI.
     */
-    streamingMode: StreamingMode;
+    streamingMode?: StreamingMode;
    /**
     * Output audio transcription config.
     */
@@ -53,7 +53,7 @@ export declare class RunConfig {
     * If enabled, the model will detect emotions and adapt its responses
     * accordingly.
     */
-    enableAffectiveDialog: boolean;
+    enableAffectiveDialog?: boolean;
    /**
     * Configures the proactivity of the model. This allows the model to respond
     * proactively to the input and to ignore irrelevant input.
@@ -71,6 +71,18 @@ export declare class RunConfig {
     * calls is enforced, if the value is set in this range.
     * - Less than or equal to 0: This allows for unbounded number of llm calls.
     */
-    maxLlmCalls: number;
-    constructor(params?: Partial<RunConfig>);
+    maxLlmCalls?: number;
 }
+export declare function createRunConfig(params?: Partial<RunConfig>): {
+    speechConfig?: SpeechConfig | undefined;
+    responseModalities?: Modality[] | undefined;
+    saveInputBlobsAsArtifacts: boolean;
+    supportCfc: boolean;
+    streamingMode: StreamingMode;
+    outputAudioTranscription?: AudioTranscriptionConfig | undefined;
+    inputAudioTranscription?: AudioTranscriptionConfig | undefined;
+    enableAffectiveDialog: boolean;
+    proactivity?: ProactivityConfig | undefined;
+    realtimeInputConfig?: RealtimeInputConfig | undefined;
+    maxLlmCalls: number;
+};
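Note: `RunConfig` is now a plain interface with optional fields, so `new RunConfig(...)` from 0.1.0 no longer compiles. A minimal migration sketch; per this diff, `createRunConfig` is declared in `agents/run_config.d.ts` but not re-exported from the package root, so most callers should simply pass an object literal:

    import { StreamingMode } from "@google/adk";
    import type { RunConfig } from "@google/adk";

    // Before (0.1.0): const config = new RunConfig({ streamingMode: StreamingMode.SSE });
    // After (0.1.2): an object literal satisfies the interface; the Runner
    // applies defaults (supportCfc: false, maxLlmCalls: 500, ...) internally.
    const config: RunConfig = { streamingMode: StreamingMode.SSE };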
@@ -13,16 +13,19 @@ export { LlmAgent } from './agents/llm_agent.js';
 export type { AfterModelCallback, AfterToolCallback, BeforeModelCallback, BeforeToolCallback, SingleAfterModelCallback, SingleAfterToolCallback, SingleBeforeModelCallback, SingleBeforeToolCallback } from './agents/llm_agent.js';
 export { LoopAgent } from './agents/loop_agent.js';
 export { ParallelAgent } from './agents/parallel_agent.js';
-export { RunConfig, StreamingMode } from './agents/run_config.js';
+export type { RunConfig } from './agents/run_config.js';
+export { StreamingMode } from './agents/run_config.js';
 export { SequentialAgent } from './agents/sequential_agent.js';
 export { InMemoryArtifactService } from './artifacts/in_memory_artifact_service.js';
 export type { BaseCredentialService } from './auth/credential_service/base_credential_service.js';
-export { createEvent, getFunctionCalls, getFunctionResponses, hasTrailingCodeExecutionResult, isFinalResponse } from './events/event.js';
+export { createEvent, getFunctionCalls, getFunctionResponses, hasTrailingCodeExecutionResult, isFinalResponse, stringifyContent } from './events/event.js';
 export type { Event } from './events/event.js';
 export type { EventActions } from './events/event_actions.js';
 export { InMemoryMemoryService } from './memory/in_memory_memory_service.js';
 export { BaseLlm } from './models/base_llm.js';
 export type { BaseLlmConnection } from './models/base_llm_connection.js';
+export { Gemini } from './models/google_llm.js';
+export type { GeminiParams } from './models/google_llm.js';
 export type { LlmRequest } from './models/llm_request.js';
 export type { LlmResponse } from './models/llm_response.js';
 export { BasePlugin } from './plugins/base_plugin.js';
@@ -75,6 +75,13 @@ export declare function getFunctionResponses(event: Event): FunctionResponse[];
 * Returns whether the event has a trailing code execution result.
 */
export declare function hasTrailingCodeExecutionResult(event: Event): boolean;
+/**
+ * Extracts and concatenates all text from the parts of a `Event` object.
+ * @param event The `Event` object to process.
+ *
+ * @returns A single string with the combined text.
+ */
+export declare function stringifyContent(event: Event): string;
/**
 * Generates a new unique ID for the event.
 */
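Note: a minimal usage sketch for the new `stringifyContent` helper, assuming `event` comes from a runner loop:

    import { stringifyContent } from "@google/adk";

    // Joins the text of every part in event.content.parts;
    // returns "" when the event has no content or no parts.
    const text = stringifyContent(event);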
@@ -26,10 +26,11 @@ export declare abstract class BaseLlm {
     * Generates one content from the given contents and tools.
     *
     * @param llmRequest LlmRequest, the request to send to the LLM.
+    * @param stream whether to do streaming call.
     * For non-streaming call, it will only yield one Content.
     * @return A generator of LlmResponse.
     */
-    abstract generateContentAsync(llmRequest: LlmRequest): AsyncGenerator<LlmResponse, void>;
+    abstract generateContentAsync(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void>;
     /**
      * Creates a live connection to the LLM.
      *
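Note: custom models extending `BaseLlm` should account for the widened signature. A hedged sketch of a subclass honoring the optional `stream` flag; `myClient` and `toLlmResponse` are hypothetical placeholders, and the class's other abstract members are omitted:

    class MyLlm extends BaseLlm {
      async *generateContentAsync(
        llmRequest: LlmRequest,
        stream?: boolean
      ): AsyncGenerator<LlmResponse, void> {
        if (stream) {
          // streaming: yield one LlmResponse per chunk
          for await (const chunk of myClient.streamGenerate(llmRequest)) {
            yield toLlmResponse(chunk);
          }
        } else {
          // non-streaming: yield exactly one LlmResponse
          yield toLlmResponse(await myClient.generate(llmRequest));
        }
      }
    }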
@@ -17,9 +17,7 @@ class InvocationCostManager {
   incrementAndEnforceLlmCallsLimit(runConfig) {
     this.numberOfLlmCalls++;
     if (runConfig && runConfig.maxLlmCalls > 0 && this.numberOfLlmCalls > runConfig.maxLlmCalls) {
-      throw new Error(
-        "Max number of llm calls limit of ".concat(runConfig.maxLlmCalls, " exceeded")
-      );
+      throw new Error("Max number of llm calls limit of ".concat(runConfig.maxLlmCalls, " exceeded"));
     }
   }
 }
@@ -53,6 +53,7 @@ import { getContents, getCurrentTurnContents } from "./content_processor_utils.j
 import { generateAuthEvent, generateRequestConfirmationEvent, getLongRunningFunctionCalls, handleFunctionCallList, handleFunctionCallsAsync, populateClientFunctionCallId, REQUEST_CONFIRMATION_FUNCTION_CALL_NAME } from "./functions.js";
 import { injectSessionState } from "./instructions.js";
 import { ReadonlyContext } from "./readonly_context.js";
+import { StreamingMode } from "./run_config.js";
 const ADK_AGENT_NAME_LABEL_KEY = "adk_agent_name";
 async function convertToolUnionToTools(toolUnion, context) {
   if (toolUnion instanceof BaseTool) {
@@ -831,7 +832,7 @@ class LlmAgent extends BaseAgent {
   }
   callLlmAsync(invocationContext, llmRequest, modelResponseEvent) {
     return __asyncGenerator(this, null, function* () {
-      var _a, _b, _c, _d;
+      var _a, _b, _c, _d, _e;
       const beforeModelResponse = yield new __await(this.handleBeforeModelCallback(
         invocationContext,
         llmRequest,
@@ -851,7 +852,11 @@ class LlmAgent extends BaseAgent {
         throw new Error("CFC is not yet supported in callLlmAsync");
       } else {
         invocationContext.incrementLlmCallCount();
-        const responsesGenerator = llm.generateContentAsync(llmRequest);
+        const responsesGenerator = llm.generateContentAsync(
+          llmRequest,
+          /* stream= */
+          ((_e = invocationContext.runConfig) == null ? void 0 : _e.streamingMode) === StreamingMode.SSE
+        );
         try {
           for (var iter = __forAwait(this.runAndHandleError(
             responsesGenerator,
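Note: the `stream` argument is derived entirely from `runConfig.streamingMode`, so selecting `StreamingMode.SSE` is what flips the underlying LLM call to streaming. A minimal sketch, reusing the `runAsync` call shape above:

    // StreamingMode.SSE makes callLlmAsync pass stream=true to
    // llm.generateContentAsync(); any other mode passes stream=false.
    for await (const event of runner.runAsync({
      userId,
      sessionId,
      newMessage,
      runConfig: { streamingMode: StreamingMode.SSE },
    })) {
      // in SSE mode, model output may arrive across multiple events
    }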
@@ -1,3 +1,19 @@
+var __defProp = Object.defineProperty;
+var __getOwnPropSymbols = Object.getOwnPropertySymbols;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __propIsEnum = Object.prototype.propertyIsEnumerable;
+var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
+var __spreadValues = (a, b) => {
+  for (var prop in b || (b = {}))
+    if (__hasOwnProp.call(b, prop))
+      __defNormalProp(a, prop, b[prop]);
+  if (__getOwnPropSymbols)
+    for (var prop of __getOwnPropSymbols(b)) {
+      if (__propIsEnum.call(b, prop))
+        __defNormalProp(a, prop, b[prop]);
+    }
+  return a;
+};
 /**
  * @license
  * Copyright 2025 Google LLC
@@ -10,19 +26,14 @@ var StreamingMode = /* @__PURE__ */ ((StreamingMode2) => {
   StreamingMode2["BIDI"] = "bidi";
   return StreamingMode2;
 })(StreamingMode || {});
-class RunConfig {
-  constructor(params = {}) {
-    this.speechConfig = params.speechConfig;
-    this.responseModalities = params.responseModalities;
-    this.saveInputBlobsAsArtifacts = params.saveInputBlobsAsArtifacts || false;
-    this.supportCfc = params.supportCfc || false;
-    this.streamingMode = params.streamingMode || "none" /* NONE */;
-    this.outputAudioTranscription = params.outputAudioTranscription;
-    this.inputAudioTranscription = params.inputAudioTranscription;
-    this.enableAffectiveDialog = params.enableAffectiveDialog || false;
-    this.realtimeInputConfig = params.realtimeInputConfig;
-    this.maxLlmCalls = validateMaxLlmCalls(params.maxLlmCalls || 500);
-  }
+function createRunConfig(params = {}) {
+  return __spreadValues({
+    saveInputBlobsAsArtifacts: false,
+    supportCfc: false,
+    enableAffectiveDialog: false,
+    streamingMode: "none" /* NONE */,
+    maxLlmCalls: validateMaxLlmCalls(params.maxLlmCalls || 500)
+  }, params);
 }
 function validateMaxLlmCalls(value) {
   if (value > Number.MAX_SAFE_INTEGER) {
@@ -38,6 +49,6 @@ function validateMaxLlmCalls(value) {
   return value;
 }
 export {
-  RunConfig,
-  StreamingMode
+  StreamingMode,
+  createRunConfig
 };
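Note: `__spreadValues` is the downleveled form of object spread, so `createRunConfig` behaves like `{ ...defaults, ...params }`: caller-supplied fields override the defaults, and `validateMaxLlmCalls` still throws on an out-of-range `maxLlmCalls` before the object is built. A behaviorally equivalent sketch in modern syntax:

    function createRunConfig(params: Partial<RunConfig> = {}) {
      return {
        saveInputBlobsAsArtifacts: false,
        supportCfc: false,
        enableAffectiveDialog: false,
        streamingMode: StreamingMode.NONE,
        maxLlmCalls: validateMaxLlmCalls(params.maxLlmCalls || 500),
        ...params, // caller values win over the defaults above
      };
    }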
@@ -11,12 +11,13 @@ import { LiveRequestQueue } from "./agents/live_request_queue.js";
 import { LlmAgent } from "./agents/llm_agent.js";
 import { LoopAgent } from "./agents/loop_agent.js";
 import { ParallelAgent } from "./agents/parallel_agent.js";
-import { RunConfig, StreamingMode } from "./agents/run_config.js";
+import { StreamingMode } from "./agents/run_config.js";
 import { SequentialAgent } from "./agents/sequential_agent.js";
 import { InMemoryArtifactService } from "./artifacts/in_memory_artifact_service.js";
-import { createEvent, getFunctionCalls, getFunctionResponses, hasTrailingCodeExecutionResult, isFinalResponse } from "./events/event.js";
+import { createEvent, getFunctionCalls, getFunctionResponses, hasTrailingCodeExecutionResult, isFinalResponse, stringifyContent } from "./events/event.js";
 import { InMemoryMemoryService } from "./memory/in_memory_memory_service.js";
 import { BaseLlm } from "./models/base_llm.js";
+import { Gemini } from "./models/google_llm.js";
 import { BasePlugin } from "./plugins/base_plugin.js";
 import { LoggingPlugin } from "./plugins/logging_plugin.js";
 import { PluginManager } from "./plugins/plugin_manager.js";
@@ -49,6 +50,7 @@ export {
   CallbackContext,
   FunctionTool,
   GOOGLE_SEARCH,
+  Gemini,
   InMemoryArtifactService,
   InMemoryMemoryService,
   InMemoryPolicyEngine,
@@ -65,7 +67,6 @@ export {
   PluginManager,
   PolicyOutcome,
   REQUEST_CONFIRMATION_FUNCTION_CALL_NAME,
-  RunConfig,
   Runner,
   SecurityPlugin,
   SequentialAgent,
@@ -81,5 +82,6 @@ export {
   hasTrailingCodeExecutionResult,
   isFinalResponse,
   setLogLevel,
+  stringifyContent,
   zodObjectToSchema
 };
@@ -70,6 +70,16 @@ function hasTrailingCodeExecutionResult(event) {
   }
   return false;
 }
+function stringifyContent(event) {
+  var _a;
+  if (!((_a = event.content) == null ? void 0 : _a.parts)) {
+    return "";
+  }
+  return event.content.parts.map((part) => {
+    var _a2;
+    return (_a2 = part.text) != null ? _a2 : "";
+  }).join("");
+}
 const ASCII_LETTERS_AND_NUMBERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
 function createNewEventId() {
   let id = "";
@@ -86,5 +96,6 @@ export {
   getFunctionCalls,
   getFunctionResponses,
   hasTrailingCodeExecutionResult,
-  isFinalResponse
+  isFinalResponse,
+  stringifyContent
 };