@google/adk 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/dist/cjs/agents/functions.js +23 -32
  2. package/dist/cjs/agents/invocation_context.js +1 -3
  3. package/dist/cjs/agents/llm_agent.js +7 -2
  4. package/dist/cjs/agents/run_config.js +13 -17
  5. package/dist/cjs/artifacts/gcs_artifact_service.js +140 -0
  6. package/dist/cjs/common.js +3 -2
  7. package/dist/cjs/index.js +5 -5
  8. package/dist/cjs/index.js.map +4 -4
  9. package/dist/cjs/runner/runner.js +2 -1
  10. package/dist/cjs/sessions/database_session_service.js +52 -0
  11. package/dist/esm/agents/functions.js +23 -32
  12. package/dist/esm/agents/invocation_context.js +1 -3
  13. package/dist/esm/agents/llm_agent.js +7 -2
  14. package/dist/esm/agents/run_config.js +11 -15
  15. package/dist/esm/artifacts/gcs_artifact_service.js +110 -0
  16. package/dist/esm/common.js +3 -2
  17. package/dist/esm/index.js +5 -5
  18. package/dist/esm/index.js.map +4 -4
  19. package/dist/esm/runner/runner.js +3 -2
  20. package/dist/esm/sessions/database_session_service.js +22 -0
  21. package/dist/types/agents/run_config.d.ts +19 -7
  22. package/dist/types/artifacts/gcs_artifact_service.d.ts +16 -0
  23. package/dist/types/common.d.ts +4 -1
  24. package/dist/types/index.d.ts +1 -0
  25. package/dist/types/models/base_llm.d.ts +7 -4
  26. package/dist/types/models/google_llm.d.ts +1 -1
  27. package/dist/types/models/registry.d.ts +6 -2
  28. package/dist/types/sessions/database_session_service.d.ts +10 -0
  29. package/dist/web/agents/functions.js +23 -32
  30. package/dist/web/agents/invocation_context.js +1 -3
  31. package/dist/web/agents/llm_agent.js +7 -2
  32. package/dist/web/agents/run_config.js +26 -15
  33. package/dist/web/artifacts/gcs_artifact_service.js +126 -0
  34. package/dist/web/common.js +3 -2
  35. package/dist/web/index.js +1 -1
  36. package/dist/web/index.js.map +4 -4
  37. package/dist/web/runner/runner.js +3 -2
  38. package/dist/web/sessions/database_session_service.js +22 -0
  39. package/package.json +2 -1
@@ -7,7 +7,7 @@ import { createPartFromText } from "@google/genai";
  import { trace } from "@opentelemetry/api";
  import { InvocationContext, newInvocationContextId } from "../agents/invocation_context.js";
  import { LlmAgent } from "../agents/llm_agent.js";
- import { RunConfig } from "../agents/run_config.js";
+ import { createRunConfig } from "../agents/run_config.js";
  import { createEvent, getFunctionCalls } from "../events/event.js";
  import { createEventActions } from "../events/event_actions.js";
  import { PluginManager } from "../plugins/plugin_manager.js";
@@ -40,9 +40,10 @@ class Runner {
  sessionId,
  newMessage,
  stateDelta,
- runConfig = new RunConfig()
+ runConfig
  }) {
  var _a;
+ runConfig = createRunConfig(runConfig);
  const span = trace.getTracer("gcp.vertex.agent").startSpan("invocation");
  try {
  const session = await this.sessionService.getSession({ appName: this.appName, userId, sessionId });
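
A consumer-facing consequence of this hunk: runConfig no longer defaults to `new RunConfig()`; the Runner accepts a plain object (or nothing) and normalizes it with createRunConfig. A minimal TypeScript sketch, assuming the enclosing method is the Runner's runAsync (its name sits outside this hunk) and that runner and newMessage were set up earlier; identifiers are hypothetical:

    // 0.1.1: runConfig: new RunConfig({ maxLlmCalls: 50 })
    // 0.1.3: a plain object literal (or omitting runConfig entirely) is enough
    for await (const event of runner.runAsync({
      userId: "user-1",
      sessionId: "session-1",
      newMessage,
      runConfig: { maxLlmCalls: 50 },
    })) {
      // handle events
    }
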
@@ -0,0 +1,22 @@
+ import { BaseSessionService } from "./base_session_service.js";
+ class DatabaseSessionService extends BaseSessionService {
+ constructor(dbUrl) {
+ super();
+ this.dbUrl = dbUrl;
+ }
+ createSession(request) {
+ throw new Error("Method not implemented.");
+ }
+ getSession(request) {
+ throw new Error("Method not implemented.");
+ }
+ listSessions(request) {
+ throw new Error("Method not implemented.");
+ }
+ deleteSession(request) {
+ throw new Error("Method not implemented.");
+ }
+ }
+ export {
+ DatabaseSessionService
+ };
@@ -15,7 +15,7 @@ export declare enum StreamingMode {
  /**
  * Configs for runtime behavior of agents.
  */
- export declare class RunConfig {
+ export interface RunConfig {
  /**
  * Speech configuration for the live agent.
  */
@@ -27,7 +27,7 @@ export declare class RunConfig {
  /**
  * Whether or not to save the input blobs as artifacts.
  */
- saveInputBlobsAsArtifacts: boolean;
+ saveInputBlobsAsArtifacts?: boolean;
  /**
  * Whether to support CFC (Compositional Function Calling). Only applicable
  * for StreamingMode.SSE. If it's true. the LIVE API will be invoked. Since
@@ -36,11 +36,11 @@ export declare class RunConfig {
  * WARNING: This feature is **experimental** and its API or behavior may
  * change in future releases.
  */
- supportCfc: boolean;
+ supportCfc?: boolean;
  /**
  * Streaming mode, None or StreamingMode.SSE or StreamingMode.BIDI.
  */
- streamingMode: StreamingMode;
+ streamingMode?: StreamingMode;
  /**
  * Output audio transcription config.
  */
@@ -53,7 +53,7 @@ export declare class RunConfig {
  /**
  * If enabled, the model will detect emotions and adapt its responses
  * accordingly.
  */
- enableAffectiveDialog: boolean;
+ enableAffectiveDialog?: boolean;
  /**
  * Configures the proactivity of the model. This allows the model to respond
  * proactively to the input and to ignore irrelevant input.
@@ -71,6 +71,18 @@ export declare class RunConfig {
  * calls is enforced, if the value is set in this range.
  * - Less than or equal to 0: This allows for unbounded number of llm calls.
  */
- maxLlmCalls: number;
- constructor(params?: Partial<RunConfig>);
+ maxLlmCalls?: number;
  }
+ export declare function createRunConfig(params?: Partial<RunConfig>): {
+ speechConfig?: SpeechConfig | undefined;
+ responseModalities?: Modality[] | undefined;
+ saveInputBlobsAsArtifacts: boolean;
+ supportCfc: boolean;
+ streamingMode: StreamingMode;
+ outputAudioTranscription?: AudioTranscriptionConfig | undefined;
+ inputAudioTranscription?: AudioTranscriptionConfig | undefined;
+ enableAffectiveDialog: boolean;
+ proactivity?: ProactivityConfig | undefined;
+ realtimeInputConfig?: RealtimeInputConfig | undefined;
+ maxLlmCalls: number;
+ };
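
All RunConfig fields are now optional at the type level, and the new createRunConfig declaration supplies the defaults (false for the boolean flags, "none" for streamingMode, 500 for maxLlmCalls, per the implementation further down). A hedged sketch of how consumer code can type a config now; note that the entry-point hunks below still only re-export the RunConfig type and StreamingMode, not createRunConfig itself:

    import type { RunConfig } from "@google/adk";
    import { StreamingMode } from "@google/adk";

    // Anything left unset here is filled in by createRunConfig inside the library.
    const config: RunConfig = {
      streamingMode: StreamingMode.SSE,
      maxLlmCalls: 100,
    };
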
@@ -0,0 +1,16 @@
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+ import { Part } from '@google/genai';
+ import { BaseArtifactService, DeleteArtifactRequest, ListArtifactKeysRequest, ListVersionsRequest, LoadArtifactRequest, SaveArtifactRequest } from './base_artifact_service.js';
+ export declare class GcsArtifactService implements BaseArtifactService {
+ private readonly bucket;
+ constructor(bucket: string);
+ saveArtifact(request: SaveArtifactRequest): Promise<number>;
+ loadArtifact(request: LoadArtifactRequest): Promise<Part | undefined>;
+ listArtifactKeys(request: ListArtifactKeysRequest): Promise<string[]>;
+ deleteArtifact(request: DeleteArtifactRequest): Promise<void>;
+ listVersions(request: ListVersionsRequest): Promise<number[]>;
+ }
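
GcsArtifactService is re-exported from the package entry point via the `export * from './artifacts/gcs_artifact_service.js'` line added a few hunks below, so a consumer could use it roughly as follows. The request fields are inferred from the dist implementation later in this diff (appName/userId/sessionId/filename plus an artifact Part); bucket name and identifiers are hypothetical:

    import { GcsArtifactService } from "@google/adk";
    import { createPartFromText } from "@google/genai";

    const artifacts = new GcsArtifactService("my-adk-artifacts");
    const version = await artifacts.saveArtifact({
      appName: "my-app",
      userId: "user-1",
      sessionId: "session-1",
      filename: "notes.txt",
      artifact: createPartFromText("hello"),
    });
    const part = await artifacts.loadArtifact({
      appName: "my-app",
      userId: "user-1",
      sessionId: "session-1",
      filename: "notes.txt",
      version,
    });
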
@@ -13,13 +13,15 @@ export { LlmAgent } from './agents/llm_agent.js';
  export type { AfterModelCallback, AfterToolCallback, BeforeModelCallback, BeforeToolCallback, SingleAfterModelCallback, SingleAfterToolCallback, SingleBeforeModelCallback, SingleBeforeToolCallback } from './agents/llm_agent.js';
  export { LoopAgent } from './agents/loop_agent.js';
  export { ParallelAgent } from './agents/parallel_agent.js';
- export { RunConfig, StreamingMode } from './agents/run_config.js';
+ export type { RunConfig } from './agents/run_config.js';
+ export { StreamingMode } from './agents/run_config.js';
  export { SequentialAgent } from './agents/sequential_agent.js';
  export { InMemoryArtifactService } from './artifacts/in_memory_artifact_service.js';
  export type { BaseCredentialService } from './auth/credential_service/base_credential_service.js';
  export { createEvent, getFunctionCalls, getFunctionResponses, hasTrailingCodeExecutionResult, isFinalResponse, stringifyContent } from './events/event.js';
  export type { Event } from './events/event.js';
  export type { EventActions } from './events/event_actions.js';
+ export { createEventActions } from './events/event_actions.js';
  export { InMemoryMemoryService } from './memory/in_memory_memory_service.js';
  export { BaseLlm } from './models/base_llm.js';
  export type { BaseLlmConnection } from './models/base_llm_connection.js';
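
RunConfig drops from a value export to a type-only export here, which is the main breaking change for consumers: `new RunConfig(...)` no longer exists at the package root, and plain-JavaScript callers lose the symbol entirely at runtime. A short sketch of what still resolves, assuming these re-exports land in the published entry point as the hunks indicate (LLMRegistry is added in the following hunk):

    // Type-only in 0.1.3; this import is erased at compile time.
    import type { RunConfig } from "@google/adk";
    // Runtime values still (or newly) exported from the root:
    import { StreamingMode, createEventActions, LLMRegistry } from "@google/adk";
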
@@ -27,6 +29,7 @@ export { Gemini } from './models/google_llm.js';
  export type { GeminiParams } from './models/google_llm.js';
  export type { LlmRequest } from './models/llm_request.js';
  export type { LlmResponse } from './models/llm_response.js';
+ export { LLMRegistry } from './models/registry.js';
  export { BasePlugin } from './plugins/base_plugin.js';
  export { LoggingPlugin } from './plugins/logging_plugin.js';
  export { PluginManager } from './plugins/plugin_manager.js';
@@ -7,3 +7,4 @@ export * from './common.js';
  export * from './tools/mcp/mcp_session_manager.js';
  export * from './tools/mcp/mcp_tool.js';
  export * from './tools/mcp/mcp_toolset.js';
+ export * from './artifacts/gcs_artifact_service.js';
@@ -13,11 +13,13 @@ export declare abstract class BaseLlm {
  readonly model: string;
  /**
  * Creates an instance of BaseLLM.
- *
- * @param model The name of the LLM, e.g. gemini-1.5-flash or
+ * @param params The parameters for creating a BaseLlm instance.
+ * @param params.model The name of the LLM, e.g. gemini-1.5-flash or
  * gemini-1.5-flash-001.
  */
- constructor(model: string);
+ constructor({ model }: {
+ model: string;
+ });
  /**
  * List of supported models in regex for LlmRegistry.
  */
@@ -26,10 +28,11 @@ export declare abstract class BaseLlm {
  * Generates one content from the given contents and tools.
  *
  * @param llmRequest LlmRequest, the request to send to the LLM.
+ * @param stream whether to do streaming call.
  * For non-streaming call, it will only yield one Content.
  * @return A generator of LlmResponse.
  */
- abstract generateContentAsync(llmRequest: LlmRequest): AsyncGenerator<LlmResponse, void>;
+ abstract generateContentAsync(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void>;
  /**
  * Creates a live connection to the LLM.
  *
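
The constructor now takes an object instead of a bare model string, and generateContentAsync gains an optional stream flag. A hypothetical subclass sketch showing the new shapes; it assumes generateContentAsync is the only abstract member a subclass must provide (BaseLlm's live-connection method and any other members sit outside this hunk):

    import { BaseLlm } from "@google/adk";
    import type { LlmRequest, LlmResponse } from "@google/adk";

    class EchoLlm extends BaseLlm {
      // Shape taken from the registry typings elsewhere in this diff.
      static readonly supportedModels: Array<string | RegExp> = [/^echo-/];

      constructor({ model }: { model: string }) {
        super({ model }); // 0.1.3: object-style constructor
      }

      async *generateContentAsync(
        llmRequest: LlmRequest,
        stream?: boolean
      ): AsyncGenerator<LlmResponse, void> {
        // A real implementation would call a model here and honor `stream`;
        // this sketch yields nothing.
      }
    }
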
@@ -52,7 +52,7 @@ export declare class Gemini extends BaseLlm {
  /**
  * @param params The parameters for creating a Gemini instance.
  */
- constructor({ model, apiKey, vertexai, project, location, headers }?: GeminiParams);
+ constructor({ model, apiKey, vertexai, project, location, headers, }: GeminiParams);
  /**
  * A list of model name patterns that are supported by this LLM.
  *
@@ -8,7 +8,9 @@ import { BaseLlm } from './base_llm.js';
  * type[BaseLlm] equivalent in TypeScript, represents a class that can be new-ed
  * to create a BaseLlm instance.
  */
- export type BaseLlmType = (new (model: string) => BaseLlm) & {
+ export type BaseLlmType = (new (params: {
+ model: string;
+ }) => BaseLlm) & {
  readonly supportedModels: Array<string | RegExp>;
  };
  /**
@@ -32,7 +34,9 @@ export declare class LLMRegistry {
  * Registers a new LLM class.
  * @param llmCls The class that implements the model.
  */
- static register<T extends BaseLlm>(llmCls: (new (model: string) => T) & {
+ static register<T extends BaseLlm>(llmCls: (new (params: {
+ model: string;
+ }) => T) & {
  readonly supportedModels: Array<string | RegExp>;
  }): void;
  /**
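
LLMRegistry.register now expects the same object-style constructor. Continuing the hypothetical EchoLlm sketch from the base_llm.d.ts hunk above (LLMRegistry itself is newly re-exported from the package root in this release):

    import { LLMRegistry } from "@google/adk";

    LLMRegistry.register(EchoLlm);
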
@@ -0,0 +1,10 @@
+ import { BaseSessionService, CreateSessionRequest, DeleteSessionRequest, GetSessionRequest, ListSessionsRequest, ListSessionsResponse } from './base_session_service.js';
+ import { Session } from './session.js';
+ export declare class DatabaseSessionService extends BaseSessionService {
+ private readonly dbUrl;
+ constructor(dbUrl: string);
+ createSession(request: CreateSessionRequest): Promise<Session>;
+ getSession(request: GetSessionRequest): Promise<Session | undefined>;
+ listSessions(request: ListSessionsRequest): Promise<ListSessionsResponse>;
+ deleteSession(request: DeleteSessionRequest): Promise<void>;
+ }
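
These typings describe a complete session service, but the shipped 0.1.3 implementation (the dist JS hunk near the top of this diff) is a stub whose methods all throw "Method not implemented.", and none of the entry-point hunks here re-export the class. A hedged sketch; the deep-import specifier is purely hypothetical since the package's exports map is not part of this diff:

    // Hypothetical deep import; the exact specifier depends on the package's exports map.
    import { DatabaseSessionService } from "@google/adk/dist/esm/sessions/database_session_service.js";

    const sessions = new DatabaseSessionService("postgresql://localhost/adk"); // hypothetical URL
    // await sessions.createSession({ appName: "my-app", userId: "user-1" });
    // -> currently throws "Method not implemented."
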
@@ -3,6 +3,7 @@
  * Copyright 2025 Google LLC
  * SPDX-License-Identifier: Apache-2.0
  */
+ import { createUserContent } from "@google/genai";
  import { createEvent, getFunctionCalls } from "../events/event.js";
  import { mergeEventActions } from "../events/event_actions.js";
  import { ToolContext } from "../tools/tool_context.js";
@@ -126,30 +127,6 @@ async function callToolAsync(tool, args, toolContext) {
  logger.debug("callToolAsync ".concat(tool.name));
  return await tool.runAsync({ args, toolContext });
  }
- function buildResponseEvent(tool, functionResult, toolContext, invocationContext) {
- let responseResult = functionResult;
- if (typeof functionResult !== "object" || functionResult == null) {
- responseResult = { result: functionResult };
- }
- const partFunctionResponse = {
- functionResponse: {
- name: tool.name,
- response: responseResult,
- id: toolContext.functionCallId
- }
- };
- const content = {
- role: "user",
- parts: [partFunctionResponse]
- };
- return createEvent({
- invocationId: invocationContext.invocationId,
- author: invocationContext.agent.name,
- content,
- actions: toolContext.actions,
- branch: invocationContext.branch
- });
- }
  async function handleFunctionCallsAsync({
  invocationContext,
  functionCallEvent,
@@ -200,6 +177,7 @@ async function handleFunctionCallList({
  logger.debug("execute_tool ".concat(tool.name));
  const functionArgs = (_a = functionCall.args) != null ? _a : {};
  let functionResponse = null;
+ let functionResponseError;
  functionResponse = await invocationContext.pluginManager.runBeforeToolCallback({
  tool,
  toolArgs: functionArgs,
@@ -236,10 +214,11 @@ async function handleFunctionCallList({
  );
  if (onToolErrorResponse) {
  functionResponse = onToolErrorResponse;
+ } else {
+ functionResponseError = e.message;
  }
  } else {
- logger.error("Unknown error on tool execution type", e);
- throw e;
+ functionResponseError = e;
  }
  }
  }
@@ -268,12 +247,24 @@ async function handleFunctionCallList({
  if (tool.isLongRunning && !functionResponse) {
  continue;
  }
- const functionResponseEvent = buildResponseEvent(
- tool,
- functionResponse,
- toolContext,
- invocationContext
- );
+ if (functionResponseError) {
+ functionResponse = { error: functionResponseError };
+ } else if (typeof functionResponse !== "object" || functionResponse == null) {
+ functionResponse = { result: functionResponse };
+ }
+ const functionResponseEvent = createEvent({
+ invocationId: invocationContext.invocationId,
+ author: invocationContext.agent.name,
+ content: createUserContent({
+ functionResponse: {
+ id: toolContext.functionCallId,
+ name: tool.name,
+ response: functionResponse
+ }
+ }),
+ actions: toolContext.actions,
+ branch: invocationContext.branch
+ });
  logger.debug("traceToolCall", {
  tool: tool.name,
  args: functionArgs,
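
Net behavioral change across the last three hunks: a tool error is no longer rethrown out of the function-call handler; it is captured in functionResponseError and returned to the model as the function response. A hypothetical tool for illustration (all names invented):

    const flakyTool = {
      name: "flaky",
      isLongRunning: false,
      async runAsync() {
        throw new Error("backend unavailable");
      },
    };
    // 0.1.1: the error propagated out of handleFunctionCallList.
    // 0.1.3: the emitted function-response event instead carries roughly
    //   { functionResponse: { name: "flaky", response: { error: ... } } }
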
@@ -17,9 +17,7 @@ class InvocationCostManager {
  incrementAndEnforceLlmCallsLimit(runConfig) {
  this.numberOfLlmCalls++;
  if (runConfig && runConfig.maxLlmCalls > 0 && this.numberOfLlmCalls > runConfig.maxLlmCalls) {
- throw new Error(
- "Max number of llm calls limit of ".concat(runConfig.maxLlmCalls, " exceeded")
- );
+ throw new Error("Max number of llm calls limit of ".concat(runConfig.maxLlmCalls, " exceeded"));
  }
  }
  }
@@ -53,6 +53,7 @@ import { getContents, getCurrentTurnContents } from "./content_processor_utils.j
  import { generateAuthEvent, generateRequestConfirmationEvent, getLongRunningFunctionCalls, handleFunctionCallList, handleFunctionCallsAsync, populateClientFunctionCallId, REQUEST_CONFIRMATION_FUNCTION_CALL_NAME } from "./functions.js";
  import { injectSessionState } from "./instructions.js";
  import { ReadonlyContext } from "./readonly_context.js";
+ import { StreamingMode } from "./run_config.js";
  const ADK_AGENT_NAME_LABEL_KEY = "adk_agent_name";
  async function convertToolUnionToTools(toolUnion, context) {
  if (toolUnion instanceof BaseTool) {
@@ -831,7 +832,7 @@ class LlmAgent extends BaseAgent {
  }
  callLlmAsync(invocationContext, llmRequest, modelResponseEvent) {
  return __asyncGenerator(this, null, function* () {
- var _a, _b, _c, _d;
+ var _a, _b, _c, _d, _e;
  const beforeModelResponse = yield new __await(this.handleBeforeModelCallback(
  invocationContext,
  llmRequest,
@@ -851,7 +852,11 @@ class LlmAgent extends BaseAgent {
  throw new Error("CFC is not yet supported in callLlmAsync");
  } else {
  invocationContext.incrementLlmCallCount();
- const responsesGenerator = llm.generateContentAsync(llmRequest);
+ const responsesGenerator = llm.generateContentAsync(
+ llmRequest,
+ /* stream= */
+ ((_e = invocationContext.runConfig) == null ? void 0 : _e.streamingMode) === StreamingMode.SSE
+ );
  try {
  for (var iter = __forAwait(this.runAndHandleError(
  responsesGenerator,
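
De-minified, the new call amounts to the following: selecting StreamingMode.SSE in the run config is what switches the model call into streaming.

    // Source-form equivalent of the compiled change above:
    const stream = invocationContext.runConfig?.streamingMode === StreamingMode.SSE;
    const responsesGenerator = llm.generateContentAsync(llmRequest, /* stream= */ stream);
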
@@ -1,3 +1,19 @@
+ var __defProp = Object.defineProperty;
+ var __getOwnPropSymbols = Object.getOwnPropertySymbols;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __propIsEnum = Object.prototype.propertyIsEnumerable;
+ var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
+ var __spreadValues = (a, b) => {
+ for (var prop in b || (b = {}))
+ if (__hasOwnProp.call(b, prop))
+ __defNormalProp(a, prop, b[prop]);
+ if (__getOwnPropSymbols)
+ for (var prop of __getOwnPropSymbols(b)) {
+ if (__propIsEnum.call(b, prop))
+ __defNormalProp(a, prop, b[prop]);
+ }
+ return a;
+ };
  /**
  * @license
  * Copyright 2025 Google LLC
@@ -10,19 +26,14 @@ var StreamingMode = /* @__PURE__ */ ((StreamingMode2) => {
  StreamingMode2["BIDI"] = "bidi";
  return StreamingMode2;
  })(StreamingMode || {});
- class RunConfig {
- constructor(params = {}) {
- this.speechConfig = params.speechConfig;
- this.responseModalities = params.responseModalities;
- this.saveInputBlobsAsArtifacts = params.saveInputBlobsAsArtifacts || false;
- this.supportCfc = params.supportCfc || false;
- this.streamingMode = params.streamingMode || "none" /* NONE */;
- this.outputAudioTranscription = params.outputAudioTranscription;
- this.inputAudioTranscription = params.inputAudioTranscription;
- this.enableAffectiveDialog = params.enableAffectiveDialog || false;
- this.realtimeInputConfig = params.realtimeInputConfig;
- this.maxLlmCalls = validateMaxLlmCalls(params.maxLlmCalls || 500);
- }
+ function createRunConfig(params = {}) {
+ return __spreadValues({
+ saveInputBlobsAsArtifacts: false,
+ supportCfc: false,
+ enableAffectiveDialog: false,
+ streamingMode: "none" /* NONE */,
+ maxLlmCalls: validateMaxLlmCalls(params.maxLlmCalls || 500)
+ }, params);
  }
  function validateMaxLlmCalls(value) {
  if (value > Number.MAX_SAFE_INTEGER) {
@@ -38,6 +49,6 @@ function validateMaxLlmCalls(value) {
  return value;
  }
  export {
- RunConfig,
- StreamingMode
+ StreamingMode,
+ createRunConfig
  };
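
Because createRunConfig spreads params over the defaults, any own property of params wins, even when its value is undefined. A small sketch of the resulting semantics, based only on the implementation shown above:

    const a = createRunConfig();                              // streamingMode "none", maxLlmCalls 500, flags false
    const b = createRunConfig({ maxLlmCalls: 0 });            // 0 is kept (unbounded), not replaced by 500
    const c = createRunConfig({ streamingMode: undefined });  // streamingMode ends up undefined, not "none"
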
@@ -0,0 +1,126 @@
+ var __defProp = Object.defineProperty;
+ var __defProps = Object.defineProperties;
+ var __getOwnPropDescs = Object.getOwnPropertyDescriptors;
+ var __getOwnPropSymbols = Object.getOwnPropertySymbols;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __propIsEnum = Object.prototype.propertyIsEnumerable;
+ var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
+ var __spreadValues = (a, b) => {
+ for (var prop in b || (b = {}))
+ if (__hasOwnProp.call(b, prop))
+ __defNormalProp(a, prop, b[prop]);
+ if (__getOwnPropSymbols)
+ for (var prop of __getOwnPropSymbols(b)) {
+ if (__propIsEnum.call(b, prop))
+ __defNormalProp(a, prop, b[prop]);
+ }
+ return a;
+ };
+ var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b));
+ /**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+ import { Storage } from "@google-cloud/storage";
+ import { createPartFromBase64, createPartFromText } from "@google/genai";
+ class GcsArtifactService {
+ constructor(bucket) {
+ this.bucket = new Storage().bucket(bucket);
+ }
+ async saveArtifact(request) {
+ const versions = await this.listVersions(request);
+ const version = versions.length > 0 ? Math.max(...versions) + 1 : 0;
+ const file = this.bucket.file(getFileName(__spreadProps(__spreadValues({}, request), {
+ version
+ })));
+ if (request.artifact.inlineData) {
+ await file.save(JSON.stringify(request.artifact.inlineData.data), {
+ contentType: request.artifact.inlineData.mimeType
+ });
+ return version;
+ }
+ if (request.artifact.text) {
+ await file.save(request.artifact.text, {
+ contentType: "text/plain"
+ });
+ return version;
+ }
+ throw new Error("Artifact must have either inlineData or text.");
+ }
+ async loadArtifact(request) {
+ let version = request.version;
+ if (version === void 0) {
+ const versions = await this.listVersions(request);
+ if (versions.length === 0) {
+ return void 0;
+ }
+ version = Math.max(...versions);
+ }
+ const file = this.bucket.file(getFileName(__spreadProps(__spreadValues({}, request), {
+ version
+ })));
+ const [[metadata], [rawDataBuffer]] = await Promise.all([file.getMetadata(), file.download()]);
+ if (metadata.contentType === "text/plain") {
+ return createPartFromText(rawDataBuffer.toString("utf-8"));
+ }
+ return createPartFromBase64(
+ rawDataBuffer.toString("base64"),
+ metadata.contentType
+ );
+ }
+ async listArtifactKeys(request) {
+ const fileNames = [];
+ const sessionPrefix = "".concat(request.appName, "/").concat(request.userId, "/").concat(request.sessionId, "/");
+ const usernamePrefix = "".concat(request.appName, "/").concat(request.userId, "/user/");
+ const [
+ [sessionFiles],
+ [userSessionFiles]
+ ] = await Promise.all([
+ this.bucket.getFiles({ prefix: sessionPrefix }),
+ this.bucket.getFiles({ prefix: usernamePrefix })
+ ]);
+ for (const file of sessionFiles) {
+ fileNames.push(file.name.split("/").pop());
+ }
+ for (const file of userSessionFiles) {
+ fileNames.push(file.name.split("/").pop());
+ }
+ return fileNames.sort((a, b) => a.localeCompare(b));
+ }
+ async deleteArtifact(request) {
+ const versions = await this.listVersions(request);
+ await Promise.all(versions.map((version) => {
+ const file = this.bucket.file(getFileName(__spreadProps(__spreadValues({}, request), {
+ version
+ })));
+ return file.delete();
+ }));
+ return;
+ }
+ async listVersions(request) {
+ const prefix = getFileName(request);
+ const [files] = await this.bucket.getFiles({ prefix });
+ const versions = [];
+ for (const file of files) {
+ const version = file.name.split("/").pop();
+ versions.push(parseInt(version, 10));
+ }
+ return versions;
+ }
+ }
+ function getFileName({
+ appName,
+ userId,
+ sessionId,
+ filename,
+ version
+ }) {
+ if (filename.startsWith("user:")) {
+ return "".concat(appName, "/").concat(userId, "/user/").concat(filename, "/").concat(version);
+ }
+ return "".concat(appName, "/").concat(userId, "/").concat(sessionId, "/").concat(filename, "/").concat(version);
+ }
+ export {
+ GcsArtifactService
+ };
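
For reference, getFileName above lays objects out in the bucket as <appName>/<userId>/<sessionId>/<filename>/<version>, with filenames prefixed "user:" stored under <appName>/<userId>/user/<filename>/<version> instead. Hypothetical examples:

    my-app/user-1/session-1/notes.txt/0
    my-app/user-1/user/user:profile.json/2
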
@@ -11,10 +11,11 @@ import { LiveRequestQueue } from "./agents/live_request_queue.js";
  import { LlmAgent } from "./agents/llm_agent.js";
  import { LoopAgent } from "./agents/loop_agent.js";
  import { ParallelAgent } from "./agents/parallel_agent.js";
- import { RunConfig, StreamingMode } from "./agents/run_config.js";
+ import { StreamingMode } from "./agents/run_config.js";
  import { SequentialAgent } from "./agents/sequential_agent.js";
  import { InMemoryArtifactService } from "./artifacts/in_memory_artifact_service.js";
  import { createEvent, getFunctionCalls, getFunctionResponses, hasTrailingCodeExecutionResult, isFinalResponse, stringifyContent } from "./events/event.js";
+ import { createEventActions } from "./events/event_actions.js";
  import { InMemoryMemoryService } from "./memory/in_memory_memory_service.js";
  import { BaseLlm } from "./models/base_llm.js";
  import { Gemini } from "./models/google_llm.js";
@@ -67,7 +68,6 @@ export {
  PluginManager,
  PolicyOutcome,
  REQUEST_CONFIRMATION_FUNCTION_CALL_NAME,
- RunConfig,
  Runner,
  SecurityPlugin,
  SequentialAgent,
@@ -75,6 +75,7 @@ export {
  ToolConfirmation,
  ToolContext,
  createEvent,
+ createEventActions,
  createSession,
  functionsExportedForTestingOnly,
  getAskUserConfirmationFunctionCalls,