@google/adk 0.1.2 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/agents/content_processor_utils.js +20 -8
- package/dist/cjs/agents/functions.js +23 -32
- package/dist/cjs/agents/llm_agent.js +1 -0
- package/dist/cjs/artifacts/gcs_artifact_service.js +140 -0
- package/dist/cjs/common.js +12 -0
- package/dist/cjs/index.js +5 -5
- package/dist/cjs/index.js.map +4 -4
- package/dist/cjs/models/base_llm.js +12 -3
- package/dist/cjs/models/google_llm.js +6 -20
- package/dist/cjs/models/registry.js +1 -1
- package/dist/cjs/telemetry/google_cloud.js +85 -0
- package/dist/cjs/telemetry/setup.js +97 -0
- package/dist/cjs/telemetry/tracing.js +231 -0
- package/dist/cjs/utils/client_labels.js +56 -0
- package/dist/cjs/version.js +1 -1
- package/dist/esm/agents/content_processor_utils.js +20 -8
- package/dist/esm/agents/functions.js +23 -32
- package/dist/esm/agents/llm_agent.js +1 -0
- package/dist/esm/artifacts/gcs_artifact_service.js +110 -0
- package/dist/esm/common.js +8 -0
- package/dist/esm/index.js +5 -5
- package/dist/esm/index.js.map +4 -4
- package/dist/esm/models/base_llm.js +12 -3
- package/dist/esm/models/google_llm.js +6 -20
- package/dist/esm/models/registry.js +1 -1
- package/dist/esm/telemetry/google_cloud.js +54 -0
- package/dist/esm/telemetry/setup.js +67 -0
- package/dist/esm/telemetry/tracing.js +195 -0
- package/dist/esm/utils/client_labels.js +26 -0
- package/dist/esm/version.js +1 -1
- package/dist/types/artifacts/gcs_artifact_service.d.ts +16 -0
- package/dist/types/common.d.ts +4 -0
- package/dist/types/index.d.ts +3 -0
- package/dist/types/models/base_llm.d.ts +6 -3
- package/dist/types/models/google_llm.d.ts +1 -2
- package/dist/types/models/registry.d.ts +6 -2
- package/dist/types/telemetry/google_cloud.d.ts +9 -0
- package/dist/types/telemetry/setup.d.ts +48 -0
- package/dist/types/telemetry/tracing.d.ts +111 -0
- package/dist/types/utils/client_labels.d.ts +9 -0
- package/dist/types/version.d.ts +1 -1
- package/dist/web/agents/content_processor_utils.js +20 -8
- package/dist/web/agents/functions.js +23 -32
- package/dist/web/agents/llm_agent.js +1 -0
- package/dist/web/artifacts/gcs_artifact_service.js +126 -0
- package/dist/web/common.js +8 -0
- package/dist/web/index.js +1 -1
- package/dist/web/index.js.map +4 -4
- package/dist/web/models/base_llm.js +12 -3
- package/dist/web/models/google_llm.js +6 -20
- package/dist/web/models/registry.js +1 -1
- package/dist/web/telemetry/google_cloud.js +54 -0
- package/dist/web/telemetry/setup.js +67 -0
- package/dist/web/telemetry/tracing.js +210 -0
- package/dist/web/utils/client_labels.js +26 -0
- package/dist/web/version.js +1 -1
- package/package.json +20 -4
package/dist/esm/models/base_llm.js CHANGED

@@ -3,16 +3,25 @@
  * Copyright 2025 Google LLC
  * SPDX-License-Identifier: Apache-2.0
  */
+import { getClientLabels } from "../utils/client_labels.js";
 class BaseLlm {
   /**
    * Creates an instance of BaseLLM.
-   *
-   * @param model The name of the LLM, e.g. gemini-1.5-flash or
+   * @param params The parameters for creating a BaseLlm instance.
+   * @param params.model The name of the LLM, e.g. gemini-1.5-flash or
    * gemini-1.5-flash-001.
    */
-  constructor(model) {
+  constructor({ model }) {
     this.model = model;
   }
+  get trackingHeaders() {
+    const labels = getClientLabels();
+    const headerValue = labels.join(" ");
+    return {
+      "x-goog-api-client": headerValue,
+      "user-agent": headerValue
+    };
+  }
   /**
    * Appends a user content, so that model can continue to output.
    *
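BaseLlm now takes `{ model }` instead of a positional string, and the tracking headers that Gemini used to compute for itself are exposed to every subclass through the inherited `trackingHeaders` getter, built from `getClientLabels()` (added in client_labels.js further down). A minimal sketch of the header shape this produces; the label values are illustrative, not read from a real runtime:

```ts
// Mirrors the new BaseLlm trackingHeaders logic: labels are joined with a
// space and sent under both "x-goog-api-client" and "user-agent".
function buildTrackingHeaders(labels: string[]): Record<string, string> {
  const headerValue = labels.join(" ");
  return {
    "x-goog-api-client": headerValue,
    "user-agent": headerValue,
  };
}

// Illustrative output:
// { "x-goog-api-client": "google-adk/0.2.0 gl-typescript/v22.12.0", "user-agent": "..." }
console.log(buildTrackingHeaders(["google-adk/0.2.0", "gl-typescript/v22.12.0"]));
```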
package/dist/esm/models/google_llm.js CHANGED

@@ -4,10 +4,8 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 import { createPartFromText, FinishReason, GoogleGenAI } from "@google/genai";
-import { isBrowser } from "../utils/env_aware_utils.js";
 import { logger } from "../utils/logger.js";
 import { GoogleLLMVariant } from "../utils/variant_utils.js";
-import { version } from "../version.js";
 import { BaseLlm } from "./base_llm.js";
 import { GeminiLlmConnection } from "./gemini_llm_connection.js";
 import { createLlmResponse } from "./llm_response.js";
@@ -18,14 +16,17 @@ class Gemini extends BaseLlm {
    * @param params The parameters for creating a Gemini instance.
    */
   constructor({
-    model
+    model,
     apiKey,
     vertexai,
     project,
     location,
     headers
-  }
-
+  }) {
+    if (!model) {
+      model = "gemini-2.5-flash";
+    }
+    super({ model });
     this.project = project;
     this.location = location;
     this.apiKey = apiKey;
@@ -182,21 +183,6 @@ class Gemini extends BaseLlm {
     }
     return this._apiBackend;
   }
-  get trackingHeaders() {
-    if (!this._trackingHeaders) {
-      let frameworkLabel = `google-adk/${version}`;
-      if (!isBrowser() && process.env[AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME]) {
-        frameworkLabel = `${frameworkLabel}+${AGENT_ENGINE_TELEMETRY_TAG}`;
-      }
-      const languageLabel = `gl-typescript/${isBrowser() ? window.navigator.userAgent : process.version}`;
-      const versionHeaderValue = `${frameworkLabel} ${languageLabel}`;
-      this._trackingHeaders = {
-        "x-goog-api-client": versionHeaderValue,
-        "user-agent": versionHeaderValue
-      };
-    }
-    return this._trackingHeaders;
-  }
   get liveApiVersion() {
     if (!this._liveApiVersion) {
       this._liveApiVersion = this.apiBackend === GoogleLLMVariant.VERTEX_AI ? "v1beta1" : "v1alpha";
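Gemini keeps its options-object constructor, but it now calls `super({ model })` and falls back to `"gemini-2.5-flash"` when no model is given. A sketch of constructing the model under the new behavior; the API key value is a placeholder, and whether `GeminiParams` marks `model` as optional is not visible in this diff:

```ts
import { Gemini } from "@google/adk";

// Explicit model name, as before.
const llm = new Gemini({
  model: "gemini-2.5-flash",
  apiKey: "YOUR_API_KEY", // placeholder; use your own credential handling
});

// Per the constructor change above, the 0.2.0 runtime also tolerates an
// undefined model and substitutes "gemini-2.5-flash" before calling super().
console.log(llm.model);
```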
package/dist/esm/models/registry.js CHANGED

@@ -35,7 +35,7 @@ const _LLMRegistry = class _LLMRegistry {
    * @returns The LLM instance.
    */
   static newLlm(model) {
-    return new (_LLMRegistry.resolve(model))(model);
+    return new (_LLMRegistry.resolve(model))({ model });
   }
   static _register(modelNameRegex, llmCls) {
     if (_LLMRegistry.llmRegistryDict.has(modelNameRegex)) {
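`LLMRegistry.newLlm` now instantiates the resolved class with `{ model }` rather than a positional argument. A sketch of resolving a model name through the registry, assuming the Gemini patterns are registered by the package at import time (not shown in this diff):

```ts
import { LLMRegistry } from "@google/adk"; // newly re-exported; see common.d.ts below

// Resolves "gemini-2.5-flash" against the registered patterns and effectively
// runs `new ResolvedLlmClass({ model: "gemini-2.5-flash" })`.
const llm = LLMRegistry.newLlm("gemini-2.5-flash");
console.log(llm.model);
```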
package/dist/esm/telemetry/google_cloud.js ADDED

@@ -0,0 +1,54 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { GoogleAuth } from "google-auth-library";
+import { PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
+import { detectResources } from "@opentelemetry/resources";
+import { gcpDetector } from "@opentelemetry/resource-detector-gcp";
+import { TraceExporter } from "@google-cloud/opentelemetry-cloud-trace-exporter";
+import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
+import { MetricExporter } from "@google-cloud/opentelemetry-cloud-monitoring-exporter";
+import { logger } from "../utils/logger.js";
+const GCP_PROJECT_ERROR_MESSAGE = "Cannot determine GCP Project. OTel GCP Exporters cannot be set up. Please make sure to log into correct GCP Project.";
+async function getGcpProjectId() {
+  try {
+    const auth = new GoogleAuth();
+    const projectId = await auth.getProjectId();
+    return projectId || void 0;
+  } catch (error) {
+    return void 0;
+  }
+}
+async function getGcpExporters(config = {}) {
+  const {
+    enableTracing = false,
+    enableMetrics = false
+    // enableCloudLogging = false,
+  } = config;
+  const projectId = await getGcpProjectId();
+  if (!projectId) {
+    logger.warn(GCP_PROJECT_ERROR_MESSAGE);
+    return {};
+  }
+  return {
+    spanProcessors: enableTracing ? [
+      new BatchSpanProcessor(new TraceExporter({ projectId }))
+    ] : [],
+    metricReaders: enableMetrics ? [
+      new PeriodicExportingMetricReader({
+        exporter: new MetricExporter({ projectId }),
+        exportIntervalMillis: 5e3
+      })
+    ] : [],
+    logRecordProcessors: []
+  };
+}
+function getGcpResource() {
+  return detectResources({ detectors: [gcpDetector] });
+}
+export {
+  getGcpExporters,
+  getGcpResource
+};
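`getGcpExporters` resolves the active project through `google-auth-library` and returns Cloud Trace / Cloud Monitoring hooks, while `getGcpResource` runs the GCP resource detector. A sketch of feeding both into `maybeSetOtelProviders`; it assumes Application Default Credentials are available in the environment:

```ts
import { getGcpExporters, getGcpResource, maybeSetOtelProviders } from "@google/adk";

async function setUpGoogleCloudTelemetry(): Promise<void> {
  // Returns {} and logs a warning if the GCP project cannot be determined.
  const gcpHooks = await getGcpExporters({ enableTracing: true, enableMetrics: true });
  maybeSetOtelProviders([gcpHooks], getGcpResource());
}

setUpGoogleCloudTelemetry().catch(console.error);
```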
package/dist/esm/telemetry/setup.js ADDED

@@ -0,0 +1,67 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { trace, metrics } from "@opentelemetry/api";
+import { logs } from "@opentelemetry/api-logs";
+import { LoggerProvider, BatchLogRecordProcessor } from "@opentelemetry/sdk-logs";
+import { MeterProvider, PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
+import { detectResources } from "@opentelemetry/resources";
+import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
+import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
+import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
+import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http";
+function maybeSetOtelProviders(otelHooksToSetup = [], otelResource) {
+  const resource = otelResource || getOtelResource();
+  const allHooks = [...otelHooksToSetup, getOtelExporters()];
+  const spanProcessors = allHooks.flatMap((hooks) => hooks.spanProcessors || []);
+  const metricReaders = allHooks.flatMap((hooks) => hooks.metricReaders || []);
+  const logRecordProcessors = allHooks.flatMap((hooks) => hooks.logRecordProcessors || []);
+  if (spanProcessors.length > 0) {
+    const tracerProvider = new NodeTracerProvider({
+      resource,
+      spanProcessors
+    });
+    tracerProvider.register();
+    trace.setGlobalTracerProvider(tracerProvider);
+  }
+  if (metricReaders.length > 0) {
+    const meterProvider = new MeterProvider({
+      readers: metricReaders,
+      resource
+    });
+    metrics.setGlobalMeterProvider(meterProvider);
+  }
+  if (logRecordProcessors.length > 0) {
+    const loggerProvider = new LoggerProvider({
+      resource,
+      processors: logRecordProcessors
+    });
+    logs.setGlobalLoggerProvider(loggerProvider);
+  }
+}
+function getOtelResource() {
+  return detectResources({
+    detectors: []
+  });
+}
+function getOtelExportersConfig() {
+  return {
+    enableTracing: !!(process.env.OTEL_EXPORTER_OTLP_ENDPOINT || process.env.OTEL_EXPORTER_OTLP_TRACES_ENDPOINT),
+    enableMetrics: !!(process.env.OTEL_EXPORTER_OTLP_ENDPOINT || process.env.OTEL_EXPORTER_OTLP_METRICS_ENDPOINT),
+    enableLogging: !!(process.env.OTEL_EXPORTER_OTLP_ENDPOINT || process.env.OTEL_EXPORTER_OTLP_LOGS_ENDPOINT)
+  };
+}
+function getOtelExporters(config = getOtelExportersConfig()) {
+  const { enableTracing, enableMetrics, enableLogging } = config;
+  return {
+    spanProcessors: enableTracing ? [new BatchSpanProcessor(new OTLPTraceExporter())] : [],
+    metricReaders: enableMetrics ? [new PeriodicExportingMetricReader({ exporter: new OTLPMetricExporter() })] : [],
+    logRecordProcessors: enableLogging ? [new BatchLogRecordProcessor(new OTLPLogExporter())] : []
+  };
+}
+export {
+  maybeSetOtelProviders
+};
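Even with no hooks passed in, `maybeSetOtelProviders` appends generic OTLP exporters for each telemetry type whose standard `OTEL_EXPORTER_OTLP_*` endpoint variable is set. A minimal sketch; the endpoint below is a placeholder for a local collector:

```ts
import { maybeSetOtelProviders } from "@google/adk";

// With e.g. OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 exported before
// startup, this call registers OTLP HTTP exporters for traces, metrics and logs.
maybeSetOtelProviders();
```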
package/dist/esm/telemetry/tracing.js ADDED

@@ -0,0 +1,195 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { trace, context } from "@opentelemetry/api";
+import { version } from "../version.js";
+const GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description";
+const GEN_AI_AGENT_NAME = "gen_ai.agent.name";
+const GEN_AI_CONVERSATION_ID = "gen_ai.conversation.id";
+const GEN_AI_OPERATION_NAME = "gen_ai.operation.name";
+const GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id";
+const GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description";
+const GEN_AI_TOOL_NAME = "gen_ai.tool.name";
+const GEN_AI_TOOL_TYPE = "gen_ai.tool.type";
+const tracer = trace.getTracer(
+  "gcp.vertex.agent",
+  version
+);
+function safeJsonSerialize(obj) {
+  try {
+    return JSON.stringify(obj);
+  } catch (error) {
+    return "<not serializable>";
+  }
+}
+function traceAgentInvocation({
+  agent,
+  invocationContext
+}) {
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    [GEN_AI_OPERATION_NAME]: "invoke_agent",
+    // Conditionally Required
+    [GEN_AI_AGENT_DESCRIPTION]: agent.description,
+    [GEN_AI_AGENT_NAME]: agent.name,
+    [GEN_AI_CONVERSATION_ID]: invocationContext.session.id
+  });
+}
+function traceToolCall({
+  tool,
+  args,
+  functionResponseEvent
+}) {
+  var _a, _b;
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    [GEN_AI_OPERATION_NAME]: "execute_tool",
+    [GEN_AI_TOOL_DESCRIPTION]: tool.description || "",
+    [GEN_AI_TOOL_NAME]: tool.name,
+    // e.g. FunctionTool
+    [GEN_AI_TOOL_TYPE]: tool.constructor.name,
+    // Setting empty llm request and response (as UI expect these) while not
+    // applicable for tool_response.
+    "gcp.vertex.agent.llm_request": "{}",
+    "gcp.vertex.agent.llm_response": "{}",
+    "gcp.vertex.agent.tool_call_args": shouldAddRequestResponseToSpans() ? safeJsonSerialize(args) : "{}"
+  });
+  let toolCallId = "<not specified>";
+  let toolResponse = "<not specified>";
+  if ((_a = functionResponseEvent.content) == null ? void 0 : _a.parts) {
+    const responseParts = functionResponseEvent.content.parts;
+    const functionResponse = (_b = responseParts[0]) == null ? void 0 : _b.functionResponse;
+    if (functionResponse == null ? void 0 : functionResponse.id) {
+      toolCallId = functionResponse.id;
+    }
+    if (functionResponse == null ? void 0 : functionResponse.response) {
+      toolResponse = functionResponse.response;
+    }
+  }
+  if (typeof toolResponse !== "object" || toolResponse === null) {
+    toolResponse = { result: toolResponse };
+  }
+  span.setAttributes({
+    [GEN_AI_TOOL_CALL_ID]: toolCallId,
+    "gcp.vertex.agent.event_id": functionResponseEvent.id,
+    "gcp.vertex.agent.tool_response": shouldAddRequestResponseToSpans() ? safeJsonSerialize(toolResponse) : "{}"
+  });
+}
+function traceMergedToolCalls({
+  responseEventId,
+  functionResponseEvent
+}) {
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    [GEN_AI_OPERATION_NAME]: "execute_tool",
+    [GEN_AI_TOOL_NAME]: "(merged tools)",
+    [GEN_AI_TOOL_DESCRIPTION]: "(merged tools)",
+    [GEN_AI_TOOL_CALL_ID]: responseEventId,
+    "gcp.vertex.agent.tool_call_args": "N/A",
+    "gcp.vertex.agent.event_id": responseEventId,
+    // Setting empty llm request and response (as UI expect these) while not
+    // applicable for tool_response.
+    "gcp.vertex.agent.llm_request": "{}",
+    "gcp.vertex.agent.llm_response": "{}"
+  });
+  span.setAttribute("gcp.vertex.agent.tool_response", shouldAddRequestResponseToSpans() ? safeJsonSerialize(functionResponseEvent) : "{}");
+}
+function traceCallLlm({
+  invocationContext,
+  eventId,
+  llmRequest,
+  llmResponse
+}) {
+  var _a, _b, _c;
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    "gen_ai.system": "gcp.vertex.agent",
+    "gen_ai.request.model": llmRequest.model,
+    "gcp.vertex.agent.invocation_id": invocationContext.invocationId,
+    "gcp.vertex.agent.session_id": invocationContext.session.id,
+    "gcp.vertex.agent.event_id": eventId,
+    // Consider removing once GenAI SDK provides a way to record this info.
+    "gcp.vertex.agent.llm_request": shouldAddRequestResponseToSpans() ? safeJsonSerialize(buildLlmRequestForTrace(llmRequest)) : "{}"
+  });
+  if ((_a = llmRequest.config) == null ? void 0 : _a.topP) {
+    span.setAttribute("gen_ai.request.top_p", llmRequest.config.topP);
+  }
+  if (((_b = llmRequest.config) == null ? void 0 : _b.maxOutputTokens) !== void 0) {
+    span.setAttribute("gen_ai.request.max_tokens", llmRequest.config.maxOutputTokens);
+  }
+  span.setAttribute("gcp.vertex.agent.llm_response", shouldAddRequestResponseToSpans() ? safeJsonSerialize(llmResponse) : "{}");
+  if (llmResponse.usageMetadata) {
+    span.setAttribute("gen_ai.usage.input_tokens", llmResponse.usageMetadata.promptTokenCount || 0);
+  }
+  if ((_c = llmResponse.usageMetadata) == null ? void 0 : _c.candidatesTokenCount) {
+    span.setAttribute("gen_ai.usage.output_tokens", llmResponse.usageMetadata.candidatesTokenCount);
+  }
+  if (llmResponse.finishReason) {
+    const finishReasonValue = typeof llmResponse.finishReason === "string" ? llmResponse.finishReason.toLowerCase() : String(llmResponse.finishReason).toLowerCase();
+    span.setAttribute("gen_ai.response.finish_reasons", [finishReasonValue]);
+  }
+}
+function traceSendData({
+  invocationContext,
+  eventId,
+  data
+}) {
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    "gcp.vertex.agent.invocation_id": invocationContext.invocationId,
+    "gcp.vertex.agent.event_id": eventId
+  });
+  span.setAttribute("gcp.vertex.agent.data", shouldAddRequestResponseToSpans() ? safeJsonSerialize(data) : "{}");
+}
+function buildLlmRequestForTrace(llmRequest) {
+  const result = {
+    model: llmRequest.model,
+    contents: []
+  };
+  if (llmRequest.config) {
+    const { responseSchema, ...cleanConfig } = llmRequest.config;
+    result.config = cleanConfig;
+  }
+  result.contents = llmRequest.contents.map((content) => {
+    var _a;
+    return {
+      role: content.role,
+      parts: ((_a = content.parts) == null ? void 0 : _a.filter((part) => !part.inlineData)) || []
+    };
+  });
+  return result;
+}
+function bindAsyncGenerator(ctx, generator) {
+  return {
+    // Bind the next() method to execute within the provided context
+    next: context.bind(ctx, generator.next.bind(generator)),
+    // Bind the return() method to execute within the provided context
+    return: context.bind(ctx, generator.return.bind(generator)),
+    // Bind the throw() method to execute within the provided context
+    throw: context.bind(ctx, generator.throw.bind(generator)),
+    // Ensure the async iterator symbol also returns a context-bound generator
+    [Symbol.asyncIterator]() {
+      return bindAsyncGenerator(ctx, generator[Symbol.asyncIterator]());
+    }
+  };
+}
+function shouldAddRequestResponseToSpans() {
+  const envValue = process.env.ADK_CAPTURE_MESSAGE_CONTENT_IN_SPANS || "true";
+  return envValue === "true" || envValue === "1";
+}
+export {
+  bindAsyncGenerator,
+  traceAgentInvocation,
+  traceCallLlm,
+  traceMergedToolCalls,
+  traceSendData,
+  traceToolCall,
+  tracer
+};
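Span attributes carry request/response payloads unless `ADK_CAPTURE_MESSAGE_CONTENT_IN_SPANS` is set to something other than `"true"`/`"1"`, and `bindAsyncGenerator` keeps an async generator's `next`/`return`/`throw` calls inside a chosen OpenTelemetry context. A sketch of the latter; note that the `@google/adk` import specifier is an assumption, since this diff only shows these symbols exported from the internal telemetry/tracing module:

```ts
import { context, trace } from "@opentelemetry/api";
import { bindAsyncGenerator, tracer } from "@google/adk"; // import path assumed

async function* produceEvents(): AsyncGenerator<string, void> {
  yield "event-1";
  yield "event-2";
}

async function run(): Promise<void> {
  const span = tracer.startSpan("invoke_agent");
  const ctx = trace.setSpan(context.active(), span);
  try {
    // Each next() call now runs with `span` as the active span.
    for await (const event of bindAsyncGenerator(ctx, produceEvents())) {
      console.log(event);
    }
  } finally {
    span.end();
  }
}

run().catch(console.error);
```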
package/dist/esm/utils/client_labels.js ADDED

@@ -0,0 +1,26 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { version } from "../version.js";
+import { isBrowser } from "./env_aware_utils.js";
+const ADK_LABEL = "google-adk";
+const LANGUAGE_LABEL = "gl-typescript";
+const AGENT_ENGINE_TELEMETRY_TAG = "remote_reasoning_engine";
+const AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID";
+function _getDefaultLabels() {
+  let frameworkLabel = `${ADK_LABEL}/${version}`;
+  if (!isBrowser() && process.env[AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME]) {
+    frameworkLabel = `${frameworkLabel}+${AGENT_ENGINE_TELEMETRY_TAG}`;
+  }
+  const languageLabel = `${LANGUAGE_LABEL}/${isBrowser() ? window.navigator.userAgent : process.version}`;
+  return [frameworkLabel, languageLabel];
+}
+function getClientLabels() {
+  const labels = _getDefaultLabels();
+  return labels;
+}
+export {
+  getClientLabels
+};
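These labels are what the new `trackingHeaders` getter joins into its header value: a framework label and a language/runtime label, with `+remote_reasoning_engine` appended to the framework label when `GOOGLE_CLOUD_AGENT_ENGINE_ID` is set. Representative values, purely illustrative:

```ts
// Outside Agent Engine on Node (runtime version illustrative):
const labels = ["google-adk/0.2.0", "gl-typescript/v22.12.0"];
// With GOOGLE_CLOUD_AGENT_ENGINE_ID set, the first label becomes
// "google-adk/0.2.0+remote_reasoning_engine"; in a browser the second label
// uses window.navigator.userAgent instead of process.version.
console.log(labels.join(" ")); // value sent as x-goog-api-client / user-agent
```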
package/dist/esm/version.js CHANGED
package/dist/types/artifacts/gcs_artifact_service.d.ts ADDED

@@ -0,0 +1,16 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { Part } from '@google/genai';
+import { BaseArtifactService, DeleteArtifactRequest, ListArtifactKeysRequest, ListVersionsRequest, LoadArtifactRequest, SaveArtifactRequest } from './base_artifact_service.js';
+export declare class GcsArtifactService implements BaseArtifactService {
+    private readonly bucket;
+    constructor(bucket: string);
+    saveArtifact(request: SaveArtifactRequest): Promise<number>;
+    loadArtifact(request: LoadArtifactRequest): Promise<Part | undefined>;
+    listArtifactKeys(request: ListArtifactKeysRequest): Promise<string[]>;
+    deleteArtifact(request: DeleteArtifactRequest): Promise<void>;
+    listVersions(request: ListVersionsRequest): Promise<number[]>;
+}
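`GcsArtifactService` is a new `BaseArtifactService` backed by a Cloud Storage bucket and is re-exported from the package root (see index.d.ts below). A usage sketch; the bucket name is a placeholder and the request field names (appName, userId, sessionId, filename, artifact, version) follow the ADK artifact-service convention rather than anything shown in this diff, so treat them as assumptions:

```ts
import { createPartFromText } from "@google/genai";
import { GcsArtifactService } from "@google/adk";

const artifactService = new GcsArtifactService("my-adk-artifacts"); // placeholder bucket

async function demo(): Promise<void> {
  // Assumed request shape, not confirmed by this diff.
  const savedVersion = await artifactService.saveArtifact({
    appName: "my-app",
    userId: "user-123",
    sessionId: "session-456",
    filename: "notes.txt",
    artifact: createPartFromText("hello from GCS"),
  });
  const loaded = await artifactService.loadArtifact({
    appName: "my-app",
    userId: "user-123",
    sessionId: "session-456",
    filename: "notes.txt",
    version: savedVersion,
  });
  console.log(loaded);
}

demo().catch(console.error);
```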
package/dist/types/common.d.ts CHANGED

@@ -21,6 +21,7 @@ export type { BaseCredentialService } from './auth/credential_service/base_crede
 export { createEvent, getFunctionCalls, getFunctionResponses, hasTrailingCodeExecutionResult, isFinalResponse, stringifyContent } from './events/event.js';
 export type { Event } from './events/event.js';
 export type { EventActions } from './events/event_actions.js';
+export { createEventActions } from './events/event_actions.js';
 export { InMemoryMemoryService } from './memory/in_memory_memory_service.js';
 export { BaseLlm } from './models/base_llm.js';
 export type { BaseLlmConnection } from './models/base_llm_connection.js';
@@ -28,6 +29,7 @@ export { Gemini } from './models/google_llm.js';
 export type { GeminiParams } from './models/google_llm.js';
 export type { LlmRequest } from './models/llm_request.js';
 export type { LlmResponse } from './models/llm_response.js';
+export { LLMRegistry } from './models/registry.js';
 export { BasePlugin } from './plugins/base_plugin.js';
 export { LoggingPlugin } from './plugins/logging_plugin.js';
 export { PluginManager } from './plugins/plugin_manager.js';
@@ -38,6 +40,7 @@ export { Runner } from './runner/runner.js';
 export { InMemorySessionService } from './sessions/in_memory_session_service.js';
 export { createSession } from './sessions/session.js';
 export type { Session } from './sessions/session.js';
+export { State } from './sessions/state.js';
 export { AgentTool } from './tools/agent_tool.js';
 export { BaseTool } from './tools/base_tool.js';
 export { BaseToolset } from './tools/base_toolset.js';
@@ -48,6 +51,7 @@ export { ToolConfirmation } from './tools/tool_confirmation.js';
 export { ToolContext } from './tools/tool_context.js';
 export { LogLevel, setLogLevel } from './utils/logger.js';
 export { zodObjectToSchema } from './utils/simple_zod_to_json.js';
+export { version } from './version.js';
 export * from './artifacts/base_artifact_service.js';
 export * from './memory/base_memory_service.js';
 export * from './sessions/base_session_service.js';
package/dist/types/index.d.ts CHANGED

@@ -7,3 +7,6 @@ export * from './common.js';
 export * from './tools/mcp/mcp_session_manager.js';
 export * from './tools/mcp/mcp_tool.js';
 export * from './tools/mcp/mcp_toolset.js';
+export * from './artifacts/gcs_artifact_service.js';
+export * from './telemetry/setup.js';
+export * from './telemetry/google_cloud.js';
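Combined with the common.d.ts additions above, the package's top-level entry now exposes the GCS artifact service and the telemetry helpers alongside the newly re-exported core symbols:

```ts
import {
  GcsArtifactService,    // ./artifacts/gcs_artifact_service.js
  maybeSetOtelProviders, // ./telemetry/setup.js
  getGcpExporters,       // ./telemetry/google_cloud.js
  getGcpResource,
  LLMRegistry,           // newly re-exported from ./models/registry.js
  State,                 // newly re-exported from ./sessions/state.js
  createEventActions,    // newly re-exported from ./events/event_actions.js
  version,               // newly re-exported from ./version.js
} from "@google/adk";

console.log(version);
```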
package/dist/types/models/base_llm.d.ts CHANGED

@@ -13,11 +13,13 @@ export declare abstract class BaseLlm {
     readonly model: string;
     /**
      * Creates an instance of BaseLLM.
-     *
-     * @param model The name of the LLM, e.g. gemini-1.5-flash or
+     * @param params The parameters for creating a BaseLlm instance.
+     * @param params.model The name of the LLM, e.g. gemini-1.5-flash or
      * gemini-1.5-flash-001.
      */
-    constructor(model:
+    constructor({ model }: {
+        model: string;
+    });
     /**
      * List of supported models in regex for LlmRegistry.
      */
@@ -38,6 +40,7 @@ export declare abstract class BaseLlm {
      * @return A live connection to the LLM.
      */
     abstract connect(llmRequest: LlmRequest): Promise<BaseLlmConnection>;
+    protected get trackingHeaders(): Record<string, string>;
     /**
      * Appends a user content, so that model can continue to output.
      *
package/dist/types/models/google_llm.d.ts CHANGED

@@ -52,7 +52,7 @@ export declare class Gemini extends BaseLlm {
     /**
      * @param params The parameters for creating a Gemini instance.
      */
-    constructor({ model, apiKey, vertexai, project, location, headers }
+    constructor({ model, apiKey, vertexai, project, location, headers, }: GeminiParams);
     /**
      * A list of model name patterns that are supported by this LLM.
      *
@@ -74,7 +74,6 @@ export declare class Gemini extends BaseLlm {
     generateContentAsync(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void>;
     get apiClient(): GoogleGenAI;
     get apiBackend(): GoogleLLMVariant;
-    get trackingHeaders(): Record<string, string>;
     get liveApiVersion(): string;
     get liveApiClient(): GoogleGenAI;
     /**
package/dist/types/models/registry.d.ts CHANGED

@@ -8,7 +8,9 @@ import { BaseLlm } from './base_llm.js';
  * type[BaseLlm] equivalent in TypeScript, represents a class that can be new-ed
  * to create a BaseLlm instance.
  */
-export type BaseLlmType = (new (
+export type BaseLlmType = (new (params: {
+    model: string;
+}) => BaseLlm) & {
     readonly supportedModels: Array<string | RegExp>;
 };
 /**
@@ -32,7 +34,9 @@ export declare class LLMRegistry {
      * Registers a new LLM class.
      * @param llmCls The class that implements the model.
      */
-    static register<T extends BaseLlm>(llmCls: (new (
+    static register<T extends BaseLlm>(llmCls: (new (params: {
+        model: string;
+    }) => T) & {
         readonly supportedModels: Array<string | RegExp>;
     }): void;
     /**
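`BaseLlmType` and `LLMRegistry.register` now expect constructors that take `{ model: string }`. A type-level sketch of the new shape; `CustomLlm` is illustrative and omits the abstract `BaseLlm` members a real registration would also need:

```ts
// Local copy of the constructor shape registry.d.ts now demands, for illustration.
type NewStyleLlmConstructor = (new (params: { model: string }) => unknown) & {
  readonly supportedModels: Array<string | RegExp>;
};

class CustomLlm {
  static readonly supportedModels = [/^custom-.*$/];
  readonly model: string;
  constructor({ model }: { model: string }) {
    this.model = model;
  }
}

// Type-checks against the 0.2.0 shape; a 0.1.2-style positional
// `constructor(model: string)` would no longer be assignable.
const ctor: NewStyleLlmConstructor = CustomLlm;
console.log(new ctor({ model: "custom-model" }));
```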
package/dist/types/telemetry/google_cloud.d.ts ADDED

@@ -0,0 +1,9 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { Resource } from '@opentelemetry/resources';
+import { OtelExportersConfig, OTelHooks } from './setup.js';
+export declare function getGcpExporters(config?: OtelExportersConfig): Promise<OTelHooks>;
+export declare function getGcpResource(): Resource;
package/dist/types/telemetry/setup.d.ts ADDED

@@ -0,0 +1,48 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { LogRecordProcessor } from '@opentelemetry/sdk-logs';
+import { MetricReader } from '@opentelemetry/sdk-metrics';
+import { Resource } from '@opentelemetry/resources';
+import { SpanProcessor } from '@opentelemetry/sdk-trace-base';
+export interface OtelExportersConfig {
+    enableTracing?: boolean;
+    enableMetrics?: boolean;
+    enableLogging?: boolean;
+}
+/**
+ * Configuration hooks for OpenTelemetry setup.
+ *
+ * This interface defines the structure for configuring OpenTelemetry
+ * components including span processors, metric readers, and log record processors.
+ */
+export interface OTelHooks {
+    spanProcessors?: SpanProcessor[];
+    metricReaders?: MetricReader[];
+    logRecordProcessors?: LogRecordProcessor[];
+}
+/**
+ * Sets up OTel providers if hooks for a given telemetry type were passed.
+ *
+ * Additionally adds generic OTLP exporters based on following env variables:
+ * OTEL_EXPORTER_OTLP_ENDPOINT
+ * OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+ * OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+ * OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+ * See https://opentelemetry.io/docs/languages/sdk-configuration/otlp-exporter/
+ * for how they are used.
+ *
+ * If a provider for a specific telemetry type was already globally set -
+ * this function will not override it or register more exporters.
+ *
+ * @experimental (Experimental, subject to change)
+ *
+ * @param otelHooksToSetup per-telemetry-type processors and readers to be added
+ * to OTel providers. If no hooks for a specific telemetry type are passed -
+ * provider will not be set.
+ * @param otelResource OTel resource to use in providers.
+ * If empty - default OTel resource detection will be used.
+ */
+export declare function maybeSetOtelProviders(otelHooksToSetup?: OTelHooks[], otelResource?: Resource): void;
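Beyond the GCP and env-driven OTLP paths shown earlier, `OTelHooks` lets callers supply their own processors and readers. A sketch that sends spans to the console exporter from `@opentelemetry/sdk-trace-base`; the wiring is purely illustrative:

```ts
import { BatchSpanProcessor, ConsoleSpanExporter } from "@opentelemetry/sdk-trace-base";
import { maybeSetOtelProviders, type OTelHooks } from "@google/adk";

const hooks: OTelHooks = {
  spanProcessors: [new BatchSpanProcessor(new ConsoleSpanExporter())],
};

// A NodeTracerProvider is registered for the span hooks; metric and logging
// providers are only set when their hook arrays are non-empty.
maybeSetOtelProviders([hooks]);
```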