@google/adk 0.1.3 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/agents/content_processor_utils.js +20 -8
- package/dist/cjs/agents/llm_agent.js +1 -0
- package/dist/cjs/common.js +9 -0
- package/dist/cjs/index.js +5 -5
- package/dist/cjs/index.js.map +4 -4
- package/dist/cjs/models/base_llm.js +12 -3
- package/dist/cjs/models/google_llm.js +6 -20
- package/dist/cjs/models/registry.js +1 -1
- package/dist/cjs/telemetry/google_cloud.js +85 -0
- package/dist/cjs/telemetry/setup.js +97 -0
- package/dist/cjs/telemetry/tracing.js +231 -0
- package/dist/cjs/utils/client_labels.js +56 -0
- package/dist/cjs/version.js +1 -1
- package/dist/esm/agents/content_processor_utils.js +20 -8
- package/dist/esm/agents/llm_agent.js +1 -0
- package/dist/esm/common.js +6 -0
- package/dist/esm/index.js +5 -5
- package/dist/esm/index.js.map +4 -4
- package/dist/esm/models/base_llm.js +12 -3
- package/dist/esm/models/google_llm.js +6 -20
- package/dist/esm/models/registry.js +1 -1
- package/dist/esm/telemetry/google_cloud.js +54 -0
- package/dist/esm/telemetry/setup.js +67 -0
- package/dist/esm/telemetry/tracing.js +195 -0
- package/dist/esm/utils/client_labels.js +26 -0
- package/dist/esm/version.js +1 -1
- package/dist/types/common.d.ts +2 -0
- package/dist/types/index.d.ts +2 -0
- package/dist/types/models/base_llm.d.ts +1 -0
- package/dist/types/models/google_llm.d.ts +0 -1
- package/dist/types/telemetry/google_cloud.d.ts +9 -0
- package/dist/types/telemetry/setup.d.ts +48 -0
- package/dist/types/telemetry/tracing.d.ts +111 -0
- package/dist/types/utils/client_labels.d.ts +9 -0
- package/dist/types/version.d.ts +1 -1
- package/dist/web/agents/content_processor_utils.js +20 -8
- package/dist/web/agents/llm_agent.js +1 -0
- package/dist/web/common.js +6 -0
- package/dist/web/index.js +1 -1
- package/dist/web/index.js.map +4 -4
- package/dist/web/models/base_llm.js +12 -3
- package/dist/web/models/google_llm.js +6 -20
- package/dist/web/models/registry.js +1 -1
- package/dist/web/telemetry/google_cloud.js +54 -0
- package/dist/web/telemetry/setup.js +67 -0
- package/dist/web/telemetry/tracing.js +210 -0
- package/dist/web/utils/client_labels.js +26 -0
- package/dist/web/version.js +1 -1
- package/package.json +19 -4
- package/dist/cjs/sessions/database_session_service.js +0 -52
- package/dist/esm/sessions/database_session_service.js +0 -22
- package/dist/types/sessions/database_session_service.d.ts +0 -10
- package/dist/web/sessions/database_session_service.js +0 -22
@@ -3,16 +3,25 @@
  * Copyright 2025 Google LLC
  * SPDX-License-Identifier: Apache-2.0
  */
+import { getClientLabels } from "../utils/client_labels.js";
 class BaseLlm {
   /**
    * Creates an instance of BaseLLM.
-   *
-   * @param model The name of the LLM, e.g. gemini-1.5-flash or
+   * @param params The parameters for creating a BaseLlm instance.
+   * @param params.model The name of the LLM, e.g. gemini-1.5-flash or
    * gemini-1.5-flash-001.
    */
-  constructor(model) {
+  constructor({ model }) {
     this.model = model;
   }
+  get trackingHeaders() {
+    const labels = getClientLabels();
+    const headerValue = labels.join(" ");
+    return {
+      "x-goog-api-client": headerValue,
+      "user-agent": headerValue
+    };
+  }
   /**
    * Appends a user content, so that model can continue to output.
    *
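The hunk above replaces BaseLlm's positional `model` argument with a destructured options object and moves the tracking headers up from the Gemini class into the base class, where they are now derived from getClientLabels(). A rough, hypothetical subclass sketch against the new surface (the import path and the exact abstract members of BaseLlm are assumed, not shown in this diff; request/response types are elided with `any`):

import { BaseLlm } from "@google/adk";

class EchoLlm extends BaseLlm {
  constructor(model: string) {
    super({ model }); // 0.1.3 called super(model)
  }
  async *generateContentAsync(llmRequest: any): AsyncGenerator<any, void> {
    // trackingHeaders is now a protected getter inherited from BaseLlm, e.g.
    // { "x-goog-api-client": "google-adk/<version> gl-typescript/<runtime>" }
    // with the same value under "user-agent".
    const headers = this.trackingHeaders;
    yield { text: `echoing with ${Object.keys(headers).length} tracking headers` };
  }
  async connect(llmRequest: any): Promise<any> {
    throw new Error("live connections are not part of this sketch");
  }
}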
@@ -4,10 +4,8 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 import { createPartFromText, FinishReason, GoogleGenAI } from "@google/genai";
-import { isBrowser } from "../utils/env_aware_utils.js";
 import { logger } from "../utils/logger.js";
 import { GoogleLLMVariant } from "../utils/variant_utils.js";
-import { version } from "../version.js";
 import { BaseLlm } from "./base_llm.js";
 import { GeminiLlmConnection } from "./gemini_llm_connection.js";
 import { createLlmResponse } from "./llm_response.js";
@@ -18,14 +16,17 @@ class Gemini extends BaseLlm {
    * @param params The parameters for creating a Gemini instance.
    */
   constructor({
-    model
+    model,
     apiKey,
     vertexai,
     project,
     location,
     headers
-  }
-
+  }) {
+    if (!model) {
+      model = "gemini-2.5-flash";
+    }
+    super({ model });
     this.project = project;
     this.location = location;
     this.apiKey = apiKey;
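With this change a Gemini instance no longer requires an explicit model name: when `model` is falsy the constructor substitutes "gemini-2.5-flash" before calling `super({ model })`. A hedged usage sketch (import path assumed; whether the typings mark `model` optional is not visible in this diff):

import { Gemini } from "@google/adk";

// Model omitted: 0.2.0 falls back to "gemini-2.5-flash".
const flash = new Gemini({ apiKey: process.env.GOOGLE_API_KEY });
console.log(flash.model); // "gemini-2.5-flash"

// Passing a model behaves as before; the values below are placeholders.
const pro = new Gemini({
  model: "gemini-2.5-pro",
  vertexai: true,
  project: "my-project",
  location: "us-central1",
});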
@@ -182,21 +183,6 @@ class Gemini extends BaseLlm {
     }
     return this._apiBackend;
   }
-  get trackingHeaders() {
-    if (!this._trackingHeaders) {
-      let frameworkLabel = `google-adk/${version}`;
-      if (!isBrowser() && process.env[AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME]) {
-        frameworkLabel = `${frameworkLabel}+${AGENT_ENGINE_TELEMETRY_TAG}`;
-      }
-      const languageLabel = `gl-typescript/${isBrowser() ? window.navigator.userAgent : process.version}`;
-      const versionHeaderValue = `${frameworkLabel} ${languageLabel}`;
-      this._trackingHeaders = {
-        "x-goog-api-client": versionHeaderValue,
-        "user-agent": versionHeaderValue
-      };
-    }
-    return this._trackingHeaders;
-  }
   get liveApiVersion() {
     if (!this._liveApiVersion) {
       this._liveApiVersion = this.apiBackend === GoogleLLMVariant.VERTEX_AI ? "v1beta1" : "v1alpha";
@@ -35,7 +35,7 @@ const _LLMRegistry = class _LLMRegistry {
    * @returns The LLM instance.
    */
   static newLlm(model) {
-    return new (_LLMRegistry.resolve(model))(model);
+    return new (_LLMRegistry.resolve(model))({ model });
   }
   static _register(modelNameRegex, llmCls) {
     if (_LLMRegistry.llmRegistryDict.has(modelNameRegex)) {
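newLlm now instantiates the resolved class with an options object, which is what makes the BaseLlm/Gemini constructor change above non-breaking for registry users; any custom model class registered with the registry must therefore accept `{ model }`. A sketch, assuming the registry is exported as `LLMRegistry` (the public export name is not shown in this diff):

// 0.1.3: new (LLMRegistry.resolve(model))(model)
// 0.2.0: new (LLMRegistry.resolve(model))({ model })
const llm = LLMRegistry.newLlm("gemini-2.5-flash"); // a Gemini constructed with { model: "gemini-2.5-flash" }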
@@ -0,0 +1,54 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { GoogleAuth } from "google-auth-library";
+import { PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
+import { detectResources } from "@opentelemetry/resources";
+import { gcpDetector } from "@opentelemetry/resource-detector-gcp";
+import { TraceExporter } from "@google-cloud/opentelemetry-cloud-trace-exporter";
+import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
+import { MetricExporter } from "@google-cloud/opentelemetry-cloud-monitoring-exporter";
+import { logger } from "../utils/logger.js";
+const GCP_PROJECT_ERROR_MESSAGE = "Cannot determine GCP Project. OTel GCP Exporters cannot be set up. Please make sure to log into correct GCP Project.";
+async function getGcpProjectId() {
+  try {
+    const auth = new GoogleAuth();
+    const projectId = await auth.getProjectId();
+    return projectId || void 0;
+  } catch (error) {
+    return void 0;
+  }
+}
+async function getGcpExporters(config = {}) {
+  const {
+    enableTracing = false,
+    enableMetrics = false
+    // enableCloudLogging = false,
+  } = config;
+  const projectId = await getGcpProjectId();
+  if (!projectId) {
+    logger.warn(GCP_PROJECT_ERROR_MESSAGE);
+    return {};
+  }
+  return {
+    spanProcessors: enableTracing ? [
+      new BatchSpanProcessor(new TraceExporter({ projectId }))
+    ] : [],
+    metricReaders: enableMetrics ? [
+      new PeriodicExportingMetricReader({
+        exporter: new MetricExporter({ projectId }),
+        exportIntervalMillis: 5e3
+      })
+    ] : [],
+    logRecordProcessors: []
+  };
+}
+function getGcpResource() {
+  return detectResources({ detectors: [gcpDetector] });
+}
+export {
+  getGcpExporters,
+  getGcpResource
+};
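getGcpExporters resolves the active project through google-auth-library and returns Cloud Trace span processors and a Cloud Monitoring metric reader (log export is left empty for now); getGcpResource runs the GCP resource detector. A sketch of wiring this into maybeSetOtelProviders (the root import assumes the Node entry point re-exports these modules, as the index.d.ts change further down indicates):

import { getGcpExporters, getGcpResource, maybeSetOtelProviders } from "@google/adk";

async function setUpGcpTelemetry() {
  // Needs Application Default Credentials that resolve to a project ID;
  // otherwise getGcpExporters logs a warning and returns an empty hooks object.
  const gcpHooks = await getGcpExporters({ enableTracing: true, enableMetrics: true });
  maybeSetOtelProviders([gcpHooks], getGcpResource());
}

void setUpGcpTelemetry();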
@@ -0,0 +1,67 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { trace, metrics } from "@opentelemetry/api";
+import { logs } from "@opentelemetry/api-logs";
+import { LoggerProvider, BatchLogRecordProcessor } from "@opentelemetry/sdk-logs";
+import { MeterProvider, PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
+import { detectResources } from "@opentelemetry/resources";
+import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
+import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
+import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
+import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
+import { OTLPLogExporter } from "@opentelemetry/exporter-logs-otlp-http";
+function maybeSetOtelProviders(otelHooksToSetup = [], otelResource) {
+  const resource = otelResource || getOtelResource();
+  const allHooks = [...otelHooksToSetup, getOtelExporters()];
+  const spanProcessors = allHooks.flatMap((hooks) => hooks.spanProcessors || []);
+  const metricReaders = allHooks.flatMap((hooks) => hooks.metricReaders || []);
+  const logRecordProcessors = allHooks.flatMap((hooks) => hooks.logRecordProcessors || []);
+  if (spanProcessors.length > 0) {
+    const tracerProvider = new NodeTracerProvider({
+      resource,
+      spanProcessors
+    });
+    tracerProvider.register();
+    trace.setGlobalTracerProvider(tracerProvider);
+  }
+  if (metricReaders.length > 0) {
+    const meterProvider = new MeterProvider({
+      readers: metricReaders,
+      resource
+    });
+    metrics.setGlobalMeterProvider(meterProvider);
+  }
+  if (logRecordProcessors.length > 0) {
+    const loggerProvider = new LoggerProvider({
+      resource,
+      processors: logRecordProcessors
+    });
+    logs.setGlobalLoggerProvider(loggerProvider);
+  }
+}
+function getOtelResource() {
+  return detectResources({
+    detectors: []
+  });
+}
+function getOtelExportersConfig() {
+  return {
+    enableTracing: !!(process.env.OTEL_EXPORTER_OTLP_ENDPOINT || process.env.OTEL_EXPORTER_OTLP_TRACES_ENDPOINT),
+    enableMetrics: !!(process.env.OTEL_EXPORTER_OTLP_ENDPOINT || process.env.OTEL_EXPORTER_OTLP_METRICS_ENDPOINT),
+    enableLogging: !!(process.env.OTEL_EXPORTER_OTLP_ENDPOINT || process.env.OTEL_EXPORTER_OTLP_LOGS_ENDPOINT)
+  };
+}
+function getOtelExporters(config = getOtelExportersConfig()) {
+  const { enableTracing, enableMetrics, enableLogging } = config;
+  return {
+    spanProcessors: enableTracing ? [new BatchSpanProcessor(new OTLPTraceExporter())] : [],
+    metricReaders: enableMetrics ? [new PeriodicExportingMetricReader({ exporter: new OTLPMetricExporter() })] : [],
+    logRecordProcessors: enableLogging ? [new BatchLogRecordProcessor(new OTLPLogExporter())] : []
+  };
+}
+export {
+  maybeSetOtelProviders
+};
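Because maybeSetOtelProviders always appends getOtelExporters(), whose defaults come straight from the standard OTLP environment variables, a bare call is enough to export to an OTLP collector, and the call sets up nothing when no hooks and no OTLP variables are present. A minimal sketch (endpoint value is a placeholder):

import { maybeSetOtelProviders } from "@google/adk";

// With OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 in the environment this
// registers a NodeTracerProvider, MeterProvider, and LoggerProvider backed by the
// OTLP HTTP exporters; with no OTLP variables and no hooks it is effectively a no-op.
maybeSetOtelProviders();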
@@ -0,0 +1,195 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { trace, context } from "@opentelemetry/api";
+import { version } from "../version.js";
+const GEN_AI_AGENT_DESCRIPTION = "gen_ai.agent.description";
+const GEN_AI_AGENT_NAME = "gen_ai.agent.name";
+const GEN_AI_CONVERSATION_ID = "gen_ai.conversation.id";
+const GEN_AI_OPERATION_NAME = "gen_ai.operation.name";
+const GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id";
+const GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description";
+const GEN_AI_TOOL_NAME = "gen_ai.tool.name";
+const GEN_AI_TOOL_TYPE = "gen_ai.tool.type";
+const tracer = trace.getTracer(
+  "gcp.vertex.agent",
+  version
+);
+function safeJsonSerialize(obj) {
+  try {
+    return JSON.stringify(obj);
+  } catch (error) {
+    return "<not serializable>";
+  }
+}
+function traceAgentInvocation({
+  agent,
+  invocationContext
+}) {
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    [GEN_AI_OPERATION_NAME]: "invoke_agent",
+    // Conditionally Required
+    [GEN_AI_AGENT_DESCRIPTION]: agent.description,
+    [GEN_AI_AGENT_NAME]: agent.name,
+    [GEN_AI_CONVERSATION_ID]: invocationContext.session.id
+  });
+}
+function traceToolCall({
+  tool,
+  args,
+  functionResponseEvent
+}) {
+  var _a, _b;
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    [GEN_AI_OPERATION_NAME]: "execute_tool",
+    [GEN_AI_TOOL_DESCRIPTION]: tool.description || "",
+    [GEN_AI_TOOL_NAME]: tool.name,
+    // e.g. FunctionTool
+    [GEN_AI_TOOL_TYPE]: tool.constructor.name,
+    // Setting empty llm request and response (as UI expect these) while not
+    // applicable for tool_response.
+    "gcp.vertex.agent.llm_request": "{}",
+    "gcp.vertex.agent.llm_response": "{}",
+    "gcp.vertex.agent.tool_call_args": shouldAddRequestResponseToSpans() ? safeJsonSerialize(args) : "{}"
+  });
+  let toolCallId = "<not specified>";
+  let toolResponse = "<not specified>";
+  if ((_a = functionResponseEvent.content) == null ? void 0 : _a.parts) {
+    const responseParts = functionResponseEvent.content.parts;
+    const functionResponse = (_b = responseParts[0]) == null ? void 0 : _b.functionResponse;
+    if (functionResponse == null ? void 0 : functionResponse.id) {
+      toolCallId = functionResponse.id;
+    }
+    if (functionResponse == null ? void 0 : functionResponse.response) {
+      toolResponse = functionResponse.response;
+    }
+  }
+  if (typeof toolResponse !== "object" || toolResponse === null) {
+    toolResponse = { result: toolResponse };
+  }
+  span.setAttributes({
+    [GEN_AI_TOOL_CALL_ID]: toolCallId,
+    "gcp.vertex.agent.event_id": functionResponseEvent.id,
+    "gcp.vertex.agent.tool_response": shouldAddRequestResponseToSpans() ? safeJsonSerialize(toolResponse) : "{}"
+  });
+}
+function traceMergedToolCalls({
+  responseEventId,
+  functionResponseEvent
+}) {
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    [GEN_AI_OPERATION_NAME]: "execute_tool",
+    [GEN_AI_TOOL_NAME]: "(merged tools)",
+    [GEN_AI_TOOL_DESCRIPTION]: "(merged tools)",
+    [GEN_AI_TOOL_CALL_ID]: responseEventId,
+    "gcp.vertex.agent.tool_call_args": "N/A",
+    "gcp.vertex.agent.event_id": responseEventId,
+    // Setting empty llm request and response (as UI expect these) while not
+    // applicable for tool_response.
+    "gcp.vertex.agent.llm_request": "{}",
+    "gcp.vertex.agent.llm_response": "{}"
+  });
+  span.setAttribute("gcp.vertex.agent.tool_response", shouldAddRequestResponseToSpans() ? safeJsonSerialize(functionResponseEvent) : "{}");
+}
+function traceCallLlm({
+  invocationContext,
+  eventId,
+  llmRequest,
+  llmResponse
+}) {
+  var _a, _b, _c;
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    "gen_ai.system": "gcp.vertex.agent",
+    "gen_ai.request.model": llmRequest.model,
+    "gcp.vertex.agent.invocation_id": invocationContext.invocationId,
+    "gcp.vertex.agent.session_id": invocationContext.session.id,
+    "gcp.vertex.agent.event_id": eventId,
+    // Consider removing once GenAI SDK provides a way to record this info.
+    "gcp.vertex.agent.llm_request": shouldAddRequestResponseToSpans() ? safeJsonSerialize(buildLlmRequestForTrace(llmRequest)) : "{}"
+  });
+  if ((_a = llmRequest.config) == null ? void 0 : _a.topP) {
+    span.setAttribute("gen_ai.request.top_p", llmRequest.config.topP);
+  }
+  if (((_b = llmRequest.config) == null ? void 0 : _b.maxOutputTokens) !== void 0) {
+    span.setAttribute("gen_ai.request.max_tokens", llmRequest.config.maxOutputTokens);
+  }
+  span.setAttribute("gcp.vertex.agent.llm_response", shouldAddRequestResponseToSpans() ? safeJsonSerialize(llmResponse) : "{}");
+  if (llmResponse.usageMetadata) {
+    span.setAttribute("gen_ai.usage.input_tokens", llmResponse.usageMetadata.promptTokenCount || 0);
+  }
+  if ((_c = llmResponse.usageMetadata) == null ? void 0 : _c.candidatesTokenCount) {
+    span.setAttribute("gen_ai.usage.output_tokens", llmResponse.usageMetadata.candidatesTokenCount);
+  }
+  if (llmResponse.finishReason) {
+    const finishReasonValue = typeof llmResponse.finishReason === "string" ? llmResponse.finishReason.toLowerCase() : String(llmResponse.finishReason).toLowerCase();
+    span.setAttribute("gen_ai.response.finish_reasons", [finishReasonValue]);
+  }
+}
+function traceSendData({
+  invocationContext,
+  eventId,
+  data
+}) {
+  const span = trace.getActiveSpan();
+  if (!span) return;
+  span.setAttributes({
+    "gcp.vertex.agent.invocation_id": invocationContext.invocationId,
+    "gcp.vertex.agent.event_id": eventId
+  });
+  span.setAttribute("gcp.vertex.agent.data", shouldAddRequestResponseToSpans() ? safeJsonSerialize(data) : "{}");
+}
+function buildLlmRequestForTrace(llmRequest) {
+  const result = {
+    model: llmRequest.model,
+    contents: []
+  };
+  if (llmRequest.config) {
+    const { responseSchema, ...cleanConfig } = llmRequest.config;
+    result.config = cleanConfig;
+  }
+  result.contents = llmRequest.contents.map((content) => {
+    var _a;
+    return {
+      role: content.role,
+      parts: ((_a = content.parts) == null ? void 0 : _a.filter((part) => !part.inlineData)) || []
+    };
+  });
+  return result;
+}
+function bindAsyncGenerator(ctx, generator) {
+  return {
+    // Bind the next() method to execute within the provided context
+    next: context.bind(ctx, generator.next.bind(generator)),
+    // Bind the return() method to execute within the provided context
+    return: context.bind(ctx, generator.return.bind(generator)),
+    // Bind the throw() method to execute within the provided context
+    throw: context.bind(ctx, generator.throw.bind(generator)),
+    // Ensure the async iterator symbol also returns a context-bound generator
+    [Symbol.asyncIterator]() {
+      return bindAsyncGenerator(ctx, generator[Symbol.asyncIterator]());
+    }
+  };
+}
+function shouldAddRequestResponseToSpans() {
+  const envValue = process.env.ADK_CAPTURE_MESSAGE_CONTENT_IN_SPANS || "true";
+  return envValue === "true" || envValue === "1";
+}
+export {
+  bindAsyncGenerator,
+  traceAgentInvocation,
+  traceCallLlm,
+  traceMergedToolCalls,
+  traceSendData,
+  traceToolCall,
+  tracer
+};
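All of the request/response payload attributes above go through shouldAddRequestResponseToSpans(), which reads ADK_CAPTURE_MESSAGE_CONTENT_IN_SPANS and treats capture as enabled by default ("true" or "1" enable it; any other value disables it). A minimal opt-out sketch:

// Set before any agent spans are recorded, e.g. at process start
// (equivalently, export ADK_CAPTURE_MESSAGE_CONTENT_IN_SPANS=false in the shell).
process.env.ADK_CAPTURE_MESSAGE_CONTENT_IN_SPANS = "false";
// LLM requests/responses, tool call args, and tool responses are then recorded as "{}".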
@@ -0,0 +1,26 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { version } from "../version.js";
+import { isBrowser } from "./env_aware_utils.js";
+const ADK_LABEL = "google-adk";
+const LANGUAGE_LABEL = "gl-typescript";
+const AGENT_ENGINE_TELEMETRY_TAG = "remote_reasoning_engine";
+const AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME = "GOOGLE_CLOUD_AGENT_ENGINE_ID";
+function _getDefaultLabels() {
+  let frameworkLabel = `${ADK_LABEL}/${version}`;
+  if (!isBrowser() && process.env[AGENT_ENGINE_TELEMETRY_ENV_VARIABLE_NAME]) {
+    frameworkLabel = `${frameworkLabel}+${AGENT_ENGINE_TELEMETRY_TAG}`;
+  }
+  const languageLabel = `${LANGUAGE_LABEL}/${isBrowser() ? window.navigator.userAgent : process.version}`;
+  return [frameworkLabel, languageLabel];
+}
+function getClientLabels() {
+  const labels = _getDefaultLabels();
+  return labels;
+}
+export {
+  getClientLabels
+};
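These labels are what BaseLlm.trackingHeaders joins into the x-goog-api-client and user-agent values. An illustrative re-computation of the same logic on Node (the version literal stands in for the imported package version; actual values differ per install):

const adkVersion = "0.2.0";
const framework = `google-adk/${adkVersion}` +
  (process.env.GOOGLE_CLOUD_AGENT_ENGINE_ID ? "+remote_reasoning_engine" : "");
const language = `gl-typescript/${process.version}`;
console.log([framework, language].join(" "));
// e.g. "google-adk/0.2.0 gl-typescript/v22.12.0"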
package/dist/esm/version.js CHANGED
package/dist/types/common.d.ts CHANGED
@@ -40,6 +40,7 @@ export { Runner } from './runner/runner.js';
 export { InMemorySessionService } from './sessions/in_memory_session_service.js';
 export { createSession } from './sessions/session.js';
 export type { Session } from './sessions/session.js';
+export { State } from './sessions/state.js';
 export { AgentTool } from './tools/agent_tool.js';
 export { BaseTool } from './tools/base_tool.js';
 export { BaseToolset } from './tools/base_toolset.js';
@@ -50,6 +51,7 @@ export { ToolConfirmation } from './tools/tool_confirmation.js';
 export { ToolContext } from './tools/tool_context.js';
 export { LogLevel, setLogLevel } from './utils/logger.js';
 export { zodObjectToSchema } from './utils/simple_zod_to_json.js';
+export { version } from './version.js';
 export * from './artifacts/base_artifact_service.js';
 export * from './memory/base_memory_service.js';
 export * from './sessions/base_session_service.js';
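The shared entry point now also surfaces the session State class and the package version string. A small sketch, assuming the package root resolves to this common entry in the consumer's Node or bundler setup:

import { State, version } from "@google/adk";

console.log(`@google/adk ${version}`); // expected to be "0.2.0" for this release
let pendingState: State | undefined;   // State is now usable as both a value and a type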
package/dist/types/index.d.ts CHANGED
@@ -8,3 +8,5 @@ export * from './tools/mcp/mcp_session_manager.js';
 export * from './tools/mcp/mcp_tool.js';
 export * from './tools/mcp/mcp_toolset.js';
 export * from './artifacts/gcs_artifact_service.js';
+export * from './telemetry/setup.js';
+export * from './telemetry/google_cloud.js';
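With these re-exports the telemetry setup API becomes part of the public surface of the Node entry point, so the functions and types used in the sketches above can be imported directly (entry-point mapping assumed from this file layout):

import {
  maybeSetOtelProviders,
  getGcpExporters,
  getGcpResource,
  type OTelHooks,
  type OtelExportersConfig,
} from "@google/adk";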
@@ -40,6 +40,7 @@ export declare abstract class BaseLlm {
      * @return A live connection to the LLM.
      */
     abstract connect(llmRequest: LlmRequest): Promise<BaseLlmConnection>;
+    protected get trackingHeaders(): Record<string, string>;
     /**
      * Appends a user content, so that model can continue to output.
      *
@@ -74,7 +74,6 @@ export declare class Gemini extends BaseLlm {
     generateContentAsync(llmRequest: LlmRequest, stream?: boolean): AsyncGenerator<LlmResponse, void>;
     get apiClient(): GoogleGenAI;
     get apiBackend(): GoogleLLMVariant;
-    get trackingHeaders(): Record<string, string>;
     get liveApiVersion(): string;
     get liveApiClient(): GoogleGenAI;
     /**
@@ -0,0 +1,9 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { Resource } from '@opentelemetry/resources';
+import { OtelExportersConfig, OTelHooks } from './setup.js';
+export declare function getGcpExporters(config?: OtelExportersConfig): Promise<OTelHooks>;
+export declare function getGcpResource(): Resource;
@@ -0,0 +1,48 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import { LogRecordProcessor } from '@opentelemetry/sdk-logs';
+import { MetricReader } from '@opentelemetry/sdk-metrics';
+import { Resource } from '@opentelemetry/resources';
+import { SpanProcessor } from '@opentelemetry/sdk-trace-base';
+export interface OtelExportersConfig {
+    enableTracing?: boolean;
+    enableMetrics?: boolean;
+    enableLogging?: boolean;
+}
+/**
+ * Configuration hooks for OpenTelemetry setup.
+ *
+ * This interface defines the structure for configuring OpenTelemetry
+ * components including span processors, metric readers, and log record processors.
+ */
+export interface OTelHooks {
+    spanProcessors?: SpanProcessor[];
+    metricReaders?: MetricReader[];
+    logRecordProcessors?: LogRecordProcessor[];
+}
+/**
+ * Sets up OTel providers if hooks for a given telemetry type were passed.
+ *
+ * Additionally adds generic OTLP exporters based on following env variables:
+ * OTEL_EXPORTER_OTLP_ENDPOINT
+ * OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+ * OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+ * OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
+ * See https://opentelemetry.io/docs/languages/sdk-configuration/otlp-exporter/
+ * for how they are used.
+ *
+ * If a provider for a specific telemetry type was already globally set -
+ * this function will not override it or register more exporters.
+ *
+ * @experimental (Experimental, subject to change)
+ *
+ * @param otelHooksToSetup per-telemetry-type processors and readers to be added
+ * to OTel providers. If no hooks for a specific telemetry type are passed -
+ * provider will not be set.
+ * @param otelResource OTel resource to use in providers.
+ * If empty - default OTel resource detection will be used.
+ */
+export declare function maybeSetOtelProviders(otelHooksToSetup?: OTelHooks[], otelResource?: Resource): void;
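Since maybeSetOtelProviders takes an array of OTelHooks, application-supplied processors can be combined with the GCP ones, and providers are only created for telemetry types that end up with at least one hook. A sketch using the console span exporter for local debugging (illustrative, in an ES module so top-level await is available):

import { BatchSpanProcessor, ConsoleSpanExporter } from "@opentelemetry/sdk-trace-base";
import { getGcpExporters, maybeSetOtelProviders, type OTelHooks } from "@google/adk";

const debugHooks: OTelHooks = {
  spanProcessors: [new BatchSpanProcessor(new ConsoleSpanExporter())],
};

const gcpHooks = await getGcpExporters({ enableTracing: true });
// Only a tracer provider is registered here: no metric or log hooks were supplied.
maybeSetOtelProviders([debugHooks, gcpHooks]);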
@@ -0,0 +1,111 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+/**
+ * NOTE:
+ *
+ * We expect that the underlying GenAI SDK will provide a certain
+ * level of tracing and logging telemetry aligned with Open Telemetry
+ * Semantic Conventions (such as logging prompts, responses,
+ * request properties, etc.) and so the information that is recorded by the
+ * Agent Development Kit should be focused on the higher-level
+ * constructs of the framework that are not observable by the SDK.
+ */
+import { Content } from '@google/genai';
+import { Context } from '@opentelemetry/api';
+import { BaseAgent } from '../agents/base_agent.js';
+import { InvocationContext } from '../agents/invocation_context.js';
+import { Event } from '../events/event.js';
+import { LlmRequest } from '../models/llm_request.js';
+import { LlmResponse } from '../models/llm_response.js';
+import { BaseTool } from '../tools/base_tool.js';
+export declare const tracer: import("@opentelemetry/api").Tracer;
+export interface TraceAgentInvocationParams {
+    agent: BaseAgent;
+    invocationContext: InvocationContext;
+}
+/**
+ * Sets span attributes immediately available on agent invocation according to OTEL semconv version 1.37.
+ *
+ * @param params The parameters object containing agent and invocation context.
+ *
+ * Inference related fields are not set, due to their planned removal from invoke_agent span:
+ * https://github.com/open-telemetry/semantic-conventions/issues/2632
+ *
+ * `gen_ai.agent.id` is not set because currently it's unclear what attributes this field should have, specifically:
+ * - In which scope should it be unique (globally, given project, given agentic flow, given deployment).
+ * - Should it be unchanging between deployments, and how this should this be achieved.
+ *
+ * `gen_ai.data_source.id` is not set because it's not available.
+ * Closest type which could contain this information is types.GroundingMetadata, which does not have an ID.
+ *
+ * `server.*` attributes are not set pending confirmation from aabmass.
+ */
+export declare function traceAgentInvocation({ agent, invocationContext, }: TraceAgentInvocationParams): void;
+export interface TraceToolCallParams {
+    tool: BaseTool;
+    args: Record<string, unknown>;
+    functionResponseEvent: Event;
+}
+/**
+ * Traces tool call.
+ *
+ * @param params The parameters object containing tool, args, and function response event.
+ */
+export declare function traceToolCall({ tool, args, functionResponseEvent, }: TraceToolCallParams): void;
+export interface TraceMergedToolCallsParams {
+    responseEventId: string;
+    functionResponseEvent: Event;
+}
+/**
+ * Traces merged tool call events.
+ *
+ * Calling this function is not needed for telemetry purposes. This is provided
+ * for preventing /debug/trace requests (typically sent by web UI).
+ *
+ * @param params The parameters object containing response event ID and function response event.
+ */
+export declare function traceMergedToolCalls({ responseEventId, functionResponseEvent, }: TraceMergedToolCallsParams): void;
+export interface TraceCallLlmParams {
+    invocationContext: InvocationContext;
+    eventId: string;
+    llmRequest: LlmRequest;
+    llmResponse: LlmResponse;
+}
+/**
+ * Traces a call to the LLM.
+ *
+ * This function records details about the LLM request and response as
+ * attributes on the current OpenTelemetry span.
+ *
+ * @param params The parameters object containing invocationContext, eventId, llmRequest, and llmResponse.
+ */
+export declare function traceCallLlm({ invocationContext, eventId, llmRequest, llmResponse, }: TraceCallLlmParams): void;
+export interface TraceSendDataParams {
+    /** The invocation context for the current agent run. */
+    invocationContext: InvocationContext;
+    /** The ID of the event. */
+    eventId: string;
+    /** A list of content objects. */
+    data: Content[];
+}
+/**
+ * Traces the sending of data to the agent.
+ *
+ * This function records details about the data sent to the agent as
+ * attributes on the current OpenTelemetry span.
+ *
+ * @param params The parameters object containing invocationContext, eventId, and data.
+ */
+export declare function traceSendData({ invocationContext, eventId, data, }: TraceSendDataParams): void;
+/**
+ * Binds an async generator to OpenTelemetry context for trace propagation.
+ * This is a temporary solution.
+ * @param ctx - The OpenTelemetry context to bind the generator to
+ * @param generator - The async generator to be bound to the context
+ *
+ * @returns A new async generator that executes all operations within the provided context
+ */
+export declare function bindAsyncGenerator<T = unknown, TReturn = any, TNext = unknown>(ctx: Context, generator: AsyncGenerator<T, TReturn, TNext>): AsyncGenerator<T, TReturn, TNext>;
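bindAsyncGenerator is what keeps spans parented correctly across async generator boundaries, since each next() call could otherwise run outside the context that was active when the generator was created. A usage sketch with a local generator (the tracing module is not re-exported from the package root in this release, so access to bindAsyncGenerator is assumed here):

import { context, trace } from "@opentelemetry/api";

async function* steps() {
  yield "plan";
  yield "act";
}

// Capture the currently active context; every next()/return()/throw() on the
// bound generator then executes under that context, so spans started while
// consuming it stay attached to the original trace.
const bound = bindAsyncGenerator(context.active(), steps());
for await (const step of bound) {
  trace.getActiveSpan()?.addEvent(`step: ${step}`);
}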
package/dist/types/version.d.ts CHANGED