@traceloop/instrumentation-bedrock 0.9.2 → 0.11.0
This diff compares publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear there.
- package/dist/index.js +1 -1
- package/dist/index.mjs +285 -0
- package/package.json +4 -3
package/dist/index.js
CHANGED
@@ -5,7 +5,7 @@ var api = require('@opentelemetry/api');
 var instrumentation = require('@opentelemetry/instrumentation');
 var aiSemanticConventions = require('@traceloop/ai-semantic-conventions');

-var version = "0.9.2";
+var version = "0.11.0";

 class BedrockInstrumentation extends instrumentation.InstrumentationBase {
     constructor(config = {}) {
package/dist/index.mjs
ADDED
@@ -0,0 +1,285 @@
+import { __awaiter, __asyncValues } from 'tslib';
+import { trace, context, SpanStatusCode, SpanKind } from '@opentelemetry/api';
+import { InstrumentationBase, InstrumentationNodeModuleDefinition, safeExecuteInTheMiddle } from '@opentelemetry/instrumentation';
+import { SpanAttributes, LLMRequestTypeValues, CONTEXT_KEY_ALLOW_TRACE_CONTENT } from '@traceloop/ai-semantic-conventions';
+
+var version = "0.11.0";
+
+class BedrockInstrumentation extends InstrumentationBase {
+    constructor(config = {}) {
+        super("@traceloop/instrumentation-bedrock", version, config);
+    }
+    setConfig(config = {}) {
+        super.setConfig(config);
+    }
+    init() {
+        const module = new InstrumentationNodeModuleDefinition("@aws-sdk/client-bedrock-runtime", [">=3.499.0"], this.wrap.bind(this), this.unwrap.bind(this));
+        return module;
+    }
+    manuallyInstrument(module) {
+        this._diag.debug(`Patching @aws-sdk/client-bedrock-runtime manually`);
+        this._wrap(module.BedrockRuntimeClient.prototype, "send", this.wrapperMethod());
+    }
+    wrap(module, moduleVersion) {
+        this._diag.debug(`Patching @aws-sdk/client-bedrock-runtime@${moduleVersion}`);
+        this._wrap(module.BedrockRuntimeClient.prototype, "send", this.wrapperMethod());
+        return module;
+    }
+    unwrap(module, moduleVersion) {
+        this._diag.debug(`Unpatching @aws-sdk/client-bedrock-runtime@${moduleVersion}`);
+        this._unwrap(module.BedrockRuntimeClient.prototype, "send");
+    }
+    wrapperMethod() {
+        // eslint-disable-next-line @typescript-eslint/no-this-alias
+        const plugin = this;
+        // eslint-disable-next-line @typescript-eslint/ban-types
+        return (original) => {
+            return function method(...args) {
+                const span = plugin._startSpan({
+                    params: args[0],
+                });
+                const execContext = trace.setSpan(context.active(), span);
+                const execPromise = safeExecuteInTheMiddle(() => {
+                    return context.with(execContext, () => {
+                        return original.apply(this, args);
+                    });
+                }, (e) => {
+                    if (e) {
+                        plugin._diag.error(`Error in bedrock instrumentation`, e);
+                    }
+                });
+                const wrappedPromise = plugin._wrapPromise(span, execPromise);
+                return context.bind(execContext, wrappedPromise);
+            };
+        };
+    }
+    _wrapPromise(span, promise) {
+        return promise
+            .then((result) => __awaiter(this, void 0, void 0, function* () {
+            yield this._endSpan({
+                span,
+                result: result,
+            });
+            return new Promise((resolve) => resolve(result));
+        }))
+            .catch((error) => {
+            return new Promise((_, reject) => {
+                span.setStatus({
+                    code: SpanStatusCode.ERROR,
+                    message: error.message,
+                });
+                span.recordException(error);
+                span.end();
+                reject(error);
+            });
+        });
+    }
+    _startSpan({ params, }) {
+        var _a, _b;
+        let attributes = {};
+        try {
+            const [vendor, model] = params.input.modelId
+                ? params.input.modelId.split(".")
+                : ["", ""];
+            attributes = {
+                [SpanAttributes.LLM_SYSTEM]: vendor,
+                [SpanAttributes.LLM_REQUEST_MODEL]: model,
+                [SpanAttributes.LLM_RESPONSE_MODEL]: model,
+                [SpanAttributes.LLM_REQUEST_TYPE]: LLMRequestTypeValues.COMPLETION,
+            };
+            if (typeof params.input.body === "string") {
+                const requestBody = JSON.parse(params.input.body);
+                attributes = Object.assign(Object.assign({}, attributes), this._setRequestAttributes(vendor, requestBody));
+            }
+        }
+        catch (e) {
+            this._diag.debug(e);
+            (_b = (_a = this._config).exceptionLogger) === null || _b === void 0 ? void 0 : _b.call(_a, e);
+        }
+        return this.tracer.startSpan(`bedrock.completion`, {
+            kind: SpanKind.CLIENT,
+            attributes,
+        });
+    }
+    _endSpan({ span, result, }) {
+        var _a, e_1, _b, _c;
+        var _d, _e, _f;
+        return __awaiter(this, void 0, void 0, function* () {
+            try {
+                if ("body" in result) {
+                    const attributes = "attributes" in span
+                        ? span["attributes"]
+                        : {};
+                    if (SpanAttributes.LLM_SYSTEM in attributes) {
+                        if (!(result.body instanceof Object.getPrototypeOf(Uint8Array))) {
+                            const rawRes = result.body;
+                            let streamedContent = "";
+                            try {
+                                for (var _g = true, rawRes_1 = __asyncValues(rawRes), rawRes_1_1; rawRes_1_1 = yield rawRes_1.next(), _a = rawRes_1_1.done, !_a; _g = true) {
+                                    _c = rawRes_1_1.value;
+                                    _g = false;
+                                    const value = _c;
+                                    // Convert it to a JSON String
+                                    const jsonString = new TextDecoder().decode((_d = value.chunk) === null || _d === void 0 ? void 0 : _d.bytes);
+                                    // Parse the JSON string
+                                    const parsedResponse = JSON.parse(jsonString);
+                                    if ("amazon-bedrock-invocationMetrics" in parsedResponse) {
+                                        span.setAttribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, parsedResponse["amazon-bedrock-invocationMetrics"]["inputTokenCount"]);
+                                        span.setAttribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, parsedResponse["amazon-bedrock-invocationMetrics"]["outputTokenCount"]);
+                                        span.setAttribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, parsedResponse["amazon-bedrock-invocationMetrics"]["inputTokenCount"] +
+                                            parsedResponse["amazon-bedrock-invocationMetrics"]["outputTokenCount"]);
+                                    }
+                                    let responseAttributes = this._setResponseAttributes(attributes[SpanAttributes.LLM_SYSTEM], parsedResponse, true);
+                                    // ! NOTE: This make sure the content always have all streamed chunks
+                                    if (this._shouldSendPrompts()) {
+                                        // Update local value with attribute value that was set by _setResponseAttributes
+                                        streamedContent +=
+                                            responseAttributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`];
+                                        // re-assign the new value to responseAttributes
+                                        responseAttributes = Object.assign(Object.assign({}, responseAttributes), { [`${SpanAttributes.LLM_COMPLETIONS}.0.content`]: streamedContent });
+                                    }
+                                    span.setAttributes(responseAttributes);
+                                }
+                            }
+                            catch (e_1_1) { e_1 = { error: e_1_1 }; }
+                            finally {
+                                try {
+                                    if (!_g && !_a && (_b = rawRes_1.return)) yield _b.call(rawRes_1);
+                                }
+                                finally { if (e_1) throw e_1.error; }
+                            }
+                        }
+                        else if (result.body instanceof Object.getPrototypeOf(Uint8Array)) {
+                            // Convert it to a JSON String
+                            const jsonString = new TextDecoder().decode(result.body);
+                            // Parse the JSON string
+                            const parsedResponse = JSON.parse(jsonString);
+                            const responseAttributes = this._setResponseAttributes(attributes[SpanAttributes.LLM_SYSTEM], parsedResponse);
+                            span.setAttributes(responseAttributes);
+                        }
+                    }
+                }
+            }
+            catch (e) {
+                this._diag.debug(e);
+                (_f = (_e = this._config).exceptionLogger) === null || _f === void 0 ? void 0 : _f.call(_e, e);
+            }
+            span.setStatus({ code: SpanStatusCode.OK });
+            span.end();
+        });
+    }
+    _setRequestAttributes(vendor, requestBody) {
+        switch (vendor) {
+            case "ai21": {
+                return Object.assign({ [SpanAttributes.LLM_REQUEST_TOP_P]: requestBody["topP"], [SpanAttributes.LLM_REQUEST_TEMPERATURE]: requestBody["temperature"], [SpanAttributes.LLM_REQUEST_MAX_TOKENS]: requestBody["maxTokens"], [SpanAttributes.LLM_PRESENCE_PENALTY]: requestBody["presencePenalty"]["scale"], [SpanAttributes.LLM_FREQUENCY_PENALTY]: requestBody["frequencyPenalty"]["scale"] }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_PROMPTS}.0.role`]: "user",
+                        [`${SpanAttributes.LLM_PROMPTS}.0.content`]: requestBody["prompt"],
+                    }
+                    : {}));
+            }
+            case "amazon": {
+                return Object.assign({ [SpanAttributes.LLM_REQUEST_TOP_P]: requestBody["textGenerationConfig"]["topP"], [SpanAttributes.LLM_REQUEST_TEMPERATURE]: requestBody["textGenerationConfig"]["temperature"], [SpanAttributes.LLM_REQUEST_MAX_TOKENS]: requestBody["textGenerationConfig"]["maxTokenCount"] }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_PROMPTS}.0.role`]: "user",
+                        [`${SpanAttributes.LLM_PROMPTS}.0.content`]: requestBody["inputText"],
+                    }
+                    : {}));
+            }
+            case "anthropic": {
+                return Object.assign({ [SpanAttributes.LLM_REQUEST_TOP_P]: requestBody["top_p"], [SpanAttributes.LLM_TOP_K]: requestBody["top_k"], [SpanAttributes.LLM_REQUEST_TEMPERATURE]: requestBody["temperature"], [SpanAttributes.LLM_REQUEST_MAX_TOKENS]: requestBody["max_tokens_to_sample"] }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_PROMPTS}.0.role`]: "user",
+                        [`${SpanAttributes.LLM_PROMPTS}.0.content`]: requestBody["prompt"]
+                            // The format is removing when we are setting span attribute
+                            .replace("\n\nHuman:", "")
+                            .replace("\n\nAssistant:", ""),
+                    }
+                    : {}));
+            }
+            case "cohere": {
+                return Object.assign({ [SpanAttributes.LLM_REQUEST_TOP_P]: requestBody["p"], [SpanAttributes.LLM_TOP_K]: requestBody["k"], [SpanAttributes.LLM_REQUEST_TEMPERATURE]: requestBody["temperature"], [SpanAttributes.LLM_REQUEST_MAX_TOKENS]: requestBody["max_tokens"] }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_PROMPTS}.0.role`]: "user",
+                        [`${SpanAttributes.LLM_PROMPTS}.0.content`]: requestBody["prompt"],
+                    }
+                    : {}));
+            }
+            case "meta": {
+                return Object.assign({ [SpanAttributes.LLM_REQUEST_TOP_P]: requestBody["top_p"], [SpanAttributes.LLM_REQUEST_TEMPERATURE]: requestBody["temperature"], [SpanAttributes.LLM_REQUEST_MAX_TOKENS]: requestBody["max_gen_len"] }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_PROMPTS}.0.role`]: "user",
+                        [`${SpanAttributes.LLM_PROMPTS}.0.content`]: requestBody["prompt"],
+                    }
+                    : {}));
+            }
+            default:
+                return {};
+        }
+    }
+    _setResponseAttributes(vendor, response, isStream = false) {
+        switch (vendor) {
+            case "ai21": {
+                return Object.assign({ [`${SpanAttributes.LLM_COMPLETIONS}.0.finish_reason`]: response["completions"][0]["finishReason"]["reason"], [`${SpanAttributes.LLM_COMPLETIONS}.0.role`]: "assistant" }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_COMPLETIONS}.0.content`]: response["completions"][0]["data"]["text"],
+                    }
+                    : {}));
+            }
+            case "amazon": {
+                return Object.assign({ [`${SpanAttributes.LLM_COMPLETIONS}.0.finish_reason`]: isStream
+                        ? response["completionReason"]
+                        : response["results"][0]["completionReason"], [`${SpanAttributes.LLM_COMPLETIONS}.0.role`]: "assistant", [SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: response["inputTextTokenCount"], [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: isStream
+                        ? response["totalOutputTextTokenCount"]
+                        : response["results"][0]["tokenCount"], [SpanAttributes.LLM_USAGE_TOTAL_TOKENS]: isStream
+                        ? response["inputTextTokenCount"] +
+                            response["totalOutputTextTokenCount"]
+                        : response["inputTextTokenCount"] +
+                            response["results"][0]["tokenCount"] }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_COMPLETIONS}.0.content`]: isStream
+                            ? response["outputText"]
+                            : response["results"][0]["outputText"],
+                    }
+                    : {}));
+            }
+            case "anthropic": {
+                return Object.assign({ [`${SpanAttributes.LLM_COMPLETIONS}.0.finish_reason`]: response["stop_reason"], [`${SpanAttributes.LLM_COMPLETIONS}.0.role`]: "assistant" }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_COMPLETIONS}.0.content`]: response["completion"],
+                    }
+                    : {}));
+            }
+            case "cohere": {
+                return Object.assign({ [`${SpanAttributes.LLM_COMPLETIONS}.0.finish_reason`]: response["generations"][0]["finish_reason"], [`${SpanAttributes.LLM_COMPLETIONS}.0.role`]: "assistant" }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_COMPLETIONS}.0.content`]: response["generations"][0]["text"],
+                    }
+                    : {}));
+            }
+            case "meta": {
+                return Object.assign({ [`${SpanAttributes.LLM_COMPLETIONS}.0.finish_reason`]: response["stop_reason"], [`${SpanAttributes.LLM_COMPLETIONS}.0.role`]: "assistant", [SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: response["prompt_token_count"], [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: response["generation_token_count"], [SpanAttributes.LLM_USAGE_TOTAL_TOKENS]: response["prompt_token_count"] + response["generation_token_count"] }, (this._shouldSendPrompts()
+                    ? {
+                        [`${SpanAttributes.LLM_COMPLETIONS}.0.content`]: response["generation"],
+                    }
+                    : {}));
+            }
+            default:
+                return {};
+        }
+    }
+    _shouldSendPrompts() {
+        const contextShouldSendPrompts = context
+            .active()
+            .getValue(CONTEXT_KEY_ALLOW_TRACE_CONTENT);
+        if (contextShouldSendPrompts !== undefined) {
+            return contextShouldSendPrompts;
+        }
+        return this._config.traceContent !== undefined
+            ? this._config.traceContent
+            : true;
+    }
+}
+
+export { BedrockInstrumentation };
+//# sourceMappingURL=index.mjs.map
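The bulk of this release is the ESM bundle above: dist/index.mjs ships the same BedrockInstrumentation class that dist/index.js exposes via CommonJS, now importable with native import syntax. As orientation for readers of the diff, here is a minimal, hypothetical wiring sketch; the tracer-provider setup is illustrative and not part of this package, while the traceContent and exceptionLogger options correspond to the this._config.traceContent and this._config.exceptionLogger reads in the code above.

import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
import { registerInstrumentations } from "@opentelemetry/instrumentation";
import { BedrockInstrumentation } from "@traceloop/instrumentation-bedrock";

// Illustrative provider setup; any configured TracerProvider works here.
const provider = new NodeTracerProvider();
provider.register();

registerInstrumentations({
  instrumentations: [
    new BedrockInstrumentation({
      // Gates prompt/completion capture in _shouldSendPrompts(); defaults to true.
      traceContent: true,
      // Invoked from the catch blocks in _startSpan() and _endSpan().
      exceptionLogger: (e) => console.error("bedrock instrumentation error", e),
    }),
  ],
});

Once registered, each BedrockRuntimeClient.send() call is wrapped by wrapperMethod() and produces a bedrock.completion client span, with vendor-specific request and response attributes filled in by _setRequestAttributes() and _setResponseAttributes().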
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@traceloop/instrumentation-bedrock",
-  "version": "0.9.2",
+  "version": "0.11.0",
   "description": "Amazon Bedrock Instrumentation",
   "main": "dist/index.js",
   "module": "dist/index.mjs",
@@ -26,6 +26,7 @@
   },
   "files": [
     "dist/**/*.js",
+    "dist/**/*.mjs",
     "dist/**/*.js.map",
     "dist/**/*.d.ts",
     "doc",
@@ -40,7 +41,7 @@
     "@opentelemetry/core": "^1.25.0",
     "@opentelemetry/instrumentation": "^0.52.0",
     "@opentelemetry/semantic-conventions": "^1.25.0",
-    "@traceloop/ai-semantic-conventions": "^0.
+    "@traceloop/ai-semantic-conventions": "^0.11.0",
     "tslib": "^2.3.0"
   },
   "devDependencies": {
@@ -50,5 +51,5 @@
     "@pollyjs/persister-fs": "^6.0.6"
   },
   "homepage": "https://github.com/traceloop/openllmetry-js/tree/main/packages/instrumentation-openai",
-  "gitHead": "
+  "gitHead": "77fe2d5853f90f5a9c1f1fedece8e3e8fc702adf"
 }
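Beyond the version bump, the notable manifest changes are the new "dist/**/*.mjs" entry in files (so the ESM bundle above is actually published) and the dependency alignment on @traceloop/ai-semantic-conventions ^0.11.0. For consumers, "main" keeps pointing CommonJS resolvers at dist/index.js while "module" points module-field-aware bundlers at dist/index.mjs; whether plain Node resolves the .mjs file depends on an exports map not visible in this diff. A sketch of the two entry points, assuming a bundler that honors "module":

// CommonJS resolution goes through "main" -> dist/index.js:
//   const { BedrockInstrumentation } = require("@traceloop/instrumentation-bedrock");

// ESM/bundler resolution goes through "module" -> dist/index.mjs:
import { BedrockInstrumentation } from "@traceloop/instrumentation-bedrock";

new BedrockInstrumentation(); // the same class from either bundle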