dd-trace 5.24.0 → 5.26.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE-3rdparty.csv +3 -0
- package/index.d.ts +345 -8
- package/init.js +60 -47
- package/package.json +16 -7
- package/packages/datadog-code-origin/index.js +4 -4
- package/packages/datadog-core/index.js +1 -3
- package/packages/datadog-core/src/storage.js +21 -0
- package/packages/datadog-core/src/utils/src/parse-tags.js +33 -0
- package/packages/datadog-esbuild/index.js +4 -2
- package/packages/datadog-instrumentations/src/amqplib.js +65 -5
- package/packages/datadog-instrumentations/src/child_process.js +135 -27
- package/packages/datadog-instrumentations/src/express.js +1 -1
- package/packages/datadog-instrumentations/src/handlebars.js +40 -0
- package/packages/datadog-instrumentations/src/helpers/hooks.js +5 -0
- package/packages/datadog-instrumentations/src/helpers/register.js +9 -0
- package/packages/datadog-instrumentations/src/jest.js +6 -2
- package/packages/datadog-instrumentations/src/kafkajs.js +123 -63
- package/packages/datadog-instrumentations/src/mocha/utils.js +2 -2
- package/packages/datadog-instrumentations/src/multer.js +37 -0
- package/packages/datadog-instrumentations/src/openai.js +2 -2
- package/packages/datadog-instrumentations/src/pug.js +23 -0
- package/packages/datadog-instrumentations/src/router.js +2 -3
- package/packages/datadog-instrumentations/src/url.js +84 -0
- package/packages/datadog-instrumentations/src/utils/src/extract-package-and-module-path.js +7 -4
- package/packages/datadog-plugin-amqplib/src/consumer.js +6 -5
- package/packages/datadog-plugin-aws-sdk/src/base.js +5 -0
- package/packages/datadog-plugin-aws-sdk/src/services/eventbridge.js +1 -0
- package/packages/datadog-plugin-aws-sdk/src/services/kinesis.js +10 -7
- package/packages/datadog-plugin-aws-sdk/src/services/s3.js +35 -0
- package/packages/datadog-plugin-aws-sdk/src/services/sqs.js +11 -9
- package/packages/datadog-plugin-cypress/src/cypress-plugin.js +59 -45
- package/packages/datadog-plugin-cypress/src/support.js +1 -0
- package/packages/datadog-plugin-fastify/src/code_origin.js +2 -2
- package/packages/datadog-plugin-google-cloud-pubsub/src/consumer.js +10 -2
- package/packages/datadog-plugin-google-cloud-pubsub/src/producer.js +8 -0
- package/packages/datadog-plugin-grpc/src/client.js +3 -0
- package/packages/datadog-plugin-grpc/src/server.js +5 -1
- package/packages/datadog-plugin-http/src/client.js +42 -1
- package/packages/datadog-plugin-http2/src/client.js +26 -1
- package/packages/datadog-plugin-jest/src/index.js +2 -1
- package/packages/datadog-plugin-kafkajs/src/batch-consumer.js +6 -3
- package/packages/datadog-plugin-kafkajs/src/consumer.js +10 -5
- package/packages/datadog-plugin-kafkajs/src/producer.js +10 -4
- package/packages/datadog-plugin-mocha/src/index.js +5 -2
- package/packages/datadog-plugin-moleculer/src/server.js +2 -2
- package/packages/datadog-plugin-openai/src/index.js +9 -1015
- package/packages/datadog-plugin-openai/src/tracing.js +1023 -0
- package/packages/datadog-plugin-rhea/src/consumer.js +2 -1
- package/packages/datadog-plugin-vitest/src/index.js +2 -1
- package/packages/dd-trace/src/appsec/addresses.js +2 -0
- package/packages/dd-trace/src/appsec/api_security_sampler.js +50 -27
- package/packages/dd-trace/src/appsec/channels.js +3 -1
- package/packages/dd-trace/src/appsec/iast/analyzers/analyzers.js +1 -0
- package/packages/dd-trace/src/appsec/iast/analyzers/header-injection-analyzer.js +33 -16
- package/packages/dd-trace/src/appsec/iast/analyzers/template-injection-analyzer.js +18 -0
- package/packages/dd-trace/src/appsec/iast/taint-tracking/plugin.js +55 -7
- package/packages/dd-trace/src/appsec/iast/vulnerabilities-formatter/evidence-redaction/sensitive-handler.js +3 -2
- package/packages/dd-trace/src/appsec/iast/vulnerabilities.js +1 -0
- package/packages/dd-trace/src/appsec/iast/vulnerability-reporter.js +4 -2
- package/packages/dd-trace/src/appsec/index.js +9 -6
- package/packages/dd-trace/src/appsec/rasp/command_injection.js +49 -0
- package/packages/dd-trace/src/appsec/rasp/index.js +3 -0
- package/packages/dd-trace/src/appsec/rasp/ssrf.js +4 -3
- package/packages/dd-trace/src/appsec/rasp/utils.js +3 -2
- package/packages/dd-trace/src/appsec/recommended.json +354 -158
- package/packages/dd-trace/src/appsec/remote_config/capabilities.js +2 -1
- package/packages/dd-trace/src/appsec/remote_config/index.js +2 -7
- package/packages/dd-trace/src/appsec/reporter.js +6 -4
- package/packages/dd-trace/src/appsec/sdk/track_event.js +5 -3
- package/packages/dd-trace/src/appsec/waf/waf_manager.js +4 -0
- package/packages/dd-trace/src/azure_metadata.js +120 -0
- package/packages/dd-trace/src/ci-visibility/dynamic-instrumentation/index.js +97 -0
- package/packages/dd-trace/src/ci-visibility/dynamic-instrumentation/worker/index.js +90 -0
- package/packages/dd-trace/src/ci-visibility/exporters/agent-proxy/index.js +19 -1
- package/packages/dd-trace/src/ci-visibility/exporters/agentless/di-logs-writer.js +53 -0
- package/packages/dd-trace/src/ci-visibility/exporters/agentless/index.js +8 -1
- package/packages/dd-trace/src/ci-visibility/exporters/ci-visibility-exporter.js +43 -0
- package/packages/dd-trace/src/config.js +88 -10
- package/packages/dd-trace/src/constants.js +8 -1
- package/packages/dd-trace/src/crashtracking/crashtracker.js +98 -0
- package/packages/dd-trace/src/crashtracking/index.js +15 -0
- package/packages/dd-trace/src/crashtracking/noop.js +8 -0
- package/packages/dd-trace/src/datastreams/pathway.js +1 -0
- package/packages/dd-trace/src/debugger/devtools_client/index.js +9 -13
- package/packages/dd-trace/src/debugger/devtools_client/send.js +15 -1
- package/packages/dd-trace/src/debugger/devtools_client/snapshot/collector.js +57 -23
- package/packages/dd-trace/src/debugger/devtools_client/snapshot/index.js +12 -2
- package/packages/dd-trace/src/debugger/devtools_client/snapshot/processor.js +31 -20
- package/packages/dd-trace/src/debugger/devtools_client/snapshot/symbols.js +6 -0
- package/packages/dd-trace/src/debugger/devtools_client/state.js +11 -2
- package/packages/dd-trace/src/debugger/index.js +10 -3
- package/packages/dd-trace/src/llmobs/constants/tags.js +34 -0
- package/packages/dd-trace/src/llmobs/constants/text.js +6 -0
- package/packages/dd-trace/src/llmobs/constants/writers.js +13 -0
- package/packages/dd-trace/src/llmobs/index.js +103 -0
- package/packages/dd-trace/src/llmobs/noop.js +82 -0
- package/packages/dd-trace/src/llmobs/plugins/base.js +65 -0
- package/packages/dd-trace/src/llmobs/plugins/openai.js +205 -0
- package/packages/dd-trace/src/llmobs/sdk.js +377 -0
- package/packages/dd-trace/src/llmobs/span_processor.js +195 -0
- package/packages/dd-trace/src/llmobs/storage.js +7 -0
- package/packages/dd-trace/src/llmobs/tagger.js +322 -0
- package/packages/dd-trace/src/llmobs/util.js +176 -0
- package/packages/dd-trace/src/llmobs/writers/base.js +111 -0
- package/packages/dd-trace/src/llmobs/writers/evaluations.js +29 -0
- package/packages/dd-trace/src/llmobs/writers/spans/agentProxy.js +23 -0
- package/packages/dd-trace/src/llmobs/writers/spans/agentless.js +17 -0
- package/packages/dd-trace/src/llmobs/writers/spans/base.js +52 -0
- package/packages/dd-trace/src/log/index.js +10 -13
- package/packages/dd-trace/src/log/log.js +52 -0
- package/packages/dd-trace/src/log/writer.js +50 -19
- package/packages/dd-trace/src/noop/proxy.js +3 -0
- package/packages/dd-trace/src/noop/span.js +4 -0
- package/packages/dd-trace/src/opentelemetry/span.js +16 -1
- package/packages/dd-trace/src/opentelemetry/tracer.js +1 -0
- package/packages/dd-trace/src/opentracing/propagation/text_map.js +106 -32
- package/packages/dd-trace/src/opentracing/span.js +26 -0
- package/packages/dd-trace/src/opentracing/span_context.js +1 -0
- package/packages/dd-trace/src/opentracing/tracer.js +8 -1
- package/packages/dd-trace/src/payload-tagging/config/aws.json +71 -3
- package/packages/dd-trace/src/plugins/outbound.js +9 -0
- package/packages/dd-trace/src/plugins/tracing.js +3 -3
- package/packages/dd-trace/src/plugins/util/inferred_proxy.js +121 -0
- package/packages/dd-trace/src/plugins/util/ip_extractor.js +0 -1
- package/packages/dd-trace/src/plugins/util/web.js +39 -11
- package/packages/dd-trace/src/priority_sampler.js +16 -0
- package/packages/dd-trace/src/profiling/config.js +3 -1
- package/packages/dd-trace/src/profiling/exporters/agent.js +7 -5
- package/packages/dd-trace/src/profiling/profilers/wall.js +2 -1
- package/packages/dd-trace/src/proxy.js +13 -1
- package/packages/dd-trace/src/span_processor.js +5 -0
- package/packages/dd-trace/src/telemetry/index.js +11 -1
- package/packages/dd-trace/src/telemetry/logs/index.js +16 -11
- package/packages/dd-trace/src/telemetry/logs/log-collector.js +3 -8
- package/packages/dd-trace/src/telemetry/metrics.js +6 -1
- package/packages/dd-trace/src/util.js +16 -1
- package/version.js +4 -2
- /package/packages/dd-trace/src/appsec/iast/vulnerabilities-formatter/evidence-redaction/sensitive-analyzers/{code-injection-sensitive-analyzer.js → tainted-range-based-sensitive-analyzer.js} +0 -0
|
@@ -0,0 +1,1023 @@
|
|
|
1
|
+
'use strict'
|
|
2
|
+
|
|
3
|
+
const path = require('path')
|
|
4
|
+
|
|
5
|
+
const TracingPlugin = require('../../dd-trace/src/plugins/tracing')
|
|
6
|
+
const { storage } = require('../../datadog-core')
|
|
7
|
+
const services = require('./services')
|
|
8
|
+
const Sampler = require('../../dd-trace/src/sampler')
|
|
9
|
+
const { MEASURED } = require('../../../ext/tags')
|
|
10
|
+
const { estimateTokens } = require('./token-estimator')
|
|
11
|
+
|
|
12
|
+
// String#replaceAll unavailable on Node.js@v14 (dd-trace@<=v3), so the
// text-normalization helpers below fall back to global-regex replace.
const RE_NEWLINE = /\n/g
const RE_TAB = /\t/g

// Maximum length of text captured into span tags. Reassigned from the
// tracer config (`openaiSpanCharLimit`) by the plugin constructor.
// TODO: In the future we should refactor config.js to make it requirable
let MAX_TEXT_LEN = 128

/**
 * Require a module without throwing when it cannot be resolved.
 *
 * The parameter is named `id` (not `path`) so it does not shadow the
 * `path` module required at the top of this file.
 *
 * @param {string} id - module identifier to load
 * @returns {*} the loaded module, or `null` when resolution/loading fails
 */
function safeRequire (id) {
  try {
    // eslint-disable-next-line import/no-extraneous-dependencies
    return require(id)
  } catch {
    return null
  }
}

// Optional exact token counting via tiktoken; `undefined` when the
// package is not installed, in which case tokens are estimated instead.
const encodingForModel = safeRequire('tiktoken')?.encoding_for_model
|
|
29
|
+
|
|
30
|
+
/**
 * APM tracing plugin for the `openai` package.
 *
 * For each instrumented OpenAI method this starts an `openai.request`
 * span, tags request data in `bindStart`, tags response data and finishes
 * the span in `asyncEnd`, and forwards metrics plus sampled request logs
 * through the external `services` helpers.
 */
class OpenAiTracingPlugin extends TracingPlugin {
  static get id () { return 'openai' }
  static get operation () { return 'request' }
  static get system () { return 'openai' }
  static get prefix () {
    return 'tracing:apm:openai:request'
  }

  constructor (...args) {
    super(...args)

    const { metrics, logger } = services.init(this._tracerConfig)
    this.metrics = metrics
    this.logger = logger

    // Default to sampling 10% of requests for log submission.
    this.sampler = new Sampler(0.1)

    // Hoist the char-limit setting into module-level MAX_TEXT_LEN so the
    // free helper functions below can use it without being class methods.
    if (this._tracerConfig) {
      MAX_TEXT_LEN = this._tracerConfig.openaiSpanCharLimit
    }
  }

  configure (config) {
    // Tear down the metrics/logger clients when the plugin is disabled.
    if (config.enabled === false) {
      services.shutdown()
    }

    super.configure(config)
  }

  /**
   * Start the `openai.request` span and tag everything knowable from the
   * request payload. Returns the store placed on `ctx.currentStore`.
   */
  bindStart (ctx) {
    const { methodName, args, basePath, apiKey } = ctx
    const payload = normalizeRequestPayload(methodName, args)
    const store = storage.getStore() || {}

    const span = this.startSpan('openai.request', {
      service: this.config.service,
      resource: methodName,
      type: 'openai',
      kind: 'client',
      meta: {
        [MEASURED]: 1,
        // Data that is always available with a request
        'openai.user.api_key': truncateApiKey(apiKey),
        'openai.api_base': basePath,
        // The openai.api_type (openai|azure) is present in Python but not in Node.js
        // Add support once https://github.com/openai/openai-node/issues/53 is closed

        // Data that is common across many requests
        'openai.request.best_of': payload.best_of,
        'openai.request.echo': payload.echo,
        'openai.request.logprobs': payload.logprobs,
        'openai.request.max_tokens': payload.max_tokens,
        'openai.request.model': payload.model, // vague model
        'openai.request.n': payload.n,
        'openai.request.presence_penalty': payload.presence_penalty,
        'openai.request.frequency_penalty': payload.frequency_penalty,
        'openai.request.stop': payload.stop,
        'openai.request.suffix': payload.suffix,
        'openai.request.temperature': payload.temperature,
        'openai.request.top_p': payload.top_p,
        'openai.request.user': payload.user,
        'openai.request.file_id': payload.file_id // deleteFile, retrieveFile, downloadFile
      }
    }, false)

    // Prototype-less store for data forwarded to the sampled-log sink.
    const openaiStore = Object.create(null)

    const tags = {} // The remaining tags are added one at a time

    // createChatCompletion, createCompletion, createImage, createImageEdit, createTranscription, createTranslation
    if (payload.prompt) {
      const prompt = payload.prompt
      openaiStore.prompt = prompt
      if (typeof prompt === 'string' || (Array.isArray(prompt) && typeof prompt[0] === 'number')) {
        // This is a single prompt, either String or [Number]
        tags['openai.request.prompt'] = normalizeStringOrTokenArray(prompt, true)
      } else if (Array.isArray(prompt)) {
        // This is multiple prompts, either [String] or [[Number]]
        for (let i = 0; i < prompt.length; i++) {
          tags[`openai.request.prompt.${i}`] = normalizeStringOrTokenArray(prompt[i], true)
        }
      }
    }

    // createEdit, createEmbedding, createModeration
    if (payload.input) {
      const normalized = normalizeStringOrTokenArray(payload.input, false)
      tags['openai.request.input'] = truncateText(normalized)
      openaiStore.input = normalized
    }

    // createChatCompletion, createCompletion
    if (payload.logit_bias !== null && typeof payload.logit_bias === 'object') {
      for (const [tokenId, bias] of Object.entries(payload.logit_bias)) {
        tags[`openai.request.logit_bias.${tokenId}`] = bias
      }
    }

    if (payload.stream) {
      tags['openai.request.stream'] = payload.stream
    }

    // Method-specific request extraction.
    switch (methodName) {
      case 'createFineTune':
      case 'fine_tuning.jobs.create':
      case 'fine-tune.create':
        createFineTuneRequestExtraction(tags, payload)
        break

      case 'createImage':
      case 'images.generate':
      case 'createImageEdit':
      case 'images.edit':
      case 'createImageVariation':
      case 'images.createVariation':
        commonCreateImageRequestExtraction(tags, payload, openaiStore)
        break

      case 'createChatCompletion':
      case 'chat.completions.create':
        createChatCompletionRequestExtraction(tags, payload, openaiStore)
        break

      case 'createFile':
      case 'files.create':
      case 'retrieveFile':
      case 'files.retrieve':
        commonFileRequestExtraction(tags, payload)
        break

      case 'createTranscription':
      case 'audio.transcriptions.create':
      case 'createTranslation':
      case 'audio.translations.create':
        commonCreateAudioRequestExtraction(tags, payload, openaiStore)
        break

      case 'retrieveModel':
      case 'models.retrieve':
        retrieveModelRequestExtraction(tags, payload)
        break

      case 'listFineTuneEvents':
      case 'fine_tuning.jobs.listEvents':
      case 'fine-tune.listEvents':
      case 'retrieveFineTune':
      case 'fine_tuning.jobs.retrieve':
      case 'fine-tune.retrieve':
      case 'deleteModel':
      case 'models.del':
      case 'cancelFineTune':
      case 'fine_tuning.jobs.cancel':
      case 'fine-tune.cancel':
        commonLookupFineTuneRequestExtraction(tags, payload)
        break

      case 'createEdit':
      case 'edits.create':
        createEditRequestExtraction(tags, payload, openaiStore)
        break
    }

    span.addTags(tags)

    ctx.currentStore = { ...store, span, openai: openaiStore }

    return ctx.currentStore
  }

  /**
   * Tag response data, finish the span, and forward logs/metrics once the
   * request promise settles.
   */
  asyncEnd (ctx) {
    const { result } = ctx
    const store = ctx.currentStore

    const span = store?.span
    if (!span) return

    const error = !!span.context()._tags.error

    let headers, body, method, path
    if (!error) {
      headers = result.headers
      body = result.data
      method = result.request.method
      path = result.request.path
    }

    // Fetch-style Headers objects are not plain objects; convert so the
    // extraction below can use ordinary property access.
    if (!error && headers?.constructor.name === 'Headers') {
      headers = Object.fromEntries(headers)
    }
    const methodName = span._spanContext._tags['resource.name']

    body = coerceResponseBody(body, methodName)

    const openaiStore = store.openai

    if (!error && (path?.startsWith('https://') || path?.startsWith('http://'))) {
      // basic checking for if the path was set as a full URL
      // not using a full regex as it will likely be "https://api.openai.com/..."
      path = new URL(path).pathname
    }
    const endpoint = lookupOperationEndpoint(methodName, path)

    const tags = error
      ? {}
      : {
          'openai.request.endpoint': endpoint,
          'openai.request.method': method.toUpperCase(),

          'openai.organization.id': body.organization_id, // only available in fine-tunes endpoints
          'openai.organization.name': headers['openai-organization'],

          'openai.response.model': headers['openai-model'] || body.model, // specific model, often undefined
          'openai.response.id': body.id, // common creation value, numeric epoch
          'openai.response.deleted': body.deleted, // common boolean field in delete responses

          // The OpenAI API appears to use both created and created_at in different places
          // Here we're consciously choosing to surface this inconsistency instead of normalizing
          'openai.response.created': body.created,
          'openai.response.created_at': body.created_at
        }

    responseDataExtractionByMethod(methodName, tags, body, openaiStore)
    span.addTags(tags)

    span.finish()
    this.sendLog(methodName, span, tags, openaiStore, error)
    this.sendMetrics(headers, body, endpoint, span._duration, error, tags)
  }

  /**
   * Emit request/duration/token/rate-limit metrics for a finished request.
   */
  sendMetrics (headers, body, endpoint, duration, error, spanTags) {
    const tags = [`error:${Number(!!error)}`]
    if (error) {
      this.metrics.increment('openai.request.error', 1, tags)
    } else {
      tags.push(`org:${headers['openai-organization']}`)
      tags.push(`endpoint:${endpoint}`) // just "/v1/models", no method
      tags.push(`model:${headers['openai-model'] || body.model}`)
    }

    this.metrics.distribution('openai.request.duration', duration * 1000, tags)

    const promptTokens = spanTags['openai.response.usage.prompt_tokens']
    const promptTokensEstimated = spanTags['openai.response.usage.prompt_tokens_estimated']

    const completionTokens = spanTags['openai.response.usage.completion_tokens']
    const completionTokensEstimated = spanTags['openai.response.usage.completion_tokens_estimated']

    const totalTokens = spanTags['openai.response.usage.total_tokens']

    if (!error) {
      // Token metrics carry an extra `openai.estimated:true` tag whenever
      // the count came from the estimator instead of tiktoken.
      if (promptTokens != null) {
        if (promptTokensEstimated) {
          this.metrics.distribution(
            'openai.tokens.prompt', promptTokens, [...tags, 'openai.estimated:true'])
        } else {
          this.metrics.distribution('openai.tokens.prompt', promptTokens, tags)
        }
      }

      if (completionTokens != null) {
        if (completionTokensEstimated) {
          this.metrics.distribution(
            'openai.tokens.completion', completionTokens, [...tags, 'openai.estimated:true'])
        } else {
          this.metrics.distribution('openai.tokens.completion', completionTokens, tags)
        }
      }

      if (totalTokens != null) {
        if (promptTokensEstimated || completionTokensEstimated) {
          this.metrics.distribution(
            'openai.tokens.total', totalTokens, [...tags, 'openai.estimated:true'])
        } else {
          this.metrics.distribution('openai.tokens.total', totalTokens, tags)
        }
      }
    }

    if (headers) {
      if (headers['x-ratelimit-limit-requests']) {
        this.metrics.gauge('openai.ratelimit.requests', Number(headers['x-ratelimit-limit-requests']), tags)
      }

      if (headers['x-ratelimit-remaining-requests']) {
        this.metrics.gauge(
          'openai.ratelimit.remaining.requests', Number(headers['x-ratelimit-remaining-requests']), tags
        )
      }

      if (headers['x-ratelimit-limit-tokens']) {
        this.metrics.gauge('openai.ratelimit.tokens', Number(headers['x-ratelimit-limit-tokens']), tags)
      }

      if (headers['x-ratelimit-remaining-tokens']) {
        this.metrics.gauge('openai.ratelimit.remaining.tokens', Number(headers['x-ratelimit-remaining-tokens']), tags)
      }
    }
  }

  /**
   * Submit a sampled log record for the request. No-ops when the store is
   * empty or the sampler does not select this request.
   */
  sendLog (methodName, span, tags, openaiStore, error) {
    if (!openaiStore) return
    if (!Object.keys(openaiStore).length) return
    if (!this.sampler.isSampled()) return

    const log = {
      status: error ? 'error' : 'info',
      message: `sampled ${methodName}`,
      ...openaiStore
    }

    this.logger.log(log, span, tags)
  }
}
|
|
345
|
+
|
|
346
|
+
/**
 * Count (or estimate) the number of prompt tokens in a request payload.
 *
 * Handles chat payloads (`messages`, including multi-part content arrays)
 * and completion payloads (`prompt` as a string or an array of strings).
 *
 * @param {string} methodName - normalized OpenAI method name
 * @param {Object} payload - request payload
 * @param {string} model - model name used to select a tiktoken encoder
 * @returns {{ promptTokens: number, promptEstimated: boolean }}
 */
function countPromptTokens (methodName, payload, model) {
  let promptTokens = 0
  let promptEstimated = false

  if (methodName === 'chat.completions.create') {
    for (const message of payload.messages) {
      const content = message.content

      if (typeof content === 'string') {
        const counted = countTokens(content, model)
        promptTokens += counted.tokens
        promptEstimated = counted.estimated
      } else if (Array.isArray(content)) {
        for (const part of content) {
          // Token computation is unsupported for image_url parts: even
          // though the URL is a string, the true token count is based on
          // the image itself, something onerous to do client-side.
          if (part.type !== 'text') continue

          const counted = countTokens(part.text, model)
          promptTokens += counted.tokens
          promptEstimated = counted.estimated
        }
      }
    }
  } else if (methodName === 'completions.create') {
    const prompts = Array.isArray(payload.prompt) ? payload.prompt : [payload.prompt]

    for (const prompt of prompts) {
      const counted = countTokens(prompt, model)
      promptTokens += counted.tokens
      promptEstimated = counted.estimated
    }
  }

  return { promptTokens, promptEstimated }
}
|
|
383
|
+
|
|
384
|
+
/**
 * Count (or estimate) completion tokens across all response choices.
 *
 * @param {Object} body - response body (may be null/undefined on error)
 * @param {string} model - model name used to select a tiktoken encoder
 * @returns {{ completionTokens: number, completionEstimated: boolean }}
 */
function countCompletionTokens (body, model) {
  let completionTokens = 0
  let completionEstimated = false

  if (body?.choices) {
    for (const choice of body.choices) {
      // `delta` carries the message fragments of streamed responses.
      const message = choice.message || choice.delta
      const content = choice.text || message?.content

      const counted = countTokens(content, model)
      completionTokens += counted.tokens
      completionEstimated = counted.estimated
    }
  }

  return { completionTokens, completionEstimated }
}
|
|
401
|
+
|
|
402
|
+
/**
 * Count tokens for a piece of content, preferring exact counts from
 * tiktoken when available and falling back to a heuristic estimate.
 *
 * @param {string} content - text to tokenize
 * @param {string} model - model name used to pick a tiktoken encoder
 * @returns {{ tokens: number, estimated: boolean }} `estimated` is true
 *   when the heuristic fallback produced the count
 */
function countTokens (content, model) {
  if (encodingForModel) {
    try {
      const encoder = encodingForModel(model)
      const tokens = encoder.encode(content).length
      encoder.free()
      return { tokens, estimated: false }
    } catch {
      // tiktoken may not know this model, or may fail to encode the
      // content; either way fall through to the estimate below.
    }
  }

  return { tokens: estimateTokens(content), estimated: true }
}
|
|
422
|
+
|
|
423
|
+
/**
 * Tag the edit instruction for createEdit requests and keep a copy in the
 * per-request store for sampled logging.
 */
function createEditRequestExtraction (tags, payload, openaiStore) {
  const { instruction } = payload
  tags['openai.request.instruction'] = instruction
  openaiStore.instruction = instruction
}
|
|
428
|
+
|
|
429
|
+
/**
 * Tag the model identifier for retrieveModel requests.
 */
function retrieveModelRequestExtraction (tags, payload) {
  tags['openai.request.id'] = payload.id
}
|
|
432
|
+
|
|
433
|
+
/**
 * Tag per-message fields (content, role, name, finish_reason) for a
 * createChatCompletion request and store the messages for sampled logging.
 */
function createChatCompletionRequestExtraction (tags, payload, openaiStore) {
  const messages = payload.messages
  if (!defensiveArrayLength(messages)) return

  openaiStore.messages = payload.messages
  payload.messages.forEach((message, i) => {
    tagChatCompletionRequestContent(message.content, i, tags)
    tags[`openai.request.messages.${i}.role`] = message.role
    tags[`openai.request.messages.${i}.name`] = message.name
    tags[`openai.request.messages.${i}.finish_reason`] = message.finish_reason
  })
}
|
|
446
|
+
|
|
447
|
+
/**
 * Tag image-related request fields (source image, mask, size, response
 * format, language) for createImage / createImageEdit / createImageVariation.
 *
 * Only the basename of uploaded file paths is tagged, so local directory
 * structure does not leak into span tags.
 */
function commonCreateImageRequestExtraction (tags, payload, openaiStore) {
  // createImageEdit, createImageVariation
  const img = payload.file || payload.image
  if (img !== null && typeof img === 'object' && img.path) {
    const file = path.basename(img.path)
    tags['openai.request.image'] = file
    openaiStore.file = file
  }

  // createImageEdit only
  if (payload.mask !== null && typeof payload.mask === 'object' && payload.mask.path) {
    const mask = path.basename(payload.mask.path)
    tags['openai.request.mask'] = mask
    openaiStore.mask = mask
  }

  tags['openai.request.size'] = payload.size
  tags['openai.request.response_format'] = payload.response_format
  tags['openai.request.language'] = payload.language
}
|
|
467
|
+
|
|
468
|
+
/**
 * Dispatch response tag extraction based on the normalized method name.
 * Unknown methods fall through with no extra tags.
 *
 * @param {string} methodName - normalized OpenAI method name
 * @param {Object} tags - span tag accumulator (mutated in place)
 * @param {Object} body - coerced response body
 * @param {Object} openaiStore - per-request store for sampled logging
 */
function responseDataExtractionByMethod (methodName, tags, body, openaiStore) {
  switch (methodName) {
    case 'createModeration':
    case 'moderations.create':
      createModerationResponseExtraction(tags, body)
      break

    case 'createCompletion':
    case 'completions.create':
    case 'createChatCompletion':
    case 'chat.completions.create':
    case 'createEdit':
    case 'edits.create':
      commonCreateResponseExtraction(tags, body, openaiStore, methodName)
      break

    case 'listFiles':
    case 'files.list':
    case 'listFineTunes':
    case 'fine_tuning.jobs.list':
    case 'fine-tune.list':
    case 'listFineTuneEvents':
    case 'fine_tuning.jobs.listEvents':
    case 'fine-tune.listEvents':
      commonListCountResponseExtraction(tags, body)
      break

    case 'createEmbedding':
    case 'embeddings.create':
      createEmbeddingResponseExtraction(tags, body, openaiStore)
      break

    case 'createFile':
    case 'files.create':
    case 'retrieveFile':
    case 'files.retrieve':
      createRetrieveFileResponseExtraction(tags, body)
      break

    case 'deleteFile':
    case 'files.del':
      deleteFileResponseExtraction(tags, body)
      break

    case 'downloadFile':
    case 'files.retrieveContent':
    case 'files.content':
      downloadFileResponseExtraction(tags, body)
      break

    case 'createFineTune':
    case 'fine_tuning.jobs.create':
    case 'fine-tune.create':
    case 'retrieveFineTune':
    case 'fine_tuning.jobs.retrieve':
    case 'fine-tune.retrieve':
    case 'cancelFineTune':
    case 'fine_tuning.jobs.cancel':
    case 'fine-tune.cancel':
      commonFineTuneResponseExtraction(tags, body)
      break

    case 'createTranscription':
    case 'audio.transcriptions.create':
    case 'createTranslation':
    case 'audio.translations.create':
      createAudioResponseExtraction(tags, body)
      break

    case 'createImage':
    case 'images.generate':
    case 'createImageEdit':
    case 'images.edit':
    case 'createImageVariation':
    case 'images.createVariation':
      commonImageResponseExtraction(tags, body)
      break

    case 'listModels':
    case 'models.list':
      listModelsResponseExtraction(tags, body)
      break

    case 'retrieveModel':
    case 'models.retrieve':
      retrieveModelResponseExtraction(tags, body)
      break
  }
}
|
|
557
|
+
|
|
558
|
+
// Tags metadata from a retrieveModel/models.retrieve response, including the
// first permission entry when the API returns one.
function retrieveModelResponseExtraction (tags, body) {
  tags['openai.response.owned_by'] = body.owned_by
  tags['openai.response.parent'] = body.parent
  tags['openai.response.root'] = body.root

  // Guard against a missing OR empty permission list: checking only
  // `!body.permission` let `body.permission[0]` throw on an empty array.
  if (!body.permission || body.permission.length === 0) return

  const permission = body.permission[0]

  tags['openai.response.permission.id'] = permission.id
  tags['openai.response.permission.created'] = permission.created
  tags['openai.response.permission.allow_create_engine'] = permission.allow_create_engine
  tags['openai.response.permission.allow_sampling'] = permission.allow_sampling
  tags['openai.response.permission.allow_logprobs'] = permission.allow_logprobs
  tags['openai.response.permission.allow_search_indices'] = permission.allow_search_indices
  tags['openai.response.permission.allow_view'] = permission.allow_view
  tags['openai.response.permission.allow_fine_tuning'] = permission.allow_fine_tuning
  tags['openai.response.permission.organization'] = permission.organization
  tags['openai.response.permission.group'] = permission.group
  tags['openai.response.permission.is_blocking'] = permission.is_blocking
}
|
|
577
|
+
|
|
578
|
+
// Tags the identifying fields shared by fine-tune lookup style requests
// (retrieve / cancel / listFineTuneEvents).
function commonLookupFineTuneRequestExtraction (tags, body) {
  const { fine_tune_id: fineTuneId, stream } = body
  tags['openai.request.fine_tune_id'] = fineTuneId
  tags['openai.request.stream'] = Boolean(stream) // listFineTuneEvents
}
|
|
582
|
+
|
|
583
|
+
// Tags how many models came back from a listModels/models.list response.
function listModelsResponseExtraction (tags, body) {
  const models = body.data
  if (models) {
    tags['openai.response.count'] = models.length
  }
}
|
|
588
|
+
|
|
589
|
+
// Tags the images returned by createImage/createImageEdit/createImageVariation.
function commonImageResponseExtraction (tags, body) {
  const images = body.data
  if (!images) return

  tags['openai.response.images_count'] = images.length

  images.forEach((image, idx) => {
    // exactly one of these two options is provided
    tags[`openai.response.images.${idx}.url`] = truncateText(image.url)
    tags[`openai.response.images.${idx}.b64_json`] = image.b64_json && 'returned'
  })
}
|
|
601
|
+
|
|
602
|
+
// Tags the transcription/translation result fields from an audio response.
function createAudioResponseExtraction (tags, body) {
  const { text, language, duration, segments } = body
  tags['openai.response.text'] = text
  tags['openai.response.language'] = language
  tags['openai.response.duration'] = duration
  tags['openai.response.segments_count'] = defensiveArrayLength(segments)
}
|
|
608
|
+
|
|
609
|
+
// Tags the training options supplied on a createFineTune request.
function createFineTuneRequestExtraction (tags, body) {
  // Scalar fields copied verbatim onto the matching request tag.
  const scalarFields = [
    'training_file',
    'validation_file',
    'n_epochs',
    'batch_size',
    'learning_rate_multiplier',
    'prompt_loss_weight',
    'compute_classification_metrics',
    'classification_n_classes',
    'classification_positive_class'
  ]
  for (const field of scalarFields) {
    tags[`openai.request.${field}`] = body[field]
  }
  tags['openai.request.classification_betas_count'] = defensiveArrayLength(body.classification_betas)
}
|
|
621
|
+
|
|
622
|
+
// Tags the fields shared by create/retrieve/cancel fine-tune responses.
// Older API payloads use `hyperparams`, newer ones `hyperparameters`; the tag
// name mirrors whichever key the response actually carried.
function commonFineTuneResponseExtraction (tags, body) {
  tags['openai.response.events_count'] = defensiveArrayLength(body.events)
  tags['openai.response.fine_tuned_model'] = body.fine_tuned_model

  const hyperparamsKey = body.hyperparams ? 'hyperparams' : 'hyperparameters'
  const hyperparams = body[hyperparamsKey]

  if (hyperparams) {
    const fields = ['n_epochs', 'batch_size', 'prompt_loss_weight', 'learning_rate_multiplier']
    for (const field of fields) {
      tags[`openai.response.${hyperparamsKey}.${field}`] = hyperparams[field]
    }
  }

  tags['openai.response.training_files_count'] = defensiveArrayLength(body.training_files || body.training_file)
  tags['openai.response.result_files_count'] = defensiveArrayLength(body.result_files)
  tags['openai.response.validation_files_count'] = defensiveArrayLength(body.validation_files || body.validation_file)
  tags['openai.response.updated_at'] = body.updated_at
  tags['openai.response.status'] = body.status
}
|
|
641
|
+
|
|
642
|
+
// The OpenAI package appears to stream the content download then provide it
// all as a singular string, so the payload length is the total byte count.
function downloadFileResponseExtraction (tags, body) {
  if (body.file) {
    tags['openai.response.total_bytes'] = body.file.length
  }
}
|
|
647
|
+
|
|
648
|
+
// Tags the id of the file removed by deleteFile/files.del.
function deleteFileResponseExtraction (tags, body) {
  const { id } = body
  tags['openai.response.id'] = id
}
|
|
651
|
+
|
|
652
|
+
// Tags audio request options and, when the audio was supplied as a file read
// stream, records its basename both as a tag and on the span store.
function commonCreateAudioRequestExtraction (tags, body, openaiStore) {
  tags['openai.request.response_format'] = body.response_format
  tags['openai.request.language'] = body.language

  const file = body.file
  const looksLikeStream = file !== null && typeof file === 'object' && file.path
  if (looksLikeStream) {
    const filename = path.basename(file.path)
    tags['openai.request.filename'] = filename
    openaiStore.file = filename
  }
}
|
|
662
|
+
|
|
663
|
+
// Tags the purpose and, best effort, the filename of a file upload request.
function commonFileRequestExtraction (tags, body) {
  tags['openai.request.purpose'] = body.purpose

  // User can provide either exact file contents or a file read stream.
  // With the stream we extract the filepath — this is a best effort attempt
  // to extract the filename during the request.
  const file = body.file
  if (file !== null && typeof file === 'object' && file.path) {
    tags['openai.request.filename'] = path.basename(file.path)
  }
}
|
|
673
|
+
|
|
674
|
+
// Tags file metadata from createFile/retrieveFile responses.
function createRetrieveFileResponseExtraction (tags, body) {
  for (const field of ['filename', 'purpose', 'bytes', 'status', 'status_details']) {
    tags[`openai.response.${field}`] = body[field]
  }
}
|
|
681
|
+
|
|
682
|
+
// Tags usage plus the number and length of each embedding from an
// embeddings.create response.
function createEmbeddingResponseExtraction (tags, body, openaiStore) {
  // `usageExtraction` takes (tags, body, methodName, openaiStore); the
  // previous call passed the store in the methodName slot, so the store never
  // reached the callee correctly. Embeddings have no token-estimation method,
  // so the method name is intentionally undefined here.
  usageExtraction(tags, body, undefined, openaiStore)

  if (!body.data) return

  tags['openai.response.embeddings_count'] = body.data.length
  for (let i = 0; i < body.data.length; i++) {
    tags[`openai.response.embedding.${i}.embedding_length`] = body.data[i].embedding.length
  }
}
|
|
692
|
+
|
|
693
|
+
// Tags the number of entries in a generic list response.
function commonListCountResponseExtraction (tags, body) {
  const entries = body.data
  if (entries) {
    tags['openai.response.count'] = entries.length
  }
}
|
|
697
|
+
|
|
698
|
+
// Tags moderation verdicts: the overall flag plus one tag per category match
// and per category score.
// TODO: Is there ever more than one entry in body.results?
function createModerationResponseExtraction (tags, body) {
  tags['openai.response.id'] = body.id
  // tags[`openai.response.model`] = body.model // redundant, already extracted globally

  if (!body.results) return

  const [result] = body.results
  tags['openai.response.flagged'] = result.flagged

  for (const [category, match] of Object.entries(result.categories)) {
    tags[`openai.response.categories.${category}`] = match
  }

  for (const [category, score] of Object.entries(result.category_scores)) {
    tags[`openai.response.category_scores.${category}`] = score
  }
}
|
|
715
|
+
|
|
716
|
+
// createCompletion, createChatCompletion, createEdit
//
// Tags token usage plus every returned choice: finish reason, logprobs
// presence, text, and (for chat) the message with any tool calls. Also stows
// the raw choices on the span store for later consumers.
function commonCreateResponseExtraction (tags, body, openaiStore, methodName) {
  usageExtraction(tags, body, methodName, openaiStore)

  if (!body.choices) return

  tags['openai.response.choices_count'] = body.choices.length

  openaiStore.choices = body.choices

  for (let choiceIdx = 0; choiceIdx < body.choices.length; choiceIdx++) {
    const choice = body.choices[choiceIdx]

    // logprobs can be null and we still want to tag it as 'returned' even when set to 'null'
    // (idiomatic `.includes` instead of `indexOf(...) !== -1`)
    const specifiesLogProb = Object.keys(choice).includes('logprobs')

    tags[`openai.response.choices.${choiceIdx}.finish_reason`] = choice.finish_reason
    tags[`openai.response.choices.${choiceIdx}.logprobs`] = specifiesLogProb ? 'returned' : undefined
    tags[`openai.response.choices.${choiceIdx}.text`] = truncateText(choice.text)

    // createChatCompletion only
    const message = choice.message || choice.delta // delta for streamed responses
    if (message) {
      tags[`openai.response.choices.${choiceIdx}.message.role`] = message.role
      tags[`openai.response.choices.${choiceIdx}.message.content`] = truncateText(message.content)
      tags[`openai.response.choices.${choiceIdx}.message.name`] = truncateText(message.name)
      if (message.tool_calls) {
        const toolCalls = message.tool_calls
        for (let toolIdx = 0; toolIdx < toolCalls.length; toolIdx++) {
          const { function: fn, id } = toolCalls[toolIdx]
          tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.function.name`] = fn.name
          tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.function.arguments`] = fn.arguments
          tags[`openai.response.choices.${choiceIdx}.message.tool_calls.${toolIdx}.id`] = id
        }
      }
    }
  }
}
|
|
756
|
+
|
|
757
|
+
// createCompletion, createChatCompletion, createEdit, createEmbedding
//
// Tags prompt/completion/total token counts. Prefers the exact numbers from
// `body.usage`; otherwise, for chat and text completions, estimates them from
// the stored request payload and the response body, marking the estimated
// counts with `*_estimated` tags.
function usageExtraction (tags, body, methodName, openaiStore) {
  let promptTokens = 0
  let completionTokens = 0
  let totalTokens = 0
  if (body && body.usage) {
    promptTokens = body.usage.prompt_tokens
    completionTokens = body.usage.completion_tokens
    totalTokens = body.usage.total_tokens
  } else if (body && body.model && ['chat.completions.create', 'completions.create'].includes(methodName)) {
    // null-safe: the previous condition read `body.model` without re-checking
    // `body`, which could throw on a nullish body once the usage branch was
    // skipped
    // estimate tokens based on method name for completions and chat completions
    const { model } = body
    let promptEstimated = false
    let completionEstimated = false

    // prompt tokens
    const payload = openaiStore
    const promptTokensCount = countPromptTokens(methodName, payload, model)
    promptTokens = promptTokensCount.promptTokens
    promptEstimated = promptTokensCount.promptEstimated

    // completion tokens
    const completionTokensCount = countCompletionTokens(body, model)
    completionTokens = completionTokensCount.completionTokens
    completionEstimated = completionTokensCount.completionEstimated

    // total tokens
    totalTokens = promptTokens + completionTokens
    if (promptEstimated) tags['openai.response.usage.prompt_tokens_estimated'] = true
    if (completionEstimated) tags['openai.response.usage.completion_tokens_estimated'] = true
  }

  if (promptTokens != null) tags['openai.response.usage.prompt_tokens'] = promptTokens
  if (completionTokens != null) tags['openai.response.usage.completion_tokens'] = completionTokens
  if (totalTokens != null) tags['openai.response.usage.total_tokens'] = totalTokens
}
|
|
793
|
+
|
|
794
|
+
// Masks an API key down to `sk-...` plus its last four characters so it can
// be tagged without leaking the secret. Falsy input is returned unchanged.
function truncateApiKey (apiKey) {
  // String#substr is deprecated; slice(-4) yields the same trailing chars
  // (including for keys shorter than four characters).
  return apiKey && `sk-...${apiKey.slice(-4)}`
}
|
|
797
|
+
|
|
798
|
+
/**
 * for cleaning up prompt and response
 *
 * Escapes newlines/tabs, then truncates to MAX_TEXT_LEN with a '...' suffix.
 * Returns undefined for empty or non-string input.
 */
function truncateText (text) {
  // A single guard covers everything the previous double-check did:
  // non-strings, empty strings, and other falsy values.
  if (typeof text !== 'string' || text.length === 0) return

  text = text
    .replace(RE_NEWLINE, '\\n')
    .replace(RE_TAB, '\\t')

  if (text.length > MAX_TEXT_LEN) {
    return text.substring(0, MAX_TEXT_LEN) + '...'
  }

  return text
}
|
|
815
|
+
|
|
816
|
+
// Tags chat message content, which may be a plain string or an array of
// typed parts (text input or image url).
function tagChatCompletionRequestContent (contents, messageIdx, tags) {
  if (typeof contents === 'string') {
    tags[`openai.request.messages.${messageIdx}.content`] = contents
  } else if (Array.isArray(contents)) {
    // content can also be an array of objects
    // which represent text input or image url
    // (index loop instead of for...in: for...in on arrays also walks any
    // inherited enumerable keys; the produced tag names are identical)
    for (let contentIdx = 0; contentIdx < contents.length; contentIdx++) {
      const content = contents[contentIdx]
      const type = content.type
      tags[`openai.request.messages.${messageIdx}.content.${contentIdx}.type`] = type
      if (type === 'text') {
        tags[`openai.request.messages.${messageIdx}.content.${contentIdx}.text`] = truncateText(content.text)
      } else if (type === 'image_url') {
        tags[`openai.request.messages.${messageIdx}.content.${contentIdx}.image_url.url`] =
          truncateText(content.image_url.url)
      }
      // unsupported type otherwise, won't be tagged
    }
  }
  // unsupported type otherwise, won't be tagged
}
|
|
837
|
+
|
|
838
|
+
// The server almost always responds with JSON
function coerceResponseBody (body, methodName) {
  // File-content downloads are raw payloads, not JSON — wrap them.
  const fileContentMethods = ['downloadFile', 'files.retrieveContent', 'files.content']
  if (fileContentMethods.includes(methodName)) {
    return { file: body }
  }

  if (typeof body === 'string') {
    try {
      return JSON.parse(body)
    } catch {
      return body
    }
  }

  // note: typeof null === 'object', so a null body is passed through here
  return typeof body === 'object' ? body : {}
}
|
|
860
|
+
|
|
861
|
+
// This method is used to replace a dynamic URL segment with an asterisk
function lookupOperationEndpoint (operationId, url) {
  const endpointByOperation = {
    deleteModel: '/v1/models/*',
    'models.del': '/v1/models/*',
    retrieveModel: '/v1/models/*',
    'models.retrieve': '/v1/models/*',

    deleteFile: '/v1/files/*',
    'files.del': '/v1/files/*',
    retrieveFile: '/v1/files/*',
    'files.retrieve': '/v1/files/*',

    downloadFile: '/v1/files/*/content',
    'files.retrieveContent': '/v1/files/*/content',
    'files.content': '/v1/files/*/content',

    retrieveFineTune: '/v1/fine-tunes/*',
    'fine-tune.retrieve': '/v1/fine-tunes/*',
    'fine_tuning.jobs.retrieve': '/v1/fine_tuning/jobs/*',

    listFineTuneEvents: '/v1/fine-tunes/*/events',
    'fine-tune.listEvents': '/v1/fine-tunes/*/events',
    'fine_tuning.jobs.listEvents': '/v1/fine_tuning/jobs/*/events',

    cancelFineTune: '/v1/fine-tunes/*/cancel',
    'fine-tune.cancel': '/v1/fine-tunes/*/cancel',
    'fine_tuning.jobs.cancel': '/v1/fine_tuning/jobs/*/cancel'
  }

  // Own-key check so prototype members (e.g. 'constructor') never match;
  // operations without a dynamic segment fall back to the caller's url.
  return Object.hasOwn(endpointByOperation, operationId) ? endpointByOperation[operationId] : url
}
|
|
902
|
+
|
|
903
|
+
/**
 * This function essentially normalizes the OpenAI method interface. Many methods accept
 * a single object argument. The remaining ones take individual arguments. This function
 * turns the individual arguments into an object to make extracting properties consistent.
 */
function normalizeRequestPayload (methodName, args) {
  // Methods that take positional arguments, mapped to the key each position
  // represents. An empty list means the method takes no arguments at all.
  const positionalKeys = new Map([
    ['listModels', []],
    ['models.list', []],
    ['listFiles', []],
    ['files.list', []],
    ['listFineTunes', []],
    ['fine_tuning.jobs.list', []],
    ['fine-tune.list', []],

    ['retrieveModel', ['id']],
    ['models.retrieve', ['id']],

    ['createFile', ['file', 'purpose']],

    ['deleteFile', ['file_id']],
    ['files.del', ['file_id']],
    ['retrieveFile', ['file_id']],
    ['files.retrieve', ['file_id']],
    ['downloadFile', ['file_id']],
    ['files.retrieveContent', ['file_id']],
    ['files.content', ['file_id']],

    // stream is undocumented
    ['listFineTuneEvents', ['fine_tune_id', 'stream']],
    ['fine_tuning.jobs.listEvents', ['fine_tune_id', 'stream']],
    ['fine-tune.listEvents', ['fine_tune_id', 'stream']],

    ['retrieveFineTune', ['fine_tune_id']],
    ['fine_tuning.jobs.retrieve', ['fine_tune_id']],
    ['fine-tune.retrieve', ['fine_tune_id']],
    ['deleteModel', ['fine_tune_id']],
    ['models.del', ['fine_tune_id']],
    ['cancelFineTune', ['fine_tune_id']],
    ['fine_tuning.jobs.cancel', ['fine_tune_id']],
    ['fine-tune.cancel', ['fine_tune_id']],

    // Note: order of prompt/mask in the Node.js lib differs from the public docs
    ['createImageEdit', ['file', 'prompt', 'mask', 'n', 'size', 'response_format', 'user']],
    ['createImageVariation', ['file', 'n', 'size', 'response_format', 'user']],

    // language is only used for createTranscription
    ['createTranscription', ['file', 'model', 'prompt', 'response_format', 'temperature', 'language']],
    ['createTranslation', ['file', 'model', 'prompt', 'response_format', 'temperature', 'language']]
  ])

  const keys = positionalKeys.get(methodName)
  if (keys) {
    // Build the payload with every declared key, mirroring the original
    // object literals (missing args become explicit undefined-valued keys).
    const payload = {}
    keys.forEach((key, idx) => { payload[key] = args[idx] })
    return payload
  }

  // Remaining OpenAI methods take a single object argument
  return args[0]
}
|
|
992
|
+
|
|
993
|
+
/**
 * Converts an array of tokens to a string
 * If input is already a string it's returned
 * In either case the value is truncated when `truncate` is set
 *
 * It's intentional that the array be truncated arbitrarily, e.g. "[999, 888, 77..."
 *
 * "foo" -> "foo"
 * [1,2,3] -> "[1, 2, 3]"
 */
function normalizeStringOrTokenArray (input, truncate) {
  let normalized
  if (Array.isArray(input)) {
    normalized = `[${input.join(', ')}]` // "[1, 2, 999]"
  } else {
    normalized = input // "foo"
  }
  if (!truncate) return normalized
  return truncateText(normalized)
}
|
|
1009
|
+
|
|
1010
|
+
// Returns the length of an array, 1 for a singular (non-array) truthy value
// (ie body.training_file vs body.training_files), and undefined when the
// value is absent/falsy.
function defensiveArrayLength (maybeArray) {
  if (!maybeArray) return undefined
  return Array.isArray(maybeArray) ? maybeArray.length : 1
}
|
|
1022
|
+
|
|
1023
|
+
module.exports = OpenAiTracingPlugin
|