dd-trace 3.24.0 → 3.26.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE-3rdparty.csv +4 -3
- package/index.d.ts +27 -0
- package/package.json +4 -4
- package/packages/datadog-instrumentations/src/aws-sdk.js +5 -0
- package/packages/datadog-instrumentations/src/cassandra-driver.js +6 -3
- package/packages/datadog-instrumentations/src/elasticsearch.js +39 -1
- package/packages/datadog-instrumentations/src/express.js +23 -0
- package/packages/datadog-instrumentations/src/helpers/hooks.js +4 -0
- package/packages/datadog-instrumentations/src/kafkajs.js +2 -2
- package/packages/datadog-instrumentations/src/openai.js +50 -0
- package/packages/datadog-instrumentations/src/opensearch.js +2 -1
- package/packages/datadog-instrumentations/src/passport-http.js +22 -0
- package/packages/datadog-instrumentations/src/passport-local.js +22 -0
- package/packages/datadog-instrumentations/src/passport-utils.js +36 -0
- package/packages/datadog-instrumentations/src/pg.js +17 -4
- package/packages/datadog-plugin-aws-sdk/src/base.js +3 -3
- package/packages/datadog-plugin-aws-sdk/src/services/dynamodb.js +1 -0
- package/packages/datadog-plugin-aws-sdk/src/services/kinesis.js +1 -0
- package/packages/datadog-plugin-aws-sdk/src/services/s3.js +1 -0
- package/packages/datadog-plugin-aws-sdk/src/services/sns.js +1 -0
- package/packages/datadog-plugin-aws-sdk/src/services/sqs.js +1 -0
- package/packages/datadog-plugin-cassandra-driver/src/index.js +6 -6
- package/packages/datadog-plugin-dns/src/lookup.js +1 -1
- package/packages/datadog-plugin-elasticsearch/src/index.js +2 -2
- package/packages/datadog-plugin-google-cloud-pubsub/src/consumer.js +1 -1
- package/packages/datadog-plugin-graphql/src/execute.js +1 -1
- package/packages/datadog-plugin-graphql/src/parse.js +1 -1
- package/packages/datadog-plugin-graphql/src/resolve.js +0 -5
- package/packages/datadog-plugin-graphql/src/validate.js +1 -1
- package/packages/datadog-plugin-grpc/src/client.js +9 -3
- package/packages/datadog-plugin-grpc/src/server.js +3 -3
- package/packages/datadog-plugin-http/src/client.js +1 -1
- package/packages/datadog-plugin-http/src/server.js +38 -34
- package/packages/datadog-plugin-http2/src/client.js +0 -5
- package/packages/datadog-plugin-http2/src/server.js +23 -23
- package/packages/datadog-plugin-kafkajs/src/consumer.js +6 -1
- package/packages/datadog-plugin-kafkajs/src/producer.js +8 -1
- package/packages/datadog-plugin-mocha/src/index.js +3 -3
- package/packages/datadog-plugin-moleculer/src/client.js +3 -3
- package/packages/datadog-plugin-moleculer/src/server.js +2 -2
- package/packages/datadog-plugin-mongodb-core/src/index.js +15 -4
- package/packages/datadog-plugin-next/src/index.js +50 -52
- package/packages/datadog-plugin-openai/src/index.js +685 -0
- package/packages/datadog-plugin-openai/src/services.js +43 -0
- package/packages/datadog-plugin-oracledb/src/index.js +3 -10
- package/packages/datadog-plugin-pg/src/index.js +3 -11
- package/packages/datadog-plugin-sharedb/src/index.js +1 -1
- package/packages/dd-trace/src/appsec/channels.js +1 -0
- package/packages/dd-trace/src/appsec/iast/taint-tracking/origin-types.js +3 -2
- package/packages/dd-trace/src/appsec/iast/taint-tracking/plugin.js +12 -2
- package/packages/dd-trace/src/appsec/index.js +20 -0
- package/packages/dd-trace/src/appsec/passport.js +110 -0
- package/packages/dd-trace/src/appsec/sdk/track_event.js +14 -5
- package/packages/dd-trace/src/ci-visibility/exporters/git/git_metadata.js +17 -4
- package/packages/dd-trace/src/ci-visibility/test-api-manual/test-api-manual-plugin.js +45 -0
- package/packages/dd-trace/src/config.js +38 -1
- package/packages/dd-trace/src/constants.js +2 -0
- package/packages/dd-trace/src/data_streams_context.js +15 -0
- package/packages/dd-trace/src/datastreams/pathway.js +58 -0
- package/packages/dd-trace/src/datastreams/processor.js +194 -0
- package/packages/dd-trace/src/datastreams/writer.js +66 -0
- package/packages/dd-trace/src/dogstatsd.js +12 -4
- package/packages/dd-trace/src/external-logger/src/index.js +4 -0
- package/packages/dd-trace/src/opentelemetry/span.js +1 -0
- package/packages/dd-trace/src/opentracing/span.js +32 -0
- package/packages/dd-trace/src/opentracing/tracer.js +3 -1
- package/packages/dd-trace/src/plugin_manager.js +7 -2
- package/packages/dd-trace/src/plugins/client.js +1 -0
- package/packages/dd-trace/src/plugins/database.js +2 -1
- package/packages/dd-trace/src/plugins/index.js +2 -0
- package/packages/dd-trace/src/plugins/outbound.js +59 -1
- package/packages/dd-trace/src/plugins/server.js +2 -0
- package/packages/dd-trace/src/plugins/tracing.js +5 -1
- package/packages/dd-trace/src/plugins/util/exec.js +2 -0
- package/packages/dd-trace/src/plugins/util/git.js +38 -10
- package/packages/dd-trace/src/plugins/util/user-provided-git.js +36 -2
- package/packages/dd-trace/src/profiling/config.js +34 -7
- package/packages/dd-trace/src/proxy.js +6 -0
- package/packages/dd-trace/src/service-naming/index.js +13 -1
- package/packages/dd-trace/src/service-naming/schemas/v0/index.js +2 -1
- package/packages/dd-trace/src/service-naming/schemas/v0/storage.js +34 -1
- package/packages/dd-trace/src/service-naming/schemas/v0/web.js +27 -0
- package/packages/dd-trace/src/service-naming/schemas/v1/index.js +2 -1
- package/packages/dd-trace/src/service-naming/schemas/v1/storage.js +31 -0
- package/packages/dd-trace/src/service-naming/schemas/v1/web.js +26 -0
- package/packages/dd-trace/src/telemetry/index.js +3 -0
- package/packages/dd-trace/src/telemetry/metrics.js +281 -0
- package/packages/dd-trace/src/tracer.js +19 -1
|
@@ -0,0 +1,685 @@
|
|
|
1
|
+
'use strict'
|
|
2
|
+
|
|
3
|
+
const path = require('path')
|
|
4
|
+
|
|
5
|
+
const TracingPlugin = require('../../dd-trace/src/plugins/tracing')
|
|
6
|
+
const { storage } = require('../../datadog-core')
|
|
7
|
+
const services = require('./services')
|
|
8
|
+
const Sampler = require('../../dd-trace/src/sampler')
|
|
9
|
+
const { MEASURED } = require('../../../ext/tags')
|
|
10
|
+
|
|
11
|
+
// TODO: In the future we should refactor config.js to make it requirable
|
|
12
|
+
let MAX_TEXT_LEN = 128
|
|
13
|
+
|
|
14
|
+
/**
 * Tracing plugin for the `openai` npm package. For each client call it starts
 * an `openai.request` span, tags request/response details, emits metrics via
 * the services helper, and submits sampled logs.
 *
 * NOTE(review): the class is named `OpenApiPlugin` but the plugin id is
 * 'openai' — presumably a naming slip kept for compatibility.
 */
class OpenApiPlugin extends TracingPlugin {
  static get id () { return 'openai' }
  static get operation () { return 'request' }
  static get system () { return 'openai' }

  constructor (...args) {
    super(...args)

    // services.init is assumed to return { metrics, logger } clients built
    // from the tracer config — see ./services for the actual wiring.
    const { metrics, logger } = services.init(this._tracerConfig)
    this.metrics = metrics
    this.logger = logger

    this.sampler = new Sampler(0.1) // default 10% log sampling

    // hoist the max length env var to avoid making all of these functions a class method
    MAX_TEXT_LEN = this._tracerConfig.openaiSpanCharLimit
  }

  /**
   * Tears down the external metrics/logs services when the plugin is
   * disabled, then defers to the base class.
   */
  configure (config) {
    if (config.enabled === false) {
      services.shutdown()
    }

    super.configure(config)
  }

  /**
   * Starts the `openai.request` span and tags everything knowable from the
   * outgoing request. Request fields needed later for sampled logs are
   * stashed on the async storage under the `openai` key.
   */
  start ({ methodName, args, basePath, apiKey }) {
    const payload = normalizeRequestPayload(methodName, args)

    const span = this.startSpan('openai.request', {
      service: this.config.service,
      resource: methodName,
      type: 'openai',
      kind: 'client',
      meta: {
        [MEASURED]: 1,
        // Data that is always available with a request
        'openai.user.api_key': truncateApiKey(apiKey),
        'openai.api_base': basePath,
        // The openai.api_type (openai|azure) is present in Python but not in Node.js
        // Add support once https://github.com/openai/openai-node/issues/53 is closed

        // Data that is common across many requests; fields absent from this
        // payload simply tag as undefined
        'openai.request.best_of': payload.best_of,
        'openai.request.echo': payload.echo,
        'openai.request.logprobs': payload.logprobs,
        'openai.request.max_tokens': payload.max_tokens,
        'openai.request.model': payload.model, // vague model
        'openai.request.n': payload.n,
        'openai.request.presence_penalty': payload.presence_penalty,
        'openai.request.frequency_penalty': payload.frequency_penalty,
        'openai.request.stop': payload.stop,
        'openai.request.suffix': payload.suffix,
        'openai.request.temperature': payload.temperature,
        'openai.request.top_p': payload.top_p,
        'openai.request.user': payload.user,
        'openai.request.file_id': payload.file_id // deleteFile, retrieveFile, downloadFile
      }
    })

    const fullStore = storage.getStore() || {} // certain request body fields are later used for logs
    const store = Object.create(null)
    fullStore.openai = store // namespacing these fields

    const tags = {} // The remaining tags are added one at a time

    // createChatCompletion, createCompletion, createImage, createImageEdit, createTranscription, createTranslation
    if ('prompt' in payload) {
      const prompt = payload.prompt
      store.prompt = prompt
      if (typeof prompt === 'string' || (Array.isArray(prompt) && typeof prompt[0] === 'number')) {
        // This is a single prompt, either String or [Number]
        tags[`openai.request.prompt`] = normalizeStringOrTokenArray(prompt)
      } else if (Array.isArray(prompt)) {
        // This is multiple prompts, either [String] or [[Number]]
        for (let i = 0; i < prompt.length; i++) {
          tags[`openai.request.prompt.${i}`] = normalizeStringOrTokenArray(prompt[i])
        }
      }
    }

    // createEdit, createEmbedding, createModeration
    if ('input' in payload) {
      const normalized = normalizeStringOrTokenArray(payload.input, false)
      tags[`openai.request.input`] = truncateText(normalized)
      store.input = normalized
    }

    // createChatCompletion, createCompletion
    if (typeof payload.logit_bias === 'object' && payload.logit_bias) {
      for (const [tokenId, bias] of Object.entries(payload.logit_bias)) {
        tags[`openai.request.logit_bias.${tokenId}`] = bias
      }
    }

    // Endpoint-specific request extraction
    switch (methodName) {
      case 'createFineTune':
        createFineTuneRequestExtraction(tags, payload)
        break

      case 'createImage':
      case 'createImageEdit':
      case 'createImageVariation':
        commonCreateImageRequestExtraction(tags, payload, store)
        break

      case 'createChatCompletion':
        createChatCompletionRequestExtraction(tags, payload, store)
        break

      case 'createFile':
      case 'retrieveFile':
        commonFileRequestExtraction(tags, payload)
        break

      case 'createTranscription':
      case 'createTranslation':
        commonCreateAudioRequestExtraction(tags, payload, store)
        break

      case 'retrieveModel':
        retrieveModelRequestExtraction(tags, payload)
        break

      case 'listFineTuneEvents':
      case 'retrieveFineTune':
      case 'deleteModel':
      case 'cancelFineTune':
        commonLookupFineTuneRequestExtraction(tags, payload)
        break

      case 'createEdit':
        createEditRequestExtraction(tags, payload, store)
        break
    }

    span.addTags(tags)
  }

  /**
   * Tags the response, finishes the span, then emits the sampled log and the
   * request metrics. `path` here is the request URL path, not the node module.
   */
  finish ({ headers, body, method, path }) {
    const span = this.activeSpan
    // NOTE(review): reads the resource name off private span internals —
    // relies on dd-trace span implementation details.
    const methodName = span._spanContext._tags['resource.name']

    body = coerceResponseBody(body, methodName)

    // NOTE(review): assumes the store set up in start() is still active on
    // this async context — confirm for error/edge paths.
    const fullStore = storage.getStore()
    const store = fullStore.openai

    const endpoint = lookupOperationEndpoint(methodName, path)

    const tags = {
      'openai.request.endpoint': endpoint,
      'openai.request.method': method,

      'openai.organization.id': body.organization_id, // only available in fine-tunes endpoints
      'openai.organization.name': headers['openai-organization'],

      'openai.response.model': headers['openai-model'] || body.model, // specific model, often undefined
      'openai.response.id': body.id, // common creation value, numeric epoch
      'openai.response.deleted': body.deleted, // common boolean field in delete responses

      // The OpenAI API appears to use both created and created_at in different places
      // Here we're consciously choosing to surface this inconsistency instead of normalizing
      'openai.response.created': body.created,
      'openai.response.created_at': body.created_at
    }

    responseDataExtractionByMethod(methodName, tags, body, store)
    span.addTags(tags)

    super.finish()
    this.sendLog(methodName, span, tags, store, false)
    this.sendMetrics(headers, body, endpoint, span._duration)
  }

  /**
   * Error path: records duration/error metrics and a sampled error log.
   * Most request details are unknown when the call fails.
   */
  error (...args) {
    super.error(...args)

    const span = this.activeSpan
    const methodName = span._spanContext._tags['resource.name']

    const fullStore = storage.getStore()
    const store = fullStore.openai

    // We don't know most information about the request when it fails

    const tags = [`error:1`]
    this.metrics.distribution('openai.request.duration', span._duration * 1000, tags)
    this.metrics.increment('openai.request.error', 1, tags)

    this.sendLog(methodName, span, {}, store, true)
  }

  /**
   * Emits duration, token-usage, and rate-limit metrics for a successful
   * request. `duration` is in ms (span units) and is scaled here.
   */
  sendMetrics (headers, body, endpoint, duration) {
    const tags = [
      `org:${headers['openai-organization']}`,
      `endpoint:${endpoint}`, // just "/v1/models", no method
      `model:${headers['openai-model']}`,
      `error:0`
    ]

    this.metrics.distribution('openai.request.duration', duration * 1000, tags)

    if (body && ('usage' in body)) {
      const promptTokens = body.usage.prompt_tokens
      const completionTokens = body.usage.completion_tokens
      this.metrics.distribution('openai.tokens.prompt', promptTokens, tags)
      this.metrics.distribution('openai.tokens.completion', completionTokens, tags)
      this.metrics.distribution('openai.tokens.total', promptTokens + completionTokens, tags)
    }

    // Rate-limit gauges are only emitted when the server sent the header
    if ('x-ratelimit-limit-requests' in headers) {
      this.metrics.gauge('openai.ratelimit.requests', Number(headers['x-ratelimit-limit-requests']), tags)
    }

    if ('x-ratelimit-remaining-requests' in headers) {
      this.metrics.gauge('openai.ratelimit.remaining.requests', Number(headers['x-ratelimit-remaining-requests']), tags)
    }

    if ('x-ratelimit-limit-tokens' in headers) {
      this.metrics.gauge('openai.ratelimit.tokens', Number(headers['x-ratelimit-limit-tokens']), tags)
    }

    if ('x-ratelimit-remaining-tokens' in headers) {
      this.metrics.gauge('openai.ratelimit.remaining.tokens', Number(headers['x-ratelimit-remaining-tokens']), tags)
    }
  }

  /**
   * Submits one sampled log record per request. Skipped when nothing was
   * stored for this request or the 10% sampler says no.
   */
  sendLog (methodName, span, tags, store, error) {
    if (!Object.keys(store).length) return
    if (!this.sampler.isSampled()) return

    const log = {
      status: error ? 'error' : 'info',
      message: `sampled ${methodName}`,
      ...store
    }

    this.logger.log(log, span, tags)
  }
}
|
|
255
|
+
|
|
256
|
+
// createEdit: record the edit instruction on the span tags and keep it in the
// log store for sampled logs.
function createEditRequestExtraction (tags, payload, store) {
  const { instruction } = payload
  tags['openai.request.instruction'] = instruction
  store.instruction = instruction
}
|
|
261
|
+
|
|
262
|
+
// retrieveModel: the only request detail is the model id being looked up.
function retrieveModelRequestExtraction (tags, payload) {
  const { id } = payload
  tags['openai.request.id'] = id
}
|
|
265
|
+
|
|
266
|
+
// createChatCompletion: tag each chat message individually and retain the raw
// messages array for sampled logs.
function createChatCompletionRequestExtraction (tags, payload, store) {
  if (!defensiveArrayLength(payload.messages)) return

  store.messages = payload.messages

  let index = 0
  for (const message of payload.messages) {
    const prefix = `openai.request.${index}`
    tags[`${prefix}.content`] = truncateText(message.content)
    tags[`${prefix}.role`] = message.role
    tags[`${prefix}.name`] = message.name
    tags[`${prefix}.finish_reason`] = message.finish_reason
    index++
  }
}
|
|
278
|
+
|
|
279
|
+
// Shared request extraction for createImage, createImageEdit and
// createImageVariation.
function commonCreateImageRequestExtraction (tags, payload, store) {
  // A file read stream exposes its source path (createImageEdit,
  // createImageVariation); only the basename is surfaced.
  const hasImageStream = payload.file && typeof payload.file === 'object' && payload.file.path
  if (hasImageStream) {
    const file = path.basename(payload.file.path)
    tags['openai.request.image'] = file
    store.file = file
  }

  // The mask image is only accepted by createImageEdit.
  const hasMaskStream = payload.mask && typeof payload.mask === 'object' && payload.mask.path
  if (hasMaskStream) {
    const mask = path.basename(payload.mask.path)
    tags['openai.request.mask'] = mask
    store.mask = mask
  }

  Object.assign(tags, {
    'openai.request.size': payload.size,
    'openai.request.response_format': payload.response_format,
    'openai.request.language': payload.language
  })
}
|
|
298
|
+
|
|
299
|
+
// Dispatches to the response-extraction routine for the given client method.
// Methods without an entry get no endpoint-specific response tags. The extra
// `store` argument is ignored by extractors that don't need it.
function responseDataExtractionByMethod (methodName, tags, body, store) {
  const extractors = new Map([
    ['createModeration', createModerationResponseExtraction],

    ['createCompletion', commonCreateResponseExtraction],
    ['createChatCompletion', commonCreateResponseExtraction],
    ['createEdit', commonCreateResponseExtraction],

    ['listFiles', commonListCountResponseExtraction],
    ['listFineTunes', commonListCountResponseExtraction],
    ['listFineTuneEvents', commonListCountResponseExtraction],

    ['createEmbedding', createEmbeddingResponseExtraction],

    ['createFile', createRetrieveFileResponseExtraction],
    ['retrieveFile', createRetrieveFileResponseExtraction],

    ['deleteFile', deleteFileResponseExtraction],
    ['downloadFile', downloadFileResponseExtraction],

    ['createFineTune', commonFineTuneResponseExtraction],
    ['retrieveFineTune', commonFineTuneResponseExtraction],
    ['cancelFineTune', commonFineTuneResponseExtraction],

    ['createTranscription', createAudioResponseExtraction],
    ['createTranslation', createAudioResponseExtraction],

    ['createImage', commonImageResponseExtraction],
    ['createImageEdit', commonImageResponseExtraction],
    ['createImageVariation', commonImageResponseExtraction],

    ['listModels', listModelsResponseExtraction],
    ['retrieveModel', retrieveModelResponseExtraction]
  ])

  const extractor = extractors.get(methodName)
  if (extractor) extractor(tags, body, store)
}
|
|
360
|
+
|
|
361
|
+
// retrieveModel: tags ownership metadata plus the first permission entry.
//
// Fix: the original read `body.permission[0].<field>` unconditionally, which
// throws when the response carries no `permission` array (or an empty one);
// a sparse response would then crash inside the tracer's finish path. The
// access is now guarded, and the repeated per-field lines are collapsed into
// a loop over the field names.
function retrieveModelResponseExtraction (tags, body) {
  tags['openai.response.owned_by'] = body.owned_by
  tags['openai.response.parent'] = body.parent
  tags['openai.response.root'] = body.root

  const permission = Array.isArray(body.permission) ? body.permission[0] : undefined
  if (!permission) return

  for (const field of [
    'id', 'created', 'allow_create_engine', 'allow_sampling', 'allow_logprobs',
    'allow_search_indices', 'allow_view', 'allow_fine_tuning', 'organization',
    'group', 'is_blocking'
  ]) {
    tags[`openai.response.permission.${field}`] = permission[field]
  }
}
|
|
378
|
+
|
|
379
|
+
// Shared request extraction for fine-tune lookup style calls
// (listFineTuneEvents, retrieveFineTune, deleteModel, cancelFineTune).
function commonLookupFineTuneRequestExtraction (tags, body) {
  tags['openai.request.fine_tune_id'] = body.fine_tune_id
  tags['openai.request.stream'] = Boolean(body.stream) // listFineTuneEvents
}
|
|
383
|
+
|
|
384
|
+
// listModels: only the number of returned models is tagged.
function listModelsResponseExtraction (tags, body) {
  const { data } = body
  tags['openai.response.count'] = data.length
}
|
|
387
|
+
|
|
388
|
+
// Shared response extraction for the image endpoints. Each image entry holds
// exactly one of `url` or `b64_json`; the base64 payload itself is never
// tagged, only the fact that it was returned.
function commonImageResponseExtraction (tags, body) {
  tags['openai.response.images_count'] = body.data.length

  body.data.forEach((image, i) => {
    tags[`openai.response.images.${i}.url`] = truncateText(image.url)
    tags[`openai.response.images.${i}.b64_json`] = image.b64_json && 'returned'
  })
}
|
|
398
|
+
|
|
399
|
+
// createTranscription / createTranslation response extraction.
//
// Fix: `language`, `duration` and `segments` are only present when the
// request asked for the `verbose_json` response format; the default formats
// return just `text`. The original read `body.segments.length`
// unconditionally and threw on `undefined.length` — the array access is now
// guarded so non-verbose responses tag `segments_count` as undefined.
function createAudioResponseExtraction (tags, body) {
  tags['openai.response.text'] = body.text
  tags['openai.response.language'] = body.language
  tags['openai.response.duration'] = body.duration
  tags['openai.response.segments_count'] = Array.isArray(body.segments) ? body.segments.length : undefined
}
|
|
405
|
+
|
|
406
|
+
// createFineTune: most request fields map 1:1 onto tags; only the
// classification betas are summarized by their count.
function createFineTuneRequestExtraction (tags, body) {
  for (const field of [
    'training_file', 'validation_file', 'n_epochs', 'batch_size',
    'learning_rate_multiplier', 'prompt_loss_weight',
    'compute_classification_metrics', 'classification_n_classes',
    'classification_positive_class'
  ]) {
    tags[`openai.request.${field}`] = body[field]
  }

  tags['openai.request.classification_betas_count'] = defensiveArrayLength(body.classification_betas)
}
|
|
418
|
+
|
|
419
|
+
// Shared response extraction for createFineTune, retrieveFineTune and
// cancelFineTune. Arrays are summarized as counts; hyperparams are tagged
// individually.
function commonFineTuneResponseExtraction (tags, body) {
  tags['openai.response.events_count'] = body.events.length
  tags['openai.response.fine_tuned_model'] = body.fine_tuned_model

  const { hyperparams } = body
  tags['openai.response.hyperparams.n_epochs'] = hyperparams.n_epochs
  tags['openai.response.hyperparams.batch_size'] = hyperparams.batch_size
  tags['openai.response.hyperparams.prompt_loss_weight'] = hyperparams.prompt_loss_weight
  tags['openai.response.hyperparams.learning_rate_multiplier'] = hyperparams.learning_rate_multiplier

  tags['openai.response.training_files_count'] = body.training_files.length
  tags['openai.response.result_files_count'] = body.result_files.length
  tags['openai.response.validation_files_count'] = body.validation_files.length
  tags['openai.response.updated_at'] = body.updated_at
  tags['openai.response.status'] = body.status
}
|
|
432
|
+
|
|
433
|
+
// the OpenAI package appears to stream the content download then provide it all as a singular string
|
|
434
|
+
// downloadFile: the body at this point is the wrapped file content string
// (see coerceResponseBody); only its size is tagged.
function downloadFileResponseExtraction (tags, body) {
  const { file } = body
  tags['openai.response.total_bytes'] = file.length
}
|
|
437
|
+
|
|
438
|
+
// deleteFile: only the id of the deleted file is tagged.
function deleteFileResponseExtraction (tags, body) {
  const { id } = body
  tags['openai.response.id'] = id
}
|
|
441
|
+
|
|
442
|
+
// Shared request extraction for createTranscription and createTranslation.
function commonCreateAudioRequestExtraction (tags, body, store) {
  tags['openai.request.response_format'] = body.response_format
  tags['openai.request.language'] = body.language

  // A file read stream exposes its source path; surface just the basename
  // and keep it for sampled logs.
  const isStream = body.file && typeof body.file === 'object' && body.file.path
  if (isStream) {
    const filename = path.basename(body.file.path)
    tags['openai.request.filename'] = filename
    store.file = filename
  }
}
|
|
452
|
+
|
|
453
|
+
// Shared request extraction for createFile and retrieveFile.
function commonFileRequestExtraction (tags, body) {
  tags['openai.request.purpose'] = body.purpose

  // The user can provide either exact file contents or a file read stream.
  // For a stream we best-effort extract the filename from its path.
  if (body.file && typeof body.file === 'object' && body.file.path) {
    tags['openai.request.filename'] = path.basename(body.file.path)
  }
}
|
|
463
|
+
|
|
464
|
+
// Shared response extraction for createFile and retrieveFile; all fields map
// 1:1 onto tags.
function createRetrieveFileResponseExtraction (tags, body) {
  for (const field of ['filename', 'purpose', 'bytes', 'status', 'status_details']) {
    tags[`openai.response.${field}`] = body[field]
  }
}
|
|
471
|
+
|
|
472
|
+
// createEmbedding: token usage plus a per-embedding vector length (the
// vectors themselves are never tagged).
function createEmbeddingResponseExtraction (tags, body) {
  usageExtraction(tags, body)

  tags['openai.response.embeddings_count'] = body.data.length
  body.data.forEach((entry, i) => {
    tags[`openai.response.embedding.${i}.embedding_length`] = entry.embedding.length
  })
}
|
|
480
|
+
|
|
481
|
+
// Generic list responses (listFiles, listFineTunes, listFineTuneEvents) are
// summarized by their item count.
function commonListCountResponseExtraction (tags, body) {
  const { data } = body
  tags['openai.response.count'] = data.length
}
|
|
484
|
+
|
|
485
|
+
// TODO: Is there ever more than one entry in body.results?
|
|
486
|
+
// createModeration: tags the overall flagged verdict plus every category
// match and score from the first result entry. The response model is not
// tagged here — it is already extracted globally in finish().
// TODO: Is there ever more than one entry in body.results?
function createModerationResponseExtraction (tags, body) {
  tags['openai.response.id'] = body.id

  const [result] = body.results
  tags['openai.response.flagged'] = result.flagged

  for (const [category, match] of Object.entries(result.categories)) {
    tags[`openai.response.categories.${category}`] = match
  }

  for (const [category, score] of Object.entries(result.category_scores)) {
    tags[`openai.response.category_scores.${category}`] = score
  }
}
|
|
499
|
+
|
|
500
|
+
// createCompletion, createChatCompletion, createEdit
|
|
501
|
+
// Shared response extraction for createCompletion, createChatCompletion and
// createEdit. Choices are both tagged (truncated) and retained in the log
// store for sampled logs.
function commonCreateResponseExtraction (tags, body, store) {
  usageExtraction(tags, body)

  tags['openai.response.choices_count'] = body.choices.length

  store.choices = body.choices

  body.choices.forEach((choice, i) => {
    const prefix = `openai.response.choices.${i}`
    tags[`${prefix}.finish_reason`] = choice.finish_reason
    tags[`${prefix}.logprobs`] = ('logprobs' in choice) ? 'returned' : undefined
    tags[`${prefix}.text`] = truncateText(choice.text)

    // createChatCompletion only: content is nested under a message object
    if ('message' in choice) {
      const { role, content, name } = choice.message
      tags[`${prefix}.message.role`] = role
      tags[`${prefix}.message.content`] = truncateText(content)
      tags[`${prefix}.message.name`] = truncateText(name)
    }
  })
}
|
|
523
|
+
|
|
524
|
+
// createCompletion, createChatCompletion, createEdit, createEmbedding
|
|
525
|
+
// Token usage tagging shared by createCompletion, createChatCompletion,
// createEdit and createEmbedding. A missing/non-object usage block is a no-op.
function usageExtraction (tags, body) {
  const usage = body.usage
  if (!usage || typeof usage !== 'object') return

  tags['openai.response.usage.prompt_tokens'] = usage.prompt_tokens
  tags['openai.response.usage.completion_tokens'] = usage.completion_tokens
  tags['openai.response.usage.total_tokens'] = usage.total_tokens
}
|
|
531
|
+
|
|
532
|
+
// Redacts the API key for tagging, keeping only the last four characters.
//
// Fixes: the original called the deprecated `String.prototype.substr` and
// threw a TypeError when `apiKey` was undefined (the key may be absent from
// the client configuration); now it returns undefined in that case so the
// tag is simply omitted.
function truncateApiKey (apiKey) {
  if (!apiKey) return
  return `sk-...${apiKey.slice(-4)}`
}
|
|
535
|
+
|
|
536
|
+
/**
|
|
537
|
+
* for cleaning up prompt and response
|
|
538
|
+
*/
|
|
539
|
+
/**
 * Normalizes prompt/response text for tagging: escapes newlines and tabs,
 * then truncates to MAX_TEXT_LEN characters with a trailing ellipsis.
 * Falsy input returns undefined so the tag is omitted.
 */
function truncateText (text) {
  if (!text) return

  const escaped = text
    .replaceAll('\n', '\\n')
    .replaceAll('\t', '\\t')

  return escaped.length > MAX_TEXT_LEN
    ? `${escaped.substring(0, MAX_TEXT_LEN)}...`
    : escaped
}
|
|
552
|
+
|
|
553
|
+
// The server almost always responds with JSON
|
|
554
|
+
// The server almost always responds with JSON; downloadFile is the exception
// and yields raw file content, which is wrapped so extraction code can treat
// every response uniformly as an object.
//
// Fix: `typeof null === 'object'`, so the original returned `null` bodies
// unchanged and callers crashed reading properties off them (e.g.
// `body.organization_id` in finish()). Null now coerces to `{}` like any
// other non-object body.
function coerceResponseBody (body, methodName) {
  if (methodName === 'downloadFile') {
    return { file: body }
  }

  return typeof body === 'object' && body !== null ? body : {}
}
|
|
562
|
+
|
|
563
|
+
// This method is used to replace a dynamic URL segment with an asterisk
|
|
564
|
+
// Replaces the dynamic URL segment(s) of id-addressed operations with an
// asterisk so metrics and tags aggregate per endpoint instead of per
// resource id. Unknown operations fall back to the raw URL.
function lookupOperationEndpoint (operationId, url) {
  const endpoints = new Map([
    ['deleteModel', '/v1/models/*'],
    ['retrieveModel', '/v1/models/*'],
    ['deleteFile', '/v1/files/*'],
    ['retrieveFile', '/v1/files/*'],
    ['downloadFile', '/v1/files/*/content'],
    ['retrieveFineTune', '/v1/fine-tunes/*'],
    ['listFineTuneEvents', '/v1/fine-tunes/*/events'],
    ['cancelFineTune', '/v1/fine-tunes/*/cancel']
  ])

  return endpoints.get(operationId) ?? url
}
|
|
589
|
+
|
|
590
|
+
/**
|
|
591
|
+
* This function essentially normalizes the OpenAI method interface. Many methods accept
|
|
592
|
+
* a single object argument. The remaining ones take individual arguments. This function
|
|
593
|
+
* turns the individual arguments into an object to make extracting properties consistent.
|
|
594
|
+
*/
|
|
595
|
+
/**
 * Normalizes the OpenAI method interface. Many methods accept a single
 * object argument; the remaining ones take positional arguments. The
 * positional forms are converted into an object so property extraction is
 * uniform everywhere else.
 */
function normalizeRequestPayload (methodName, args) {
  // Methods taking no arguments at all
  if (methodName === 'listModels' || methodName === 'listFiles' || methodName === 'listFineTunes') {
    return {}
  }

  // Methods taking positional arguments, mapped to their parameter names in
  // call order.
  const positional = {
    retrieveModel: ['id'],
    createFile: ['file', 'purpose'],
    deleteFile: ['file_id'],
    retrieveFile: ['file_id'],
    downloadFile: ['file_id'],
    listFineTuneEvents: ['fine_tune_id', 'stream'], // stream is undocumented
    retrieveFineTune: ['fine_tune_id'],
    deleteModel: ['fine_tune_id'],
    cancelFineTune: ['fine_tune_id'],
    // Note: order of prompt/mask in the Node.js lib differs from the public docs
    createImageEdit: ['file', 'prompt', 'mask', 'n', 'size', 'response_format', 'user'],
    createImageVariation: ['file', 'n', 'size', 'response_format', 'user'],
    createTranscription: ['file', 'model', 'prompt', 'response_format', 'temperature', 'language'],
    // language is only used for createTranscription
    createTranslation: ['file', 'model', 'prompt', 'response_format', 'temperature', 'language']
  }

  const names = positional[methodName]
  if (Array.isArray(names)) {
    const payload = {}
    names.forEach((name, i) => { payload[name] = args[i] })
    return payload
  }

  // Remaining OpenAI methods take a single object argument
  return args[0]
}
|
|
663
|
+
|
|
664
|
+
/**
|
|
665
|
+
* Converts an array of tokens to a string
|
|
666
|
+
* If input is already a string it's returned
|
|
667
|
+
* In either case the value is truncated
|
|
668
|
+
|
|
669
|
+
* It's intentional that the array be truncated arbitrarily, e.g. "[999, 888, 77..."
|
|
670
|
+
|
|
671
|
+
* "foo" -> "foo"
|
|
672
|
+
* [1,2,3] -> "[1, 2, 3]"
|
|
673
|
+
*/
|
|
674
|
+
/**
 * Converts an array of tokens to a display string; a string input is
 * returned as-is. The value is optionally truncated — intentionally allowing
 * an array to be cut mid-entry, e.g. "[999, 888, 77...".
 *
 * "foo" -> "foo"
 * [1,2,3] -> "[1, 2, 3]"
 */
function normalizeStringOrTokenArray (input, truncate = true) {
  const normalized = Array.isArray(input) ? `[${input.join(', ')}]` : input
  return truncate ? truncateText(normalized) : normalized
}
|
|
680
|
+
|
|
681
|
+
// Length of an array, or undefined for any non-array (including missing)
// value — lets callers tag counts without pre-checking the field.
function defensiveArrayLength (maybeArray) {
  if (!Array.isArray(maybeArray)) return undefined
  return maybeArray.length
}
|
|
684
|
+
|
|
685
|
+
module.exports = OpenApiPlugin
|