@fallom/trace 0.1.12 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +215 -178
- package/dist/chunk-2BP4H4AD.mjs +3012 -0
- package/dist/chunk-7P6ASYW6.mjs +9 -0
- package/dist/chunk-K7HYYE4Y.mjs +2930 -0
- package/dist/chunk-KAZ5NEU2.mjs +2237 -0
- package/dist/chunk-KMA4IPED.mjs +252 -0
- package/dist/chunk-W6M2RQ3W.mjs +251 -0
- package/dist/index.d.mts +210 -256
- package/dist/index.d.ts +210 -256
- package/dist/index.js +792 -789
- package/dist/index.mjs +592 -590
- package/dist/models-2Y6DRQPS.mjs +9 -0
- package/dist/models-BUHMMTWK.mjs +9 -0
- package/dist/models-JIO5LVMB.mjs +8 -0
- package/dist/models-JKMOBZUO.mjs +8 -0
- package/dist/prompts-XSZHTCX7.mjs +15 -0
- package/package.json +1 -1
|
@@ -0,0 +1,2930 @@
|
|
|
1
|
+
import {
|
|
2
|
+
__export
|
|
3
|
+
} from "./chunk-7P6ASYW6.mjs";
|
|
4
|
+
|
|
5
|
+
// src/models.ts
|
|
6
|
+
var models_exports = {};
|
|
7
|
+
__export(models_exports, {
|
|
8
|
+
get: () => get,
|
|
9
|
+
init: () => init2
|
|
10
|
+
});
|
|
11
|
+
import { createHash } from "crypto";
|
|
12
|
+
|
|
13
|
+
// src/trace.ts
|
|
14
|
+
var trace_exports = {};
|
|
15
|
+
__export(trace_exports, {
|
|
16
|
+
FallomSession: () => FallomSession,
|
|
17
|
+
clearSession: () => clearSession,
|
|
18
|
+
getSession: () => getSession,
|
|
19
|
+
init: () => init,
|
|
20
|
+
runWithSession: () => runWithSession,
|
|
21
|
+
session: () => session,
|
|
22
|
+
setSession: () => setSession,
|
|
23
|
+
shutdown: () => shutdown,
|
|
24
|
+
span: () => span,
|
|
25
|
+
wrapAISDK: () => wrapAISDK,
|
|
26
|
+
wrapAnthropic: () => wrapAnthropic,
|
|
27
|
+
wrapGoogleAI: () => wrapGoogleAI,
|
|
28
|
+
wrapMastraAgent: () => wrapMastraAgent,
|
|
29
|
+
wrapOpenAI: () => wrapOpenAI
|
|
30
|
+
});
|
|
31
|
+
import { AsyncLocalStorage } from "async_hooks";
|
|
32
|
+
import { NodeSDK } from "@opentelemetry/sdk-node";
|
|
33
|
+
import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
|
|
34
|
+
|
|
35
|
+
// node_modules/@opentelemetry/resources/build/esm/Resource.js
|
|
36
|
+
import { diag } from "@opentelemetry/api";
|
|
37
|
+
|
|
38
|
+
// node_modules/@opentelemetry/resources/node_modules/@opentelemetry/semantic-conventions/build/esm/resource/SemanticResourceAttributes.js
|
|
39
|
+
var SemanticResourceAttributes = {
|
|
40
|
+
/**
|
|
41
|
+
* Name of the cloud provider.
|
|
42
|
+
*/
|
|
43
|
+
CLOUD_PROVIDER: "cloud.provider",
|
|
44
|
+
/**
|
|
45
|
+
* The cloud account ID the resource is assigned to.
|
|
46
|
+
*/
|
|
47
|
+
CLOUD_ACCOUNT_ID: "cloud.account.id",
|
|
48
|
+
/**
|
|
49
|
+
* The geographical region the resource is running. Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), or [Google Cloud regions](https://cloud.google.com/about/locations).
|
|
50
|
+
*/
|
|
51
|
+
CLOUD_REGION: "cloud.region",
|
|
52
|
+
/**
|
|
53
|
+
* Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running.
|
|
54
|
+
*
|
|
55
|
+
* Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
|
|
56
|
+
*/
|
|
57
|
+
CLOUD_AVAILABILITY_ZONE: "cloud.availability_zone",
|
|
58
|
+
/**
|
|
59
|
+
* The cloud platform in use.
|
|
60
|
+
*
|
|
61
|
+
* Note: The prefix of the service SHOULD match the one specified in `cloud.provider`.
|
|
62
|
+
*/
|
|
63
|
+
CLOUD_PLATFORM: "cloud.platform",
|
|
64
|
+
/**
|
|
65
|
+
* The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
|
|
66
|
+
*/
|
|
67
|
+
AWS_ECS_CONTAINER_ARN: "aws.ecs.container.arn",
|
|
68
|
+
/**
|
|
69
|
+
* The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
|
|
70
|
+
*/
|
|
71
|
+
AWS_ECS_CLUSTER_ARN: "aws.ecs.cluster.arn",
|
|
72
|
+
/**
|
|
73
|
+
* The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task.
|
|
74
|
+
*/
|
|
75
|
+
AWS_ECS_LAUNCHTYPE: "aws.ecs.launchtype",
|
|
76
|
+
/**
|
|
77
|
+
* The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
|
|
78
|
+
*/
|
|
79
|
+
AWS_ECS_TASK_ARN: "aws.ecs.task.arn",
|
|
80
|
+
/**
|
|
81
|
+
* The task definition family this task definition is a member of.
|
|
82
|
+
*/
|
|
83
|
+
AWS_ECS_TASK_FAMILY: "aws.ecs.task.family",
|
|
84
|
+
/**
|
|
85
|
+
* The revision for this task definition.
|
|
86
|
+
*/
|
|
87
|
+
AWS_ECS_TASK_REVISION: "aws.ecs.task.revision",
|
|
88
|
+
/**
|
|
89
|
+
* The ARN of an EKS cluster.
|
|
90
|
+
*/
|
|
91
|
+
AWS_EKS_CLUSTER_ARN: "aws.eks.cluster.arn",
|
|
92
|
+
/**
|
|
93
|
+
* The name(s) of the AWS log group(s) an application is writing to.
|
|
94
|
+
*
|
|
95
|
+
* Note: Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group.
|
|
96
|
+
*/
|
|
97
|
+
AWS_LOG_GROUP_NAMES: "aws.log.group.names",
|
|
98
|
+
/**
|
|
99
|
+
* The Amazon Resource Name(s) (ARN) of the AWS log group(s).
|
|
100
|
+
*
|
|
101
|
+
* Note: See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
|
|
102
|
+
*/
|
|
103
|
+
AWS_LOG_GROUP_ARNS: "aws.log.group.arns",
|
|
104
|
+
/**
|
|
105
|
+
* The name(s) of the AWS log stream(s) an application is writing to.
|
|
106
|
+
*/
|
|
107
|
+
AWS_LOG_STREAM_NAMES: "aws.log.stream.names",
|
|
108
|
+
/**
|
|
109
|
+
* The ARN(s) of the AWS log stream(s).
|
|
110
|
+
*
|
|
111
|
+
* Note: See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream.
|
|
112
|
+
*/
|
|
113
|
+
AWS_LOG_STREAM_ARNS: "aws.log.stream.arns",
|
|
114
|
+
/**
|
|
115
|
+
* Container name.
|
|
116
|
+
*/
|
|
117
|
+
CONTAINER_NAME: "container.name",
|
|
118
|
+
/**
|
|
119
|
+
* Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated.
|
|
120
|
+
*/
|
|
121
|
+
CONTAINER_ID: "container.id",
|
|
122
|
+
/**
|
|
123
|
+
* The container runtime managing this container.
|
|
124
|
+
*/
|
|
125
|
+
CONTAINER_RUNTIME: "container.runtime",
|
|
126
|
+
/**
|
|
127
|
+
* Name of the image the container was built on.
|
|
128
|
+
*/
|
|
129
|
+
CONTAINER_IMAGE_NAME: "container.image.name",
|
|
130
|
+
/**
|
|
131
|
+
* Container image tag.
|
|
132
|
+
*/
|
|
133
|
+
CONTAINER_IMAGE_TAG: "container.image.tag",
|
|
134
|
+
/**
|
|
135
|
+
* Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier).
|
|
136
|
+
*/
|
|
137
|
+
DEPLOYMENT_ENVIRONMENT: "deployment.environment",
|
|
138
|
+
/**
|
|
139
|
+
* A unique identifier representing the device.
|
|
140
|
+
*
|
|
141
|
+
* Note: The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence.
|
|
142
|
+
*/
|
|
143
|
+
DEVICE_ID: "device.id",
|
|
144
|
+
/**
|
|
145
|
+
* The model identifier for the device.
|
|
146
|
+
*
|
|
147
|
+
* Note: It's recommended this value represents a machine readable version of the model identifier rather than the market or consumer-friendly name of the device.
|
|
148
|
+
*/
|
|
149
|
+
DEVICE_MODEL_IDENTIFIER: "device.model.identifier",
|
|
150
|
+
/**
|
|
151
|
+
* The marketing name for the device model.
|
|
152
|
+
*
|
|
153
|
+
* Note: It's recommended this value represents a human readable version of the device model rather than a machine readable alternative.
|
|
154
|
+
*/
|
|
155
|
+
DEVICE_MODEL_NAME: "device.model.name",
|
|
156
|
+
/**
|
|
157
|
+
* The name of the single function that this runtime instance executes.
|
|
158
|
+
*
|
|
159
|
+
* Note: This is the name of the function as configured/deployed on the FaaS platform and is usually different from the name of the callback function (which may be stored in the [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) span attributes).
|
|
160
|
+
*/
|
|
161
|
+
FAAS_NAME: "faas.name",
|
|
162
|
+
/**
|
|
163
|
+
* The unique ID of the single function that this runtime instance executes.
|
|
164
|
+
*
|
|
165
|
+
* Note: Depending on the cloud provider, use:
|
|
166
|
+
|
|
167
|
+
* **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
|
|
168
|
+
Take care not to use the "invoked ARN" directly but replace any
|
|
169
|
+
[alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invokable with multiple
|
|
170
|
+
different aliases.
|
|
171
|
+
* **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names)
|
|
172
|
+
* **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id).
|
|
173
|
+
|
|
174
|
+
On some providers, it may not be possible to determine the full ID at startup,
|
|
175
|
+
which is why this field cannot be made required. For example, on AWS the account ID
|
|
176
|
+
part of the ARN is not available without calling another AWS API
|
|
177
|
+
which may be deemed too slow for a short-running lambda function.
|
|
178
|
+
As an alternative, consider setting `faas.id` as a span attribute instead.
|
|
179
|
+
*/
|
|
180
|
+
FAAS_ID: "faas.id",
|
|
181
|
+
/**
|
|
182
|
+
* The immutable version of the function being executed.
|
|
183
|
+
*
|
|
184
|
+
* Note: Depending on the cloud provider and platform, use:
|
|
185
|
+
|
|
186
|
+
* **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
|
|
187
|
+
(an integer represented as a decimal string).
|
|
188
|
+
* **Google Cloud Run:** The [revision](https://cloud.google.com/run/docs/managing/revisions)
|
|
189
|
+
(i.e., the function name plus the revision suffix).
|
|
190
|
+
* **Google Cloud Functions:** The value of the
|
|
191
|
+
[`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
|
|
192
|
+
* **Azure Functions:** Not applicable. Do not set this attribute.
|
|
193
|
+
*/
|
|
194
|
+
FAAS_VERSION: "faas.version",
|
|
195
|
+
/**
|
|
196
|
+
* The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version.
|
|
197
|
+
*
|
|
198
|
+
* Note: * **AWS Lambda:** Use the (full) log stream name.
|
|
199
|
+
*/
|
|
200
|
+
FAAS_INSTANCE: "faas.instance",
|
|
201
|
+
/**
|
|
202
|
+
* The amount of memory available to the serverless function in MiB.
|
|
203
|
+
*
|
|
204
|
+
* Note: It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
|
|
205
|
+
*/
|
|
206
|
+
FAAS_MAX_MEMORY: "faas.max_memory",
|
|
207
|
+
/**
|
|
208
|
+
* Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider.
|
|
209
|
+
*/
|
|
210
|
+
HOST_ID: "host.id",
|
|
211
|
+
/**
|
|
212
|
+
* Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user.
|
|
213
|
+
*/
|
|
214
|
+
HOST_NAME: "host.name",
|
|
215
|
+
/**
|
|
216
|
+
* Type of host. For Cloud, this must be the machine type.
|
|
217
|
+
*/
|
|
218
|
+
HOST_TYPE: "host.type",
|
|
219
|
+
/**
|
|
220
|
+
* The CPU architecture the host system is running on.
|
|
221
|
+
*/
|
|
222
|
+
HOST_ARCH: "host.arch",
|
|
223
|
+
/**
|
|
224
|
+
* Name of the VM image or OS install the host was instantiated from.
|
|
225
|
+
*/
|
|
226
|
+
HOST_IMAGE_NAME: "host.image.name",
|
|
227
|
+
/**
|
|
228
|
+
* VM image ID. For Cloud, this value is from the provider.
|
|
229
|
+
*/
|
|
230
|
+
HOST_IMAGE_ID: "host.image.id",
|
|
231
|
+
/**
|
|
232
|
+
* The version string of the VM image as defined in [Version SpanAttributes](README.md#version-attributes).
|
|
233
|
+
*/
|
|
234
|
+
HOST_IMAGE_VERSION: "host.image.version",
|
|
235
|
+
/**
|
|
236
|
+
* The name of the cluster.
|
|
237
|
+
*/
|
|
238
|
+
K8S_CLUSTER_NAME: "k8s.cluster.name",
|
|
239
|
+
/**
|
|
240
|
+
* The name of the Node.
|
|
241
|
+
*/
|
|
242
|
+
K8S_NODE_NAME: "k8s.node.name",
|
|
243
|
+
/**
|
|
244
|
+
* The UID of the Node.
|
|
245
|
+
*/
|
|
246
|
+
K8S_NODE_UID: "k8s.node.uid",
|
|
247
|
+
/**
|
|
248
|
+
* The name of the namespace that the pod is running in.
|
|
249
|
+
*/
|
|
250
|
+
K8S_NAMESPACE_NAME: "k8s.namespace.name",
|
|
251
|
+
/**
|
|
252
|
+
* The UID of the Pod.
|
|
253
|
+
*/
|
|
254
|
+
K8S_POD_UID: "k8s.pod.uid",
|
|
255
|
+
/**
|
|
256
|
+
* The name of the Pod.
|
|
257
|
+
*/
|
|
258
|
+
K8S_POD_NAME: "k8s.pod.name",
|
|
259
|
+
/**
|
|
260
|
+
* The name of the Container in a Pod template.
|
|
261
|
+
*/
|
|
262
|
+
K8S_CONTAINER_NAME: "k8s.container.name",
|
|
263
|
+
/**
|
|
264
|
+
* The UID of the ReplicaSet.
|
|
265
|
+
*/
|
|
266
|
+
K8S_REPLICASET_UID: "k8s.replicaset.uid",
|
|
267
|
+
/**
|
|
268
|
+
* The name of the ReplicaSet.
|
|
269
|
+
*/
|
|
270
|
+
K8S_REPLICASET_NAME: "k8s.replicaset.name",
|
|
271
|
+
/**
|
|
272
|
+
* The UID of the Deployment.
|
|
273
|
+
*/
|
|
274
|
+
K8S_DEPLOYMENT_UID: "k8s.deployment.uid",
|
|
275
|
+
/**
|
|
276
|
+
* The name of the Deployment.
|
|
277
|
+
*/
|
|
278
|
+
K8S_DEPLOYMENT_NAME: "k8s.deployment.name",
|
|
279
|
+
/**
|
|
280
|
+
* The UID of the StatefulSet.
|
|
281
|
+
*/
|
|
282
|
+
K8S_STATEFULSET_UID: "k8s.statefulset.uid",
|
|
283
|
+
/**
|
|
284
|
+
* The name of the StatefulSet.
|
|
285
|
+
*/
|
|
286
|
+
K8S_STATEFULSET_NAME: "k8s.statefulset.name",
|
|
287
|
+
/**
|
|
288
|
+
* The UID of the DaemonSet.
|
|
289
|
+
*/
|
|
290
|
+
K8S_DAEMONSET_UID: "k8s.daemonset.uid",
|
|
291
|
+
/**
|
|
292
|
+
* The name of the DaemonSet.
|
|
293
|
+
*/
|
|
294
|
+
K8S_DAEMONSET_NAME: "k8s.daemonset.name",
|
|
295
|
+
/**
|
|
296
|
+
* The UID of the Job.
|
|
297
|
+
*/
|
|
298
|
+
K8S_JOB_UID: "k8s.job.uid",
|
|
299
|
+
/**
|
|
300
|
+
* The name of the Job.
|
|
301
|
+
*/
|
|
302
|
+
K8S_JOB_NAME: "k8s.job.name",
|
|
303
|
+
/**
|
|
304
|
+
* The UID of the CronJob.
|
|
305
|
+
*/
|
|
306
|
+
K8S_CRONJOB_UID: "k8s.cronjob.uid",
|
|
307
|
+
/**
|
|
308
|
+
* The name of the CronJob.
|
|
309
|
+
*/
|
|
310
|
+
K8S_CRONJOB_NAME: "k8s.cronjob.name",
|
|
311
|
+
/**
|
|
312
|
+
* The operating system type.
|
|
313
|
+
*/
|
|
314
|
+
OS_TYPE: "os.type",
|
|
315
|
+
/**
|
|
316
|
+
* Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands.
|
|
317
|
+
*/
|
|
318
|
+
OS_DESCRIPTION: "os.description",
|
|
319
|
+
/**
|
|
320
|
+
* Human readable operating system name.
|
|
321
|
+
*/
|
|
322
|
+
OS_NAME: "os.name",
|
|
323
|
+
/**
|
|
324
|
+
* The version string of the operating system as defined in [Version SpanAttributes](../../resource/semantic_conventions/README.md#version-attributes).
|
|
325
|
+
*/
|
|
326
|
+
OS_VERSION: "os.version",
|
|
327
|
+
/**
|
|
328
|
+
* Process identifier (PID).
|
|
329
|
+
*/
|
|
330
|
+
PROCESS_PID: "process.pid",
|
|
331
|
+
/**
|
|
332
|
+
* The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW`.
|
|
333
|
+
*/
|
|
334
|
+
PROCESS_EXECUTABLE_NAME: "process.executable.name",
|
|
335
|
+
/**
|
|
336
|
+
* The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW`.
|
|
337
|
+
*/
|
|
338
|
+
PROCESS_EXECUTABLE_PATH: "process.executable.path",
|
|
339
|
+
/**
|
|
340
|
+
* The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW`.
|
|
341
|
+
*/
|
|
342
|
+
PROCESS_COMMAND: "process.command",
|
|
343
|
+
/**
|
|
344
|
+
* The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead.
|
|
345
|
+
*/
|
|
346
|
+
PROCESS_COMMAND_LINE: "process.command_line",
|
|
347
|
+
/**
|
|
348
|
+
* All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main`.
|
|
349
|
+
*/
|
|
350
|
+
PROCESS_COMMAND_ARGS: "process.command_args",
|
|
351
|
+
/**
|
|
352
|
+
* The username of the user that owns the process.
|
|
353
|
+
*/
|
|
354
|
+
PROCESS_OWNER: "process.owner",
|
|
355
|
+
/**
|
|
356
|
+
* The name of the runtime of this process. For compiled native binaries, this SHOULD be the name of the compiler.
|
|
357
|
+
*/
|
|
358
|
+
PROCESS_RUNTIME_NAME: "process.runtime.name",
|
|
359
|
+
/**
|
|
360
|
+
* The version of the runtime of this process, as returned by the runtime without modification.
|
|
361
|
+
*/
|
|
362
|
+
PROCESS_RUNTIME_VERSION: "process.runtime.version",
|
|
363
|
+
/**
|
|
364
|
+
* An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment.
|
|
365
|
+
*/
|
|
366
|
+
PROCESS_RUNTIME_DESCRIPTION: "process.runtime.description",
|
|
367
|
+
/**
|
|
368
|
+
* Logical name of the service.
|
|
369
|
+
*
|
|
370
|
+
* Note: MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md#process), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service`.
|
|
371
|
+
*/
|
|
372
|
+
SERVICE_NAME: "service.name",
|
|
373
|
+
/**
|
|
374
|
+
* A namespace for `service.name`.
|
|
375
|
+
*
|
|
376
|
+
* Note: A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace.
|
|
377
|
+
*/
|
|
378
|
+
SERVICE_NAMESPACE: "service.namespace",
|
|
379
|
+
/**
|
|
380
|
+
* The string ID of the service instance.
|
|
381
|
+
*
|
|
382
|
+
* Note: MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled service). It is preferable for the ID to be persistent and stay the same for the lifetime of the service instance, however it is acceptable that the ID is ephemeral and changes during important lifetime events for the service (e.g. service restarts). If the service has no inherent unique ID that can be used as the value of this attribute it is recommended to generate a random Version 1 or Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use Version 5, see RFC 4122 for more recommendations).
|
|
383
|
+
*/
|
|
384
|
+
SERVICE_INSTANCE_ID: "service.instance.id",
|
|
385
|
+
/**
|
|
386
|
+
* The version string of the service API or implementation.
|
|
387
|
+
*/
|
|
388
|
+
SERVICE_VERSION: "service.version",
|
|
389
|
+
/**
|
|
390
|
+
* The name of the telemetry SDK as defined above.
|
|
391
|
+
*/
|
|
392
|
+
TELEMETRY_SDK_NAME: "telemetry.sdk.name",
|
|
393
|
+
/**
|
|
394
|
+
* The language of the telemetry SDK.
|
|
395
|
+
*/
|
|
396
|
+
TELEMETRY_SDK_LANGUAGE: "telemetry.sdk.language",
|
|
397
|
+
/**
|
|
398
|
+
* The version string of the telemetry SDK.
|
|
399
|
+
*/
|
|
400
|
+
TELEMETRY_SDK_VERSION: "telemetry.sdk.version",
|
|
401
|
+
/**
|
|
402
|
+
* The version string of the auto instrumentation agent, if used.
|
|
403
|
+
*/
|
|
404
|
+
TELEMETRY_AUTO_VERSION: "telemetry.auto.version",
|
|
405
|
+
/**
|
|
406
|
+
* The name of the web engine.
|
|
407
|
+
*/
|
|
408
|
+
WEBENGINE_NAME: "webengine.name",
|
|
409
|
+
/**
|
|
410
|
+
* The version of the web engine.
|
|
411
|
+
*/
|
|
412
|
+
WEBENGINE_VERSION: "webengine.version",
|
|
413
|
+
/**
|
|
414
|
+
* Additional description of the web engine (e.g. detailed version and edition information).
|
|
415
|
+
*/
|
|
416
|
+
WEBENGINE_DESCRIPTION: "webengine.description"
|
|
417
|
+
};
|
|
418
|
+
|
|
419
|
+
// node_modules/@opentelemetry/resources/build/esm/Resource.js
|
|
420
|
+
import { SDK_INFO } from "@opentelemetry/core";
|
|
421
|
+
|
|
422
|
+
// node_modules/@opentelemetry/resources/build/esm/platform/node/default-service-name.js
|
|
423
|
+
function defaultServiceName() {
|
|
424
|
+
return "unknown_service:" + process.argv0;
|
|
425
|
+
}
|
|
426
|
+
|
|
427
|
+
// node_modules/@opentelemetry/resources/build/esm/Resource.js
|
|
428
|
+
var __assign = function() {
|
|
429
|
+
__assign = Object.assign || function(t) {
|
|
430
|
+
for (var s, i = 1, n = arguments.length; i < n; i++) {
|
|
431
|
+
s = arguments[i];
|
|
432
|
+
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
|
|
433
|
+
t[p] = s[p];
|
|
434
|
+
}
|
|
435
|
+
return t;
|
|
436
|
+
};
|
|
437
|
+
return __assign.apply(this, arguments);
|
|
438
|
+
};
|
|
439
|
+
var __awaiter = function(thisArg, _arguments, P, generator) {
|
|
440
|
+
function adopt(value) {
|
|
441
|
+
return value instanceof P ? value : new P(function(resolve) {
|
|
442
|
+
resolve(value);
|
|
443
|
+
});
|
|
444
|
+
}
|
|
445
|
+
return new (P || (P = Promise))(function(resolve, reject) {
|
|
446
|
+
function fulfilled(value) {
|
|
447
|
+
try {
|
|
448
|
+
step(generator.next(value));
|
|
449
|
+
} catch (e) {
|
|
450
|
+
reject(e);
|
|
451
|
+
}
|
|
452
|
+
}
|
|
453
|
+
function rejected(value) {
|
|
454
|
+
try {
|
|
455
|
+
step(generator["throw"](value));
|
|
456
|
+
} catch (e) {
|
|
457
|
+
reject(e);
|
|
458
|
+
}
|
|
459
|
+
}
|
|
460
|
+
function step(result) {
|
|
461
|
+
result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected);
|
|
462
|
+
}
|
|
463
|
+
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
|
464
|
+
});
|
|
465
|
+
};
|
|
466
|
+
var __generator = function(thisArg, body) {
|
|
467
|
+
var _ = { label: 0, sent: function() {
|
|
468
|
+
if (t[0] & 1) throw t[1];
|
|
469
|
+
return t[1];
|
|
470
|
+
}, trys: [], ops: [] }, f, y, t, g;
|
|
471
|
+
return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() {
|
|
472
|
+
return this;
|
|
473
|
+
}), g;
|
|
474
|
+
function verb(n) {
|
|
475
|
+
return function(v) {
|
|
476
|
+
return step([n, v]);
|
|
477
|
+
};
|
|
478
|
+
}
|
|
479
|
+
function step(op) {
|
|
480
|
+
if (f) throw new TypeError("Generator is already executing.");
|
|
481
|
+
while (_) try {
|
|
482
|
+
if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
|
|
483
|
+
if (y = 0, t) op = [op[0] & 2, t.value];
|
|
484
|
+
switch (op[0]) {
|
|
485
|
+
case 0:
|
|
486
|
+
case 1:
|
|
487
|
+
t = op;
|
|
488
|
+
break;
|
|
489
|
+
case 4:
|
|
490
|
+
_.label++;
|
|
491
|
+
return { value: op[1], done: false };
|
|
492
|
+
case 5:
|
|
493
|
+
_.label++;
|
|
494
|
+
y = op[1];
|
|
495
|
+
op = [0];
|
|
496
|
+
continue;
|
|
497
|
+
case 7:
|
|
498
|
+
op = _.ops.pop();
|
|
499
|
+
_.trys.pop();
|
|
500
|
+
continue;
|
|
501
|
+
default:
|
|
502
|
+
if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
|
|
503
|
+
_ = 0;
|
|
504
|
+
continue;
|
|
505
|
+
}
|
|
506
|
+
if (op[0] === 3 && (!t || op[1] > t[0] && op[1] < t[3])) {
|
|
507
|
+
_.label = op[1];
|
|
508
|
+
break;
|
|
509
|
+
}
|
|
510
|
+
if (op[0] === 6 && _.label < t[1]) {
|
|
511
|
+
_.label = t[1];
|
|
512
|
+
t = op;
|
|
513
|
+
break;
|
|
514
|
+
}
|
|
515
|
+
if (t && _.label < t[2]) {
|
|
516
|
+
_.label = t[2];
|
|
517
|
+
_.ops.push(op);
|
|
518
|
+
break;
|
|
519
|
+
}
|
|
520
|
+
if (t[2]) _.ops.pop();
|
|
521
|
+
_.trys.pop();
|
|
522
|
+
continue;
|
|
523
|
+
}
|
|
524
|
+
op = body.call(thisArg, _);
|
|
525
|
+
} catch (e) {
|
|
526
|
+
op = [6, e];
|
|
527
|
+
y = 0;
|
|
528
|
+
} finally {
|
|
529
|
+
f = t = 0;
|
|
530
|
+
}
|
|
531
|
+
if (op[0] & 5) throw op[1];
|
|
532
|
+
return { value: op[0] ? op[1] : void 0, done: true };
|
|
533
|
+
}
|
|
534
|
+
};
|
|
535
|
+
var __read = function(o, n) {
|
|
536
|
+
var m = typeof Symbol === "function" && o[Symbol.iterator];
|
|
537
|
+
if (!m) return o;
|
|
538
|
+
var i = m.call(o), r, ar = [], e;
|
|
539
|
+
try {
|
|
540
|
+
while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
|
|
541
|
+
} catch (error) {
|
|
542
|
+
e = { error };
|
|
543
|
+
} finally {
|
|
544
|
+
try {
|
|
545
|
+
if (r && !r.done && (m = i["return"])) m.call(i);
|
|
546
|
+
} finally {
|
|
547
|
+
if (e) throw e.error;
|
|
548
|
+
}
|
|
549
|
+
}
|
|
550
|
+
return ar;
|
|
551
|
+
};
|
|
552
|
+
var Resource = (
|
|
553
|
+
/** @class */
|
|
554
|
+
(function() {
|
|
555
|
+
function Resource2(attributes, asyncAttributesPromise) {
|
|
556
|
+
var _this = this;
|
|
557
|
+
var _a;
|
|
558
|
+
this._attributes = attributes;
|
|
559
|
+
this.asyncAttributesPending = asyncAttributesPromise != null;
|
|
560
|
+
this._syncAttributes = (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
|
|
561
|
+
this._asyncAttributesPromise = asyncAttributesPromise === null || asyncAttributesPromise === void 0 ? void 0 : asyncAttributesPromise.then(function(asyncAttributes) {
|
|
562
|
+
_this._attributes = Object.assign({}, _this._attributes, asyncAttributes);
|
|
563
|
+
_this.asyncAttributesPending = false;
|
|
564
|
+
return asyncAttributes;
|
|
565
|
+
}, function(err) {
|
|
566
|
+
diag.debug("a resource's async attributes promise rejected: %s", err);
|
|
567
|
+
_this.asyncAttributesPending = false;
|
|
568
|
+
return {};
|
|
569
|
+
});
|
|
570
|
+
}
|
|
571
|
+
Resource2.empty = function() {
|
|
572
|
+
return Resource2.EMPTY;
|
|
573
|
+
};
|
|
574
|
+
Resource2.default = function() {
|
|
575
|
+
var _a;
|
|
576
|
+
return new Resource2((_a = {}, _a[SemanticResourceAttributes.SERVICE_NAME] = defaultServiceName(), _a[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE], _a[SemanticResourceAttributes.TELEMETRY_SDK_NAME] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_NAME], _a[SemanticResourceAttributes.TELEMETRY_SDK_VERSION] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_VERSION], _a));
|
|
577
|
+
};
|
|
578
|
+
Object.defineProperty(Resource2.prototype, "attributes", {
|
|
579
|
+
get: function() {
|
|
580
|
+
var _a;
|
|
581
|
+
if (this.asyncAttributesPending) {
|
|
582
|
+
diag.error("Accessing resource attributes before async attributes settled");
|
|
583
|
+
}
|
|
584
|
+
return (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
|
|
585
|
+
},
|
|
586
|
+
enumerable: false,
|
|
587
|
+
configurable: true
|
|
588
|
+
});
|
|
589
|
+
Resource2.prototype.waitForAsyncAttributes = function() {
|
|
590
|
+
return __awaiter(this, void 0, void 0, function() {
|
|
591
|
+
return __generator(this, function(_a) {
|
|
592
|
+
switch (_a.label) {
|
|
593
|
+
case 0:
|
|
594
|
+
if (!this.asyncAttributesPending) return [3, 2];
|
|
595
|
+
return [4, this._asyncAttributesPromise];
|
|
596
|
+
case 1:
|
|
597
|
+
_a.sent();
|
|
598
|
+
_a.label = 2;
|
|
599
|
+
case 2:
|
|
600
|
+
return [
|
|
601
|
+
2
|
|
602
|
+
/*return*/
|
|
603
|
+
];
|
|
604
|
+
}
|
|
605
|
+
});
|
|
606
|
+
});
|
|
607
|
+
};
|
|
608
|
+
Resource2.prototype.merge = function(other) {
|
|
609
|
+
var _this = this;
|
|
610
|
+
var _a;
|
|
611
|
+
if (!other)
|
|
612
|
+
return this;
|
|
613
|
+
var mergedSyncAttributes = __assign(__assign({}, this._syncAttributes), (_a = other._syncAttributes) !== null && _a !== void 0 ? _a : other.attributes);
|
|
614
|
+
if (!this._asyncAttributesPromise && !other._asyncAttributesPromise) {
|
|
615
|
+
return new Resource2(mergedSyncAttributes);
|
|
616
|
+
}
|
|
617
|
+
var mergedAttributesPromise = Promise.all([
|
|
618
|
+
this._asyncAttributesPromise,
|
|
619
|
+
other._asyncAttributesPromise
|
|
620
|
+
]).then(function(_a2) {
|
|
621
|
+
var _b;
|
|
622
|
+
var _c = __read(_a2, 2), thisAsyncAttributes = _c[0], otherAsyncAttributes = _c[1];
|
|
623
|
+
return __assign(__assign(__assign(__assign({}, _this._syncAttributes), thisAsyncAttributes), (_b = other._syncAttributes) !== null && _b !== void 0 ? _b : other.attributes), otherAsyncAttributes);
|
|
624
|
+
});
|
|
625
|
+
return new Resource2(mergedSyncAttributes, mergedAttributesPromise);
|
|
626
|
+
};
|
|
627
|
+
Resource2.EMPTY = new Resource2({});
|
|
628
|
+
return Resource2;
|
|
629
|
+
})()
|
|
630
|
+
);
|
|
631
|
+
|
|
632
|
+
// src/trace.ts
|
|
633
|
+
var sessionStorage = new AsyncLocalStorage();
|
|
634
|
+
var fallbackSession = null;
|
|
635
|
+
var apiKey = null;
|
|
636
|
+
var baseUrl = "https://traces.fallom.com";
|
|
637
|
+
var initialized = false;
|
|
638
|
+
var captureContent = true;
|
|
639
|
+
var debugMode = false;
|
|
640
|
+
var sdk = null;
|
|
641
|
+
function log(...args) {
|
|
642
|
+
if (debugMode) console.log("[Fallom]", ...args);
|
|
643
|
+
}
|
|
644
|
+
// OpenTelemetry SpanProcessor that stamps every auto-instrumented span with
// the current Fallom session context so the backend can group spans.
// Registered with the NodeSDK in init().
var fallomSpanProcessor = {
  // Called synchronously when a span starts; attach session attributes.
  onStart(span2, _parentContext) {
    log("\u{1F4CD} Span started:", span2.name || "unknown");
    // Prefer the async-local session; fall back to the last global setSession().
    const ctx = sessionStorage.getStore() || fallbackSession;
    if (ctx) {
      span2.setAttribute("fallom.config_key", ctx.configKey);
      span2.setAttribute("fallom.session_id", ctx.sessionId);
      // customerId is optional; only set when present.
      if (ctx.customerId) {
        span2.setAttribute("fallom.customer_id", ctx.customerId);
      }
      log(
        " Added session context:",
        ctx.configKey,
        ctx.sessionId,
        ctx.customerId
      );
    } else {
      log(" No session context available");
    }
  },
  // Debug-only visibility into span completion; no mutation here.
  onEnd(span2) {
    log("\u2705 Span ended:", span2.name, "duration:", span2.duration);
  },
  // No resources to release; the exporter handles its own lifecycle.
  shutdown() {
    return Promise.resolve();
  },
  // Nothing buffered by this processor, so flushing is a no-op.
  forceFlush() {
    return Promise.resolve();
  }
};
|
|
674
|
+
/**
 * Initialize Fallom tracing: resolve config from options/env, build the OTLP
 * exporter and OpenTelemetry NodeSDK, load optional instrumentations, and
 * start the SDK. Idempotent: subsequent calls return immediately.
 *
 * @param {object} [options]
 * @param {string}  [options.apiKey]         Falls back to FALLOM_API_KEY.
 * @param {string}  [options.baseUrl]        Falls back to FALLOM_TRACES_URL / FALLOM_BASE_URL.
 * @param {boolean} [options.captureContent] Env FALLOM_CAPTURE_CONTENT=false/0/no wins.
 * @param {boolean} [options.debug]          Enables verbose console logging.
 * @throws {Error} when no API key can be resolved.
 */
async function init(options = {}) {
  if (initialized) return;
  debugMode = options.debug ?? false;
  log("\u{1F680} Initializing Fallom tracing...");
  apiKey = options.apiKey || process.env.FALLOM_API_KEY || null;
  baseUrl = options.baseUrl || process.env.FALLOM_TRACES_URL || process.env.FALLOM_BASE_URL || "https://traces.fallom.com";
  // Environment opt-out takes precedence over the option.
  const envCapture = process.env.FALLOM_CAPTURE_CONTENT?.toLowerCase();
  if (envCapture === "false" || envCapture === "0" || envCapture === "no") {
    captureContent = false;
  } else {
    captureContent = options.captureContent ?? true;
  }
  if (!apiKey) {
    throw new Error(
      "No API key provided. Set FALLOM_API_KEY environment variable or pass apiKey parameter."
    );
  }
  // NOTE(review): initialized is set before the SDK is constructed — if
  // NodeSDK construction throws, re-init is blocked. Confirm intended.
  initialized = true;
  log("\u{1F4E1} Exporter URL:", `${baseUrl}/v1/traces`);
  const exporter = new OTLPTraceExporter({
    url: `${baseUrl}/v1/traces`,
    headers: {
      Authorization: `Bearer ${apiKey}`
    }
  });
  // Dynamically probe for optional @traceloop instrumentation packages.
  const instrumentations = await getInstrumentations();
  log("\u{1F527} Loaded instrumentations:", instrumentations.length);
  sdk = new NodeSDK({
    resource: new Resource({
      "service.name": "fallom-traced-app"
    }),
    traceExporter: exporter,
    spanProcessor: fallomSpanProcessor,
    instrumentations
  });
  sdk.start();
  log("\u2705 SDK started");
  // Best-effort flush on process termination; errors only logged.
  process.on("SIGTERM", () => {
    sdk?.shutdown().catch(console.error);
  });
}
|
|
715
|
+
/**
 * Probe for each optional @traceloop instrumentation package and collect the
 * ones that are installed. Packages are tried sequentially, in a fixed order,
 * exactly as before; missing packages are silently skipped by
 * tryAddInstrumentation.
 *
 * @returns {Promise<object[]>} instantiated instrumentation instances.
 */
async function getInstrumentations() {
  const instrumentations = [];
  // [package name, exported class name] pairs, tried in order.
  const candidates = [
    ["@traceloop/instrumentation-openai", "OpenAIInstrumentation"],
    ["@traceloop/instrumentation-anthropic", "AnthropicInstrumentation"],
    ["@traceloop/instrumentation-cohere", "CohereInstrumentation"],
    ["@traceloop/instrumentation-bedrock", "BedrockInstrumentation"],
    ["@traceloop/instrumentation-google-generativeai", "GoogleGenerativeAIInstrumentation"],
    ["@traceloop/instrumentation-azure", "AzureOpenAIInstrumentation"],
    ["@traceloop/instrumentation-vertexai", "VertexAIInstrumentation"]
  ];
  for (const [pkg, className] of candidates) {
    await tryAddInstrumentation(instrumentations, pkg, className);
  }
  return instrumentations;
}
|
|
754
|
+
/**
 * Try to dynamically import `pkg` and push a `new className({...})` instance
 * onto `instrumentations`. Missing or broken packages are tolerated: nothing
 * is pushed and a debug line is logged.
 *
 * Fix: the previous catch logged "not installed" unconditionally, which was
 * misleading when the package existed but failed to import (e.g. a version
 * mismatch throwing at load time). The actual error is now included in the
 * debug log; the caught error binding was previously declared but unused.
 *
 * @param {object[]} instrumentations accumulator, mutated in place.
 * @param {string}   pkg              npm package name to import.
 * @param {string}   className        exported instrumentation class name.
 */
async function tryAddInstrumentation(instrumentations, pkg, className) {
  try {
    const mod = await import(pkg);
    // The class may be a named export or hang off a default export.
    const InstrumentationClass = mod[className] || mod.default?.[className];
    if (InstrumentationClass) {
      instrumentations.push(
        new InstrumentationClass({ traceContent: captureContent })
      );
      log(` \u2705 Loaded ${pkg}`);
    } else {
      log(
        ` \u26A0\uFE0F ${pkg} loaded but ${className} not found. Available:`,
        Object.keys(mod)
      );
    }
  } catch (e) {
    // Include the failure reason so a broken install is distinguishable
    // from a missing one (debug-only output).
    log(
      ` \u274C ${pkg} not installed`,
      e instanceof Error ? `(${e.message})` : ""
    );
  }
}
|
|
773
|
+
/**
 * Set the active Fallom session. Updates the async-local store when one is
 * active, and always records the session as the global fallback for code
 * running outside any runWithSession() scope.
 *
 * @param {string} configKey
 * @param {string} sessionId
 * @param {string} [customerId]
 */
function setSession(configKey, sessionId, customerId) {
  const session2 = { configKey, sessionId, customerId };
  const store = sessionStorage.getStore();
  if (store) {
    Object.assign(store, session2);
  }
  fallbackSession = session2;
}
|
|
782
|
+
/**
 * Run `fn` with an async-local session context. Supports two call shapes:
 *   runWithSession(configKey, sessionId, fn)
 *   runWithSession(configKey, sessionId, customerId, fn)
 *
 * @returns whatever the callback returns.
 */
function runWithSession(configKey, sessionId, customerIdOrFn, fn) {
  const customerIdOmitted = typeof customerIdOrFn === "function";
  const callback = customerIdOmitted ? customerIdOrFn : fn;
  const context2 = customerIdOmitted
    ? { configKey, sessionId }
    : { configKey, sessionId, customerId: customerIdOrFn };
  return sessionStorage.run(context2, callback);
}
|
|
791
|
+
/**
 * Return the current session context: the async-local one if active,
 * otherwise the global fallback set by setSession(); undefined when neither
 * exists.
 */
function getSession() {
  const active = sessionStorage.getStore();
  if (active) return active;
  return fallbackSession ?? void 0;
}
|
|
794
|
+
// Clear the global fallback session. Note this does NOT touch any active
// async-local context created by runWithSession().
function clearSession() {
  fallbackSession = null;
}
|
|
797
|
+
/**
 * Record a custom span. Resolves the session from explicit options first,
 * then the current session context, and fires the span off asynchronously
 * (delivery failures are deliberately ignored).
 *
 * @param {object} data custom span payload.
 * @param {object} [options] optional { configKey, sessionId } overrides.
 * @throws {Error} if tracing is not initialized or no session is resolvable.
 */
function span(data, options = {}) {
  if (!initialized) {
    throw new Error("Fallom not initialized. Call trace.init() first.");
  }
  const active = sessionStorage.getStore() || fallbackSession;
  const configKey = options.configKey || active?.configKey;
  const sessionId = options.sessionId || active?.sessionId;
  if (!configKey || !sessionId) {
    throw new Error(
      "No session context. Either call setSession() first, or pass configKey and sessionId explicitly."
    );
  }
  // Fire-and-forget: never let telemetry failures surface to the caller.
  void sendSpan(configKey, sessionId, data).catch(() => {});
}
|
|
812
|
+
/**
 * POST a custom span to the collector with a 5s abort timeout. Best-effort:
 * all network/HTTP errors are swallowed by design (telemetry must never break
 * the host application).
 *
 * Fix: clearTimeout previously ran only on the success path, so a rejected
 * fetch left the 5s abort timer pending (keeping the event loop alive and
 * firing a pointless abort). It now runs in `finally`.
 *
 * @param {string} configKey
 * @param {string} sessionId
 * @param {object} data span payload.
 */
async function sendSpan(configKey, sessionId, data) {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), 5e3);
  try {
    await fetch(`${baseUrl}/spans`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        config_key: configKey,
        session_id: sessionId,
        data
      }),
      signal: controller.signal
    });
  } catch {
    // Intentionally swallowed: span delivery is best-effort.
  } finally {
    clearTimeout(timeoutId);
  }
}
|
|
833
|
+
/**
 * Flush and shut down the OpenTelemetry SDK started by init().
 * No-op when init() was never called (or already shut down).
 *
 * Fix: state is now reset in `finally` and `sdk` is nulled out, so a failing
 * sdk.shutdown() no longer leaves `initialized` stuck at true (which made
 * re-initialization impossible) or a dead SDK instance reachable by the
 * SIGTERM handler.
 */
async function shutdown() {
  if (!sdk) return;
  try {
    await sdk.shutdown();
  } finally {
    sdk = null;
    initialized = false;
  }
}
|
|
839
|
+
/**
 * Build gen_ai.* OpenTelemetry-style attributes from chat messages and an
 * optional completion. Non-string content is JSON-stringified. All parameters
 * are optional; an empty object is returned when nothing is provided.
 *
 * @param {Array<{role:string,content:any}>} [messages]   prompt messages, indexed in order.
 * @param {{role:string,content:any,tool_calls?:any}} [completion] single completion message.
 * @param {string} [model]       request/response model name.
 * @param {string} [responseId]  provider response id.
 * @returns {Record<string,string>} flat attribute map.
 */
function messagesToOtelAttributes(messages, completion, model, responseId) {
  const attrs = {};
  // Content values may be structured (arrays of parts); serialize those.
  const asText = (content) =>
    typeof content === "string" ? content : JSON.stringify(content);
  if (model) {
    attrs["gen_ai.request.model"] = model;
    attrs["gen_ai.response.model"] = model;
  }
  if (responseId) {
    attrs["gen_ai.response.id"] = responseId;
  }
  if (messages) {
    let i = 0;
    for (const msg of messages) {
      attrs[`gen_ai.prompt.${i}.role`] = msg.role;
      attrs[`gen_ai.prompt.${i}.content`] = asText(msg.content);
      i += 1;
    }
  }
  if (completion) {
    attrs["gen_ai.completion.0.role"] = completion.role;
    attrs["gen_ai.completion.0.content"] = asText(completion.content);
    if (completion.tool_calls) {
      attrs["gen_ai.completion.0.tool_calls"] = JSON.stringify(
        completion.tool_calls
      );
    }
  }
  return attrs;
}
|
|
865
|
+
function generateHexId(length) {
|
|
866
|
+
const bytes = new Uint8Array(length / 2);
|
|
867
|
+
crypto.getRandomValues(bytes);
|
|
868
|
+
return Array.from(bytes).map((b) => b.toString(16).padStart(2, "0")).join("");
|
|
869
|
+
}
|
|
870
|
+
// Async-local trace context ({ traceId, parentSpanId }) used by the wrappers
// to correlate LLM spans under a parent trace; fallbackTraceContext serves
// callers outside any async-local scope, mirroring fallbackSession above.
var traceContextStorage = new AsyncLocalStorage();
var fallbackTraceContext = null;
|
|
872
|
+
/**
 * POST one trace record to `${baseUrl}/v1/traces` with a 5s abort timeout.
 * Best-effort: HTTP failures and network errors are only logged (debug mode),
 * never thrown — telemetry must not break the host application.
 *
 * Fix: clearTimeout previously ran only when fetch resolved; a rejected fetch
 * left the 5s abort timer pending. It now runs in `finally`.
 *
 * @param {object} trace fully assembled trace payload (see wrappers below).
 */
async function sendTrace(trace) {
  const url = `${baseUrl}/v1/traces`;
  log("\u{1F4E4} Sending trace to:", url);
  log(" Session:", trace.session_id, "Config:", trace.config_key);
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), 5e3);
  try {
    const response = await fetch(url, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify(trace),
      signal: controller.signal
    });
    if (!response.ok) {
      const text = await response.text();
      log("\u274C Trace send failed:", response.status, text);
    } else {
      log("\u2705 Trace sent:", trace.name, trace.model);
    }
  } catch (err) {
    log("\u274C Trace send error:", err instanceof Error ? err.message : err);
  } finally {
    clearTimeout(timeoutId);
  }
}
|
|
899
|
+
/**
 * Monkey-patch an OpenAI client so every chat.completions.create call emits a
 * Fallom trace. The patched method is a transparent passthrough: same args,
 * same return value / thrown error as the original client.
 *
 * @param {object} client OpenAI SDK client instance (mutated in place).
 * @returns {object} the same client, for chaining.
 */
function wrapOpenAI(client) {
  const originalCreate = client.chat.completions.create.bind(
    client.chat.completions
  );
  client.chat.completions.create = async function(...args) {
    const ctx = sessionStorage.getStore() || fallbackSession;
    // Without a session or init(), behave exactly like the unwrapped client.
    if (!ctx || !initialized) {
      return originalCreate(...args);
    }
    // Prompt-management context is optional; the chunk may not be present.
    let promptCtx = null;
    try {
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    // Join an enclosing trace if one is active, else start a fresh trace id.
    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    try {
      const response = await originalCreate(...args);
      const endTime = Date.now();
      // Message content only when captureContent allows it; usage and
      // finish_reason are metadata and always recorded.
      const attributes = captureContent ? messagesToOtelAttributes(
        params?.messages,
        response?.choices?.[0]?.message,
        response?.model || params?.model,
        response?.id
      ) : {};
      if (response?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
      }
      if (response?.choices?.[0]?.finish_reason) {
        attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
      }
      // Fire-and-forget; telemetry failure must not affect the caller.
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "chat.completions.create",
        kind: "llm",
        model: response?.model || params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: response?.usage?.prompt_tokens,
        completion_tokens: response?.usage?.completion_tokens,
        total_tokens: response?.usage?.total_tokens,
        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      return response;
    } catch (error) {
      // Error path: record an ERROR trace with the request side only,
      // then rethrow the original error untouched.
      const endTime = Date.now();
      const attributes = captureContent ? messagesToOtelAttributes(
        params?.messages,
        void 0,
        params?.model,
        void 0
      ) : void 0;
      if (attributes) {
        attributes["error.message"] = error?.message;
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "chat.completions.create",
        kind: "llm",
        model: params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        attributes,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      throw error;
    }
  };
  return client;
}
|
|
998
|
+
/**
 * Monkey-patch an Anthropic client so every messages.create call emits a
 * Fallom trace. Transparent passthrough: same args, same return value /
 * thrown error as the original client.
 *
 * Fix: the system prompt was previously copied into trace attributes
 * unconditionally, leaking prompt content even when content capture was
 * disabled (captureContent === false / FALLOM_CAPTURE_CONTENT=false). It is
 * now guarded by captureContent, matching how message content is handled.
 *
 * @param {object} client Anthropic SDK client instance (mutated in place).
 * @returns {object} the same client, for chaining.
 */
function wrapAnthropic(client) {
  const originalCreate = client.messages.create.bind(client.messages);
  client.messages.create = async function(...args) {
    const ctx = sessionStorage.getStore() || fallbackSession;
    // Without a session or init(), behave exactly like the unwrapped client.
    if (!ctx || !initialized) {
      return originalCreate(...args);
    }
    // Prompt-management context is optional; the chunk may not be present.
    let promptCtx = null;
    try {
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    // Join an enclosing trace if one is active, else start a fresh trace id.
    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    try {
      const response = await originalCreate(...args);
      const endTime = Date.now();
      const attributes = captureContent ? messagesToOtelAttributes(
        params?.messages,
        { role: "assistant", content: response?.content?.[0]?.text || "" },
        response?.model || params?.model,
        response?.id
      ) : {};
      // FIX: only capture the system prompt when content capture is enabled.
      if (captureContent && params?.system) {
        attributes["gen_ai.system_prompt"] = params.system;
      }
      // Usage and stop_reason are metadata, recorded regardless of capture.
      if (response?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
      }
      if (response?.stop_reason) {
        attributes["gen_ai.response.finish_reason"] = response.stop_reason;
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "messages.create",
        kind: "llm",
        model: response?.model || params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: response?.usage?.input_tokens,
        completion_tokens: response?.usage?.output_tokens,
        total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      return response;
    } catch (error) {
      // Error path: record an ERROR trace with the request side only,
      // then rethrow the original error untouched. `attributes` is only
      // defined when captureContent is on, so the system prompt stays
      // guarded here as well.
      const endTime = Date.now();
      const attributes = captureContent ? messagesToOtelAttributes(
        params?.messages,
        void 0,
        params?.model,
        void 0
      ) : void 0;
      if (attributes) {
        attributes["error.message"] = error?.message;
        if (params?.system) {
          attributes["gen_ai.system_prompt"] = params.system;
        }
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "messages.create",
        kind: "llm",
        model: params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        attributes,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      throw error;
    }
  };
  return client;
}
|
|
1101
|
+
/**
 * Monkey-patch a Google Generative AI model so every generateContent call
 * emits a Fallom trace. Transparent passthrough: same args, same return
 * value / thrown error as the original model.
 *
 * @param {object} model GoogleGenerativeAI model instance (mutated in place).
 * @returns {object} the same model, for chaining.
 */
function wrapGoogleAI(model) {
  const originalGenerate = model.generateContent.bind(model);
  model.generateContent = async function(...args) {
    const ctx = sessionStorage.getStore() || fallbackSession;
    // Without a session or init(), behave exactly like the unwrapped model.
    if (!ctx || !initialized) {
      return originalGenerate(...args);
    }
    // Prompt-management context is optional; the chunk may not be present.
    let promptCtx = null;
    try {
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    // Join an enclosing trace if one is active, else start a fresh trace id.
    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const startTime = Date.now();
    try {
      const response = await originalGenerate(...args);
      const endTime = Date.now();
      const result = response?.response;
      const usage = result?.usageMetadata;
      const modelName = model?.model || "gemini";
      const attributes = {};
      if (captureContent) {
        attributes["gen_ai.request.model"] = modelName;
        attributes["gen_ai.response.model"] = modelName;
        // generateContent accepts either a bare string prompt or a
        // { contents: [...] } request object.
        const input = args[0];
        if (typeof input === "string") {
          attributes["gen_ai.prompt.0.role"] = "user";
          attributes["gen_ai.prompt.0.content"] = input;
        } else if (input?.contents) {
          input.contents.forEach((content, i) => {
            attributes[`gen_ai.prompt.${i}.role`] = content.role || "user";
            attributes[`gen_ai.prompt.${i}.content`] = content.parts?.[0]?.text || JSON.stringify(content.parts);
          });
        }
        // result.text() is the SDK's accessor for the concatenated output.
        const outputText = result?.text?.();
        if (outputText) {
          attributes["gen_ai.completion.0.role"] = "assistant";
          attributes["gen_ai.completion.0.content"] = outputText;
        }
      }
      if (usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(usage);
      }
      const candidate = result?.candidates?.[0];
      if (candidate?.finishReason) {
        attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
      }
      // Fire-and-forget; telemetry failure must not affect the caller.
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateContent",
        kind: "llm",
        model: modelName,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: usage?.promptTokenCount,
        completion_tokens: usage?.candidatesTokenCount,
        total_tokens: usage?.totalTokenCount,
        attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      return response;
    } catch (error) {
      // Error path: record an ERROR trace (string prompts only), then
      // rethrow the original error untouched.
      const endTime = Date.now();
      const modelName = model?.model || "gemini";
      const attributes = {};
      if (captureContent) {
        attributes["gen_ai.request.model"] = modelName;
        attributes["error.message"] = error?.message;
        const input = args[0];
        if (typeof input === "string") {
          attributes["gen_ai.prompt.0.role"] = "user";
          attributes["gen_ai.prompt.0.content"] = input;
        }
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateContent",
        kind: "llm",
        model: modelName,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        attributes: captureContent ? attributes : void 0,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      throw error;
    }
  };
  return model;
}
|
|
1217
|
+
// Verbose console logging for the Vercel AI SDK wrappers; toggled by
// wrapAISDK(ai, { debug: true }).
var aiSdkDebug = false;
|
|
1218
|
+
/**
 * Normalize token usage from a Vercel AI SDK result. Reads the standard
 * usage object first (directUsage wins over result.usage), then fills gaps
 * from OpenRouter provider metadata, which also carries a cost figure.
 * totalTokens is derived from the parts when absent. NaN and null are
 * treated the same as missing.
 *
 * @param {object} [result]      AI SDK result (may carry experimental_providerMetadata).
 * @param {object} [directUsage] usage object to prefer over result.usage.
 * @returns {{promptTokens?:number,completionTokens?:number,totalTokens?:number,cost?:number}}
 */
function extractUsageFromResult(result, directUsage) {
  const usage = directUsage ?? result?.usage;
  // Collapse null/undefined/NaN to undefined; pass everything else through.
  const num = (v) => v === null || v === void 0 || Number.isNaN(v) ? void 0 : v;
  let promptTokens = num(usage?.promptTokens);
  let completionTokens = num(usage?.completionTokens);
  let totalTokens = num(usage?.totalTokens);
  let cost;
  const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
  if (orUsage) {
    promptTokens = promptTokens ?? num(orUsage.promptTokens);
    completionTokens = completionTokens ?? num(orUsage.completionTokens);
    totalTokens = totalTokens ?? num(orUsage.totalTokens);
    cost = num(orUsage.cost);
  }
  const haveParts = promptTokens !== void 0 || completionTokens !== void 0;
  if (totalTokens === void 0 && haveParts) {
    totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
  }
  return { promptTokens, completionTokens, totalTokens, cost };
}
|
|
1245
|
+
/**
 * Wrap the Vercel AI SDK module, returning traced versions of its entry
 * points. generateObject/streamObject are only wrapped when the installed
 * SDK version exposes them; otherwise those keys are undefined.
 *
 * @param {object} ai        the imported `ai` module.
 * @param {object} [options] { debug?: boolean } — enables wrapper debug logs.
 */
function wrapAISDK(ai, options) {
  const aiModule = ai;
  aiSdkDebug = options?.debug ?? false;
  const wrapped = {
    generateText: createGenerateTextWrapper(aiModule),
    streamText: createStreamTextWrapper(aiModule),
    generateObject: void 0,
    streamObject: void 0
  };
  if (aiModule.generateObject) {
    wrapped.generateObject = createGenerateObjectWrapper(aiModule);
  }
  if (aiModule.streamObject) {
    wrapped.streamObject = createStreamObjectWrapper(aiModule);
  }
  return wrapped;
}
|
|
1255
|
+
/**
 * Build a traced drop-in replacement for the AI SDK's generateText. The
 * wrapper forwards all arguments and the result untouched, emitting a Fallom
 * trace on both success and error.
 *
 * @param {object} aiModule the imported `ai` module.
 * @returns {Function} async generateText-compatible function.
 */
function createGenerateTextWrapper(aiModule) {
  return async (...args) => {
    const ctx = sessionStorage.getStore() || fallbackSession;
    // Without a session or init(), behave exactly like the SDK function.
    if (!ctx || !initialized) {
      return aiModule.generateText(...args);
    }
    // Prompt-management context is optional; the chunk may not be present.
    let promptCtx = null;
    try {
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    // Join an enclosing trace if one is active, else start a fresh trace id.
    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    try {
      const result = await aiModule.generateText(...args);
      const endTime = Date.now();
      // Opt-in dump of result structure to diagnose usage-extraction issues.
      if (aiSdkDebug) {
        console.log(
          "\n\u{1F50D} [Fallom Debug] generateText result keys:",
          Object.keys(result || {})
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.usage:",
          JSON.stringify(result?.usage, null, 2)
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.response keys:",
          Object.keys(result?.response || {})
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.response.usage:",
          JSON.stringify(result?.response?.usage, null, 2)
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
          JSON.stringify(result?.experimental_providerMetadata, null, 2)
        );
      }
      // Resolve the model id: response metadata first, then the model object.
      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
      const attributes = {};
      if (captureContent) {
        attributes["gen_ai.request.model"] = modelId;
        attributes["gen_ai.response.model"] = modelId;
        // generateText accepts either a bare `prompt` or a `messages` array.
        if (params?.prompt) {
          attributes["gen_ai.prompt.0.role"] = "user";
          attributes["gen_ai.prompt.0.content"] = params.prompt;
        }
        if (params?.messages) {
          params.messages.forEach((msg, i) => {
            attributes[`gen_ai.prompt.${i}.role`] = msg.role;
            attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
          });
        }
        if (result?.text) {
          attributes["gen_ai.completion.0.role"] = "assistant";
          attributes["gen_ai.completion.0.content"] = result.text;
        }
        if (result?.response?.id) {
          attributes["gen_ai.response.id"] = result.response.id;
        }
      }
      // Raw usage/provider metadata recorded regardless of content capture;
      // the attributes object itself is only sent when captureContent is on.
      if (result?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
      }
      if (result?.experimental_providerMetadata) {
        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
          result.experimental_providerMetadata
        );
      }
      if (result?.finishReason) {
        attributes["gen_ai.response.finish_reason"] = result.finishReason;
      }
      const usage = extractUsageFromResult(result);
      // Fire-and-forget; telemetry failure must not affect the caller.
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateText",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: usage.promptTokens,
        completion_tokens: usage.completionTokens,
        total_tokens: usage.totalTokens,
        attributes: captureContent ? attributes : void 0,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      return result;
    } catch (error) {
      // Error path: record an ERROR trace (no content attributes), then
      // rethrow the original error untouched.
      const endTime = Date.now();
      const modelId = params?.model?.modelId || String(params?.model || "unknown");
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateText",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      throw error;
    }
  };
}
|
|
1386
|
+
// Wraps the AI SDK's `streamText` so that token usage, timing, and
// (optionally) prompt content are reported to Fallom once the stream's
// usage promise resolves. The wrapped call is transparent to callers:
// the returned object behaves like the original streamText result.
function createStreamTextWrapper(aiModule) {
  return async (...args) => {
    // Session context comes from AsyncLocalStorage when available,
    // otherwise from the module-level fallback set via setSession().
    const ctx = sessionStorage.getStore() || fallbackSession;
    const params = args[0] || {};
    const startTime = Date.now();
    // Always invoke the underlying SDK first; tracing must never block or
    // alter the call itself.
    const result = await aiModule.streamText(...args);
    if (!ctx || !initialized) {
      // No session or SDK not initialized: pass through untraced.
      return result;
    }
    // Reuse an ambient trace/parent span when running inside span(); else
    // start a fresh trace.
    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    // Set by the wrapped textStream below on the first emitted chunk;
    // read later by the usage handler to compute time-to-first-token.
    let firstTokenTime = null;
    const modelId = params?.model?.modelId || String(params?.model || "unknown");
    let promptCtx = null;
    try {
      // Optional prompt-management context (A/B test metadata); best-effort.
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    if (result?.usage) {
      // `result.usage` is a promise that resolves when the stream finishes;
      // we piggyback on it to emit the trace without consuming the stream.
      result.usage.then(async (rawUsage) => {
        const endTime = Date.now();
        if (aiSdkDebug) {
          console.log(
            "\n\u{1F50D} [Fallom Debug] streamText usage:",
            JSON.stringify(rawUsage, null, 2)
          );
          console.log(
            "\u{1F50D} [Fallom Debug] streamText result keys:",
            Object.keys(result || {})
          );
        }
        log("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
        // experimental_providerMetadata may itself be a promise in newer
        // SDK versions; resolve it defensively.
        let providerMetadata = result?.experimental_providerMetadata;
        if (providerMetadata && typeof providerMetadata.then === "function") {
          try {
            providerMetadata = await providerMetadata;
          } catch {
            providerMetadata = void 0;
          }
        }
        const usage = extractUsageFromResult(
          { experimental_providerMetadata: providerMetadata },
          rawUsage
        );
        const attributes = {};
        if (captureContent) {
          attributes["gen_ai.request.model"] = modelId;
          if (params?.prompt) {
            attributes["gen_ai.prompt.0.role"] = "user";
            attributes["gen_ai.prompt.0.content"] = params.prompt;
          }
        }
        if (firstTokenTime) {
          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
        }
        if (rawUsage) {
          attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
        }
        if (providerMetadata) {
          attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
        }
        const tracePayload = {
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamText",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "OK",
          prompt_tokens: usage.promptTokens,
          completion_tokens: usage.completionTokens,
          total_tokens: usage.totalTokens,
          time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
          attributes: captureContent ? attributes : void 0,
          prompt_key: promptCtx?.promptKey,
          prompt_version: promptCtx?.promptVersion,
          prompt_ab_test_key: promptCtx?.abTestKey,
          prompt_variant_index: promptCtx?.variantIndex
        };
        // Fire-and-forget; tracing failures must never surface to the app.
        sendTrace(tracePayload).catch(() => {
        });
      }).catch((error) => {
        // The usage promise rejected (stream errored): emit an ERROR trace.
        const endTime = Date.now();
        log("\u274C streamText error:", error?.message);
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamText",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "ERROR",
          error_message: error?.message,
          prompt_key: promptCtx?.promptKey,
          prompt_version: promptCtx?.promptVersion,
          prompt_ab_test_key: promptCtx?.abTestKey,
          prompt_variant_index: promptCtx?.variantIndex
        }).catch(() => {
        });
      });
    }
    if (result?.textStream) {
      // Wrap textStream in a pass-through async generator purely to record
      // the timestamp of the first chunk (time-to-first-token).
      const originalTextStream = result.textStream;
      const wrappedTextStream = (async function* () {
        for await (const chunk of originalTextStream) {
          if (!firstTokenTime) {
            firstTokenTime = Date.now();
            log("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
          }
          yield chunk;
        }
      })();
      // Proxy so every other property still reads from the real result;
      // only `textStream` is substituted.
      return new Proxy(result, {
        get(target, prop) {
          if (prop === "textStream") {
            return wrappedTextStream;
          }
          return target[prop];
        }
      });
    }
    return result;
  };
}
|
|
1525
|
+
// Wraps the AI SDK's `generateObject` to report usage, timing, the
// generated object (when content capture is on), and any error to Fallom.
// The wrapper is transparent: the original result is returned unchanged
// and original errors are rethrown.
function createGenerateObjectWrapper(aiModule) {
  return async (...args) => {
    const ctx = sessionStorage.getStore() || fallbackSession;
    if (!ctx || !initialized) {
      // No session or SDK not initialized: pass through untraced.
      return aiModule.generateObject(...args);
    }
    let promptCtx = null;
    try {
      // Optional prompt-management context (A/B test metadata); best-effort.
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    // Join an ambient trace when inside span(); otherwise start a new one.
    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    try {
      const result = await aiModule.generateObject(...args);
      const endTime = Date.now();
      if (aiSdkDebug) {
        console.log(
          "\n\u{1F50D} [Fallom Debug] generateObject result keys:",
          Object.keys(result || {})
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.usage:",
          JSON.stringify(result?.usage, null, 2)
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.response keys:",
          Object.keys(result?.response || {})
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.response.usage:",
          JSON.stringify(result?.response?.usage, null, 2)
        );
      }
      // Prefer the model id reported by the provider response, then the
      // request param, then a string fallback.
      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
      const attributes = {};
      if (captureContent) {
        attributes["gen_ai.request.model"] = modelId;
        attributes["gen_ai.response.model"] = modelId;
        if (result?.object) {
          // The structured output is serialized as the assistant completion.
          attributes["gen_ai.completion.0.role"] = "assistant";
          attributes["gen_ai.completion.0.content"] = JSON.stringify(
            result.object
          );
        }
      }
      if (result?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
      }
      if (result?.experimental_providerMetadata) {
        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
          result.experimental_providerMetadata
        );
      }
      if (result?.finishReason) {
        attributes["gen_ai.response.finish_reason"] = result.finishReason;
      }
      const usage = extractUsageFromResult(result);
      // Fire-and-forget; tracing failures never surface to the caller.
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateObject",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: usage.promptTokens,
        completion_tokens: usage.completionTokens,
        total_tokens: usage.totalTokens,
        attributes: captureContent ? attributes : void 0,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      return result;
    } catch (error) {
      // Report the failure with an ERROR trace, then rethrow the original
      // error so caller behavior is unchanged.
      const endTime = Date.now();
      const modelId = params?.model?.modelId || String(params?.model || "unknown");
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateObject",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      }).catch(() => {
      });
      throw error;
    }
  };
}
|
|
1641
|
+
// Wraps the AI SDK's `streamObject` analogously to the streamText wrapper:
// the trace is emitted when the result's usage promise settles, and
// `partialObjectStream` is wrapped to record time-to-first-token.
function createStreamObjectWrapper(aiModule) {
  return async (...args) => {
    const ctx = sessionStorage.getStore() || fallbackSession;
    const params = args[0] || {};
    const startTime = Date.now();
    // Invoke the underlying SDK first; tracing must not alter the call.
    const result = await aiModule.streamObject(...args);
    log("\u{1F50D} streamObject result keys:", Object.keys(result || {}));
    if (!ctx || !initialized) {
      // No session or SDK not initialized: pass through untraced.
      return result;
    }
    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    // Set by the wrapped partialObjectStream on its first chunk.
    let firstTokenTime = null;
    const modelId = params?.model?.modelId || String(params?.model || "unknown");
    let promptCtx = null;
    try {
      // Optional prompt-management context; best-effort.
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    if (result?.usage) {
      // usage resolves when the stream completes; emit the trace then.
      result.usage.then(async (rawUsage) => {
        const endTime = Date.now();
        if (aiSdkDebug) {
          console.log(
            "\n\u{1F50D} [Fallom Debug] streamObject usage:",
            JSON.stringify(rawUsage, null, 2)
          );
          console.log(
            "\u{1F50D} [Fallom Debug] streamObject result keys:",
            Object.keys(result || {})
          );
        }
        log("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
        // providerMetadata may itself be a promise; resolve defensively.
        let providerMetadata = result?.experimental_providerMetadata;
        if (providerMetadata && typeof providerMetadata.then === "function") {
          try {
            providerMetadata = await providerMetadata;
          } catch {
            providerMetadata = void 0;
          }
        }
        const usage = extractUsageFromResult(
          { experimental_providerMetadata: providerMetadata },
          rawUsage
        );
        const attributes = {};
        if (captureContent) {
          attributes["gen_ai.request.model"] = modelId;
        }
        if (firstTokenTime) {
          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
        }
        if (rawUsage) {
          attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
        }
        if (providerMetadata) {
          attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
        }
        // Fire-and-forget; tracing failures never surface to the app.
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamObject",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "OK",
          prompt_tokens: usage.promptTokens,
          completion_tokens: usage.completionTokens,
          total_tokens: usage.totalTokens,
          attributes: captureContent ? attributes : void 0,
          prompt_key: promptCtx?.promptKey,
          prompt_version: promptCtx?.promptVersion,
          prompt_ab_test_key: promptCtx?.abTestKey,
          prompt_variant_index: promptCtx?.variantIndex
        }).catch(() => {
        });
      }).catch((error) => {
        // usage promise rejected (stream errored): emit an ERROR trace.
        const endTime = Date.now();
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamObject",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "ERROR",
          error_message: error?.message,
          prompt_key: promptCtx?.promptKey,
          prompt_version: promptCtx?.promptVersion,
          prompt_ab_test_key: promptCtx?.abTestKey,
          prompt_variant_index: promptCtx?.variantIndex
        }).catch(() => {
        });
      });
    }
    if (result?.partialObjectStream) {
      // Pass-through generator purely to timestamp the first chunk.
      const originalStream = result.partialObjectStream;
      const wrappedStream = (async function* () {
        for await (const chunk of originalStream) {
          if (!firstTokenTime) {
            firstTokenTime = Date.now();
            log("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
          }
          yield chunk;
        }
      })();
      // Proxy substitutes only `partialObjectStream`; all other reads hit
      // the real result.
      return new Proxy(result, {
        get(target, prop) {
          if (prop === "partialObjectStream") {
            return wrappedStream;
          }
          return target[prop];
        }
      });
    }
    return result;
  };
}
|
|
1774
|
+
// Instruments a Mastra agent by replacing its `generate` method in place
// (the same agent instance is mutated and returned). Each call is traced
// with message content, tool calls, usage, and timing.
function wrapMastraAgent(agent) {
  const originalGenerate = agent.generate.bind(agent);
  const agentName = agent.name || "MastraAgent";
  agent.generate = async function(...args) {
    const ctx = sessionStorage.getStore() || fallbackSession;
    if (!ctx || !initialized) {
      // No session or SDK not initialized: pass through untraced.
      return originalGenerate(...args);
    }
    let promptCtx = null;
    try {
      // Optional prompt-management context; best-effort.
      const { getPromptContext } = await import("./prompts-XSZHTCX7.mjs");
      promptCtx = getPromptContext();
    } catch {
    }
    // NOTE(review): unlike the AI SDK wrappers, this does not consult
    // traceContextStorage — every agent call starts a fresh trace.
    const traceId = generateHexId(32);
    const spanId = generateHexId(16);
    const startTime = Date.now();
    // First positional argument is treated as the message list; presumably
    // Mastra's generate(messages, options) shape — confirm against Mastra docs.
    const messages = args[0] || [];
    try {
      const result = await originalGenerate(...args);
      const endTime = Date.now();
      const model = result?.model?.modelId || "unknown";
      // Flatten tool invocations across all agent steps, pairing each call
      // with the result at the same index (assumes parallel arrays).
      const toolCalls = [];
      if (result?.steps?.length) {
        for (const step of result.steps) {
          if (step.toolCalls?.length) {
            for (let i = 0; i < step.toolCalls.length; i++) {
              const tc = step.toolCalls[i];
              const tr = step.toolResults?.[i];
              toolCalls.push({
                name: tc.toolName,
                arguments: tc.args,
                result: tr?.result
              });
            }
          }
        }
      }
      // OpenTelemetry gen_ai.* attribute conventions, plus fallom.* extras.
      // NOTE(review): content is always captured here, without the
      // captureContent gate used by the other wrappers — confirm intended.
      const attributes = {
        "gen_ai.system": "Mastra",
        "gen_ai.request.model": model,
        "gen_ai.response.model": model,
        "fallom.source": "mastra-agent",
        "llm.request.type": "chat"
      };
      if (Array.isArray(messages)) {
        messages.forEach((msg, i) => {
          attributes[`gen_ai.prompt.${i}.role`] = msg.role || "user";
          attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
        });
      }
      if (result?.text) {
        attributes["gen_ai.completion.0.role"] = "assistant";
        attributes["gen_ai.completion.0.content"] = result.text;
        attributes["gen_ai.completion.0.finish_reason"] = "stop";
      }
      if (toolCalls.length > 0) {
        attributes["fallom.tool_calls"] = JSON.stringify(toolCalls);
        toolCalls.forEach((tc, i) => {
          attributes[`gen_ai.completion.0.tool_calls.${i}.name`] = tc.name;
          attributes[`gen_ai.completion.0.tool_calls.${i}.type`] = "function";
          attributes[`gen_ai.completion.0.tool_calls.${i}.arguments`] = JSON.stringify(tc.arguments);
        });
      }
      if (result?.usage) {
        attributes["gen_ai.usage.prompt_tokens"] = result.usage.promptTokens;
        attributes["gen_ai.usage.completion_tokens"] = result.usage.completionTokens;
        attributes["llm.usage.total_tokens"] = result.usage.totalTokens;
      }
      const traceData = {
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        name: `mastra.${agentName}.generate`,
        kind: "client",
        model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: result?.usage?.promptTokens,
        completion_tokens: result?.usage?.completionTokens,
        total_tokens: result?.usage?.totalTokens,
        attributes,
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      };
      // Fire-and-forget; tracing failures never surface to the caller.
      sendTrace(traceData).catch(() => {
      });
      return result;
    } catch (error) {
      // Emit an ERROR trace and rethrow the original error unchanged.
      const endTime = Date.now();
      const traceData = {
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        name: `mastra.${agentName}.generate`,
        kind: "client",
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error instanceof Error ? error.message : String(error),
        prompt_key: promptCtx?.promptKey,
        prompt_version: promptCtx?.promptVersion,
        prompt_ab_test_key: promptCtx?.abTestKey,
        prompt_variant_index: promptCtx?.variantIndex
      };
      sendTrace(traceData).catch(() => {
      });
      throw error;
    }
  };
  return agent;
}
|
|
1895
|
+
var FallomSession = class {
|
|
1896
|
+
constructor(options) {
|
|
1897
|
+
this.ctx = {
|
|
1898
|
+
configKey: options.configKey,
|
|
1899
|
+
sessionId: options.sessionId,
|
|
1900
|
+
customerId: options.customerId
|
|
1901
|
+
};
|
|
1902
|
+
}
|
|
1903
|
+
/**
|
|
1904
|
+
* Get the session context.
|
|
1905
|
+
*/
|
|
1906
|
+
getContext() {
|
|
1907
|
+
return { ...this.ctx };
|
|
1908
|
+
}
|
|
1909
|
+
/**
|
|
1910
|
+
* Get model assignment for this session (A/B testing).
|
|
1911
|
+
*
|
|
1912
|
+
* @param configKey - Config name to get model for (defaults to session's configKey)
|
|
1913
|
+
* @param options - Optional settings
|
|
1914
|
+
* @returns Model string (e.g., "claude-opus", "gpt-4o")
|
|
1915
|
+
*/
|
|
1916
|
+
async getModel(configKeyOrOptions, options) {
|
|
1917
|
+
let configKey;
|
|
1918
|
+
let opts;
|
|
1919
|
+
if (typeof configKeyOrOptions === "string") {
|
|
1920
|
+
configKey = configKeyOrOptions;
|
|
1921
|
+
opts = options || {};
|
|
1922
|
+
} else {
|
|
1923
|
+
configKey = this.ctx.configKey;
|
|
1924
|
+
opts = configKeyOrOptions || {};
|
|
1925
|
+
}
|
|
1926
|
+
const { get: get2 } = await import("./models-BUHMMTWK.mjs");
|
|
1927
|
+
return get2(configKey, this.ctx.sessionId, opts);
|
|
1928
|
+
}
|
|
1929
|
+
  /**
   * Wrap a Vercel AI SDK model to automatically trace all calls.
   * Session context is baked into the model.
   *
   * Returns a new object whose prototype is the original model, with
   * `doGenerate` / `doStream` shadowed by tracing versions; the original
   * model instance is not mutated.
   *
   * @param model - A Vercel AI SDK model (e.g., openai("gpt-4o"))
   * @returns The same model with tracing enabled
   *
   * @example
   * ```typescript
   * const model = session.traceModel(openai("gpt-4o"));
   * await generateText({ model, prompt: "Hello!" }); // Automatically traced
   * ```
   */
  traceModel(model) {
    const ctx = this.ctx;
    // Prototype chain delegation keeps every other model property intact.
    const tracedModel = Object.create(model);
    if (model.doGenerate) {
      const originalDoGenerate = model.doGenerate.bind(model);
      tracedModel.doGenerate = async function(...args) {
        if (!initialized) {
          // SDK not initialized: pass through untraced.
          return originalDoGenerate(...args);
        }
        // Join an ambient trace when inside span(); else start a new one.
        const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
        const traceId = traceCtx?.traceId || generateHexId(32);
        const spanId = generateHexId(16);
        const parentSpanId = traceCtx?.parentSpanId;
        const startTime = Date.now();
        try {
          const result = await originalDoGenerate(...args);
          const endTime = Date.now();
          const modelId = model.modelId || "unknown";
          const attributes = {};
          if (captureContent) {
            attributes["gen_ai.request.model"] = modelId;
            attributes["gen_ai.response.model"] = modelId;
            if (result?.rawResponse) {
              attributes["fallom.raw.response"] = JSON.stringify(result.rawResponse);
            }
          }
          // Usage location varies by provider/SDK version; try both spots.
          const usage = result?.usage || result?.rawResponse?.usage;
          if (usage) {
            attributes["fallom.raw.usage"] = JSON.stringify(usage);
          }
          // Fire-and-forget; tracing failures never surface to the caller.
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: parentSpanId,
            name: "generateText",
            kind: "llm",
            model: modelId,
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(endTime).toISOString(),
            duration_ms: endTime - startTime,
            status: "OK",
            prompt_tokens: usage?.promptTokens,
            completion_tokens: usage?.completionTokens,
            total_tokens: usage?.totalTokens,
            attributes: captureContent ? attributes : void 0
          }).catch(() => {
          });
          return result;
        } catch (error) {
          // Emit an ERROR trace, then rethrow the original error.
          const endTime = Date.now();
          const modelId = model.modelId || "unknown";
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: parentSpanId,
            name: "generateText",
            kind: "llm",
            model: modelId,
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(endTime).toISOString(),
            duration_ms: endTime - startTime,
            status: "ERROR",
            error_message: error instanceof Error ? error.message : String(error)
          }).catch(() => {
          });
          throw error;
        }
      };
    }
    if (model.doStream) {
      const originalDoStream = model.doStream.bind(model);
      tracedModel.doStream = async function(...args) {
        if (!initialized) {
          return originalDoStream(...args);
        }
        const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
        const traceId = traceCtx?.traceId || generateHexId(32);
        const spanId = generateHexId(16);
        const parentSpanId = traceCtx?.parentSpanId;
        const startTime = Date.now();
        const modelId = model.modelId || "unknown";
        try {
          const result = await originalDoStream(...args);
          // NOTE: the trace is sent as soon as the stream HANDLE is
          // obtained, not when the stream finishes — duration_ms covers
          // only call setup and no token usage is recorded here.
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: parentSpanId,
            name: "streamText",
            kind: "llm",
            model: modelId,
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(Date.now()).toISOString(),
            duration_ms: Date.now() - startTime,
            status: "OK",
            is_streaming: true
          }).catch(() => {
          });
          return result;
        } catch (error) {
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: parentSpanId,
            name: "streamText",
            kind: "llm",
            model: modelId,
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(Date.now()).toISOString(),
            duration_ms: Date.now() - startTime,
            status: "ERROR",
            error_message: error instanceof Error ? error.message : String(error),
            is_streaming: true
          }).catch(() => {
          });
          throw error;
        }
      };
    }
    return tracedModel;
  }
|
|
2074
|
+
  /**
   * Wrap an OpenAI client to automatically trace all chat completions.
   * Session context is baked in - no need for setSession().
   *
   * Mutates the client in place: `chat.completions.create` is replaced
   * with a tracing version, and the same client instance is returned.
   */
  wrapOpenAI(client) {
    const ctx = this.ctx;
    const originalCreate = client.chat.completions.create.bind(
      client.chat.completions
    );
    client.chat.completions.create = async function(...args) {
      if (!initialized) {
        // SDK not initialized: pass through untraced.
        return originalCreate(...args);
      }
      // Join an ambient trace when inside span(); else start a new one.
      const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
      const traceId = traceCtx?.traceId || generateHexId(32);
      const spanId = generateHexId(16);
      const parentSpanId = traceCtx?.parentSpanId;
      const params = args[0] || {};
      const startTime = Date.now();
      try {
        const response = await originalCreate(...args);
        const endTime = Date.now();
        // Build gen_ai.* attributes from request messages and the first
        // choice only when content capture is enabled.
        const attributes = captureContent ? messagesToOtelAttributes(
          params?.messages,
          response?.choices?.[0]?.message,
          response?.model || params?.model,
          response?.id
        ) : {};
        if (response?.usage) {
          attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
        }
        if (response?.choices?.[0]?.finish_reason) {
          attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
        }
        // Fire-and-forget; tracing failures never surface to the caller.
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "chat.completions.create",
          kind: "llm",
          model: response?.model || params?.model,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "OK",
          prompt_tokens: response?.usage?.prompt_tokens,
          completion_tokens: response?.usage?.completion_tokens,
          total_tokens: response?.usage?.total_tokens,
          attributes: captureContent ? attributes : void 0
        }).catch(() => {
        });
        return response;
      } catch (error) {
        // Emit an ERROR trace, then rethrow the original error.
        const endTime = Date.now();
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "chat.completions.create",
          kind: "llm",
          model: params?.model,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "ERROR",
          error_message: error instanceof Error ? error.message : String(error)
        }).catch(() => {
        });
        throw error;
      }
    };
    return client;
  }
|
|
2153
|
+
/**
|
|
2154
|
+
* Wrap an Anthropic client to automatically trace all message calls.
|
|
2155
|
+
* Session context is baked in - no need for setSession().
|
|
2156
|
+
*/
|
|
2157
|
+
wrapAnthropic(client) {
|
|
2158
|
+
const ctx = this.ctx;
|
|
2159
|
+
const originalCreate = client.messages.create.bind(client.messages);
|
|
2160
|
+
client.messages.create = async function(...args) {
|
|
2161
|
+
if (!initialized) {
|
|
2162
|
+
return originalCreate(...args);
|
|
2163
|
+
}
|
|
2164
|
+
const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
|
|
2165
|
+
const traceId = traceCtx?.traceId || generateHexId(32);
|
|
2166
|
+
const spanId = generateHexId(16);
|
|
2167
|
+
const parentSpanId = traceCtx?.parentSpanId;
|
|
2168
|
+
const params = args[0] || {};
|
|
2169
|
+
const startTime = Date.now();
|
|
2170
|
+
try {
|
|
2171
|
+
const response = await originalCreate(...args);
|
|
2172
|
+
const endTime = Date.now();
|
|
2173
|
+
const usage = response?.usage;
|
|
2174
|
+
const attributes = {};
|
|
2175
|
+
if (captureContent) {
|
|
2176
|
+
attributes["gen_ai.request.model"] = params?.model;
|
|
2177
|
+
attributes["gen_ai.response.model"] = response?.model || params?.model;
|
|
2178
|
+
if (params?.system) {
|
|
2179
|
+
attributes["gen_ai.prompt.0.role"] = "system";
|
|
2180
|
+
attributes["gen_ai.prompt.0.content"] = params.system;
|
|
2181
|
+
}
|
|
2182
|
+
if (params?.messages) {
|
|
2183
|
+
const offset = params?.system ? 1 : 0;
|
|
2184
|
+
params.messages.forEach((msg, i) => {
|
|
2185
|
+
attributes[`gen_ai.prompt.${i + offset}.role`] = msg.role;
|
|
2186
|
+
attributes[`gen_ai.prompt.${i + offset}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
2187
|
+
});
|
|
2188
|
+
}
|
|
2189
|
+
if (response?.content?.[0]?.text) {
|
|
2190
|
+
attributes["gen_ai.completion.0.role"] = "assistant";
|
|
2191
|
+
attributes["gen_ai.completion.0.content"] = response.content[0].text;
|
|
2192
|
+
}
|
|
2193
|
+
if (response?.usage) {
|
|
2194
|
+
attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
|
|
2195
|
+
}
|
|
2196
|
+
if (response?.stop_reason) {
|
|
2197
|
+
attributes["gen_ai.response.finish_reason"] = response.stop_reason;
|
|
2198
|
+
}
|
|
2199
|
+
}
|
|
2200
|
+
sendTrace({
|
|
2201
|
+
config_key: ctx.configKey,
|
|
2202
|
+
session_id: ctx.sessionId,
|
|
2203
|
+
customer_id: ctx.customerId,
|
|
2204
|
+
trace_id: traceId,
|
|
2205
|
+
span_id: spanId,
|
|
2206
|
+
parent_span_id: parentSpanId,
|
|
2207
|
+
name: "messages.create",
|
|
2208
|
+
kind: "llm",
|
|
2209
|
+
model: response?.model || params?.model,
|
|
2210
|
+
start_time: new Date(startTime).toISOString(),
|
|
2211
|
+
end_time: new Date(endTime).toISOString(),
|
|
2212
|
+
duration_ms: endTime - startTime,
|
|
2213
|
+
status: "OK",
|
|
2214
|
+
prompt_tokens: usage?.input_tokens,
|
|
2215
|
+
completion_tokens: usage?.output_tokens,
|
|
2216
|
+
total_tokens: usage ? usage.input_tokens + usage.output_tokens : void 0,
|
|
2217
|
+
attributes: captureContent ? attributes : void 0
|
|
2218
|
+
}).catch(() => {
|
|
2219
|
+
});
|
|
2220
|
+
return response;
|
|
2221
|
+
} catch (error) {
|
|
2222
|
+
const endTime = Date.now();
|
|
2223
|
+
sendTrace({
|
|
2224
|
+
config_key: ctx.configKey,
|
|
2225
|
+
session_id: ctx.sessionId,
|
|
2226
|
+
customer_id: ctx.customerId,
|
|
2227
|
+
trace_id: traceId,
|
|
2228
|
+
span_id: spanId,
|
|
2229
|
+
parent_span_id: parentSpanId,
|
|
2230
|
+
name: "messages.create",
|
|
2231
|
+
kind: "llm",
|
|
2232
|
+
model: params?.model,
|
|
2233
|
+
start_time: new Date(startTime).toISOString(),
|
|
2234
|
+
end_time: new Date(endTime).toISOString(),
|
|
2235
|
+
duration_ms: endTime - startTime,
|
|
2236
|
+
status: "ERROR",
|
|
2237
|
+
error_message: error instanceof Error ? error.message : String(error)
|
|
2238
|
+
}).catch(() => {
|
|
2239
|
+
});
|
|
2240
|
+
throw error;
|
|
2241
|
+
}
|
|
2242
|
+
};
|
|
2243
|
+
return client;
|
|
2244
|
+
}
|
|
2245
|
+
/**
|
|
2246
|
+
* Wrap a Google AI model to automatically trace all generateContent calls.
|
|
2247
|
+
* Session context is baked in - no need for setSession().
|
|
2248
|
+
*/
|
|
2249
|
+
wrapGoogleAI(model) {
|
|
2250
|
+
const ctx = this.ctx;
|
|
2251
|
+
const originalGenerate = model.generateContent.bind(model);
|
|
2252
|
+
model.generateContent = async function(...args) {
|
|
2253
|
+
if (!initialized) {
|
|
2254
|
+
return originalGenerate(...args);
|
|
2255
|
+
}
|
|
2256
|
+
const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
|
|
2257
|
+
const traceId = traceCtx?.traceId || generateHexId(32);
|
|
2258
|
+
const spanId = generateHexId(16);
|
|
2259
|
+
const parentSpanId = traceCtx?.parentSpanId;
|
|
2260
|
+
const startTime = Date.now();
|
|
2261
|
+
try {
|
|
2262
|
+
const response = await originalGenerate(...args);
|
|
2263
|
+
const endTime = Date.now();
|
|
2264
|
+
const usage = response?.response?.usageMetadata;
|
|
2265
|
+
const modelName = model.model || "gemini";
|
|
2266
|
+
const attributes = {};
|
|
2267
|
+
if (captureContent) {
|
|
2268
|
+
attributes["gen_ai.request.model"] = modelName;
|
|
2269
|
+
attributes["gen_ai.response.model"] = modelName;
|
|
2270
|
+
const promptArg = args[0];
|
|
2271
|
+
if (typeof promptArg === "string") {
|
|
2272
|
+
attributes["gen_ai.prompt.0.role"] = "user";
|
|
2273
|
+
attributes["gen_ai.prompt.0.content"] = promptArg;
|
|
2274
|
+
}
|
|
2275
|
+
const text = response?.response?.text?.();
|
|
2276
|
+
if (text) {
|
|
2277
|
+
attributes["gen_ai.completion.0.role"] = "assistant";
|
|
2278
|
+
attributes["gen_ai.completion.0.content"] = text;
|
|
2279
|
+
}
|
|
2280
|
+
if (usage) {
|
|
2281
|
+
attributes["fallom.raw.usage"] = JSON.stringify(usage);
|
|
2282
|
+
}
|
|
2283
|
+
}
|
|
2284
|
+
sendTrace({
|
|
2285
|
+
config_key: ctx.configKey,
|
|
2286
|
+
session_id: ctx.sessionId,
|
|
2287
|
+
customer_id: ctx.customerId,
|
|
2288
|
+
trace_id: traceId,
|
|
2289
|
+
span_id: spanId,
|
|
2290
|
+
parent_span_id: parentSpanId,
|
|
2291
|
+
name: "generateContent",
|
|
2292
|
+
kind: "llm",
|
|
2293
|
+
model: modelName,
|
|
2294
|
+
start_time: new Date(startTime).toISOString(),
|
|
2295
|
+
end_time: new Date(endTime).toISOString(),
|
|
2296
|
+
duration_ms: endTime - startTime,
|
|
2297
|
+
status: "OK",
|
|
2298
|
+
prompt_tokens: usage?.promptTokenCount,
|
|
2299
|
+
completion_tokens: usage?.candidatesTokenCount,
|
|
2300
|
+
total_tokens: usage?.totalTokenCount,
|
|
2301
|
+
attributes: captureContent ? attributes : void 0
|
|
2302
|
+
}).catch(() => {
|
|
2303
|
+
});
|
|
2304
|
+
return response;
|
|
2305
|
+
} catch (error) {
|
|
2306
|
+
const endTime = Date.now();
|
|
2307
|
+
sendTrace({
|
|
2308
|
+
config_key: ctx.configKey,
|
|
2309
|
+
session_id: ctx.sessionId,
|
|
2310
|
+
customer_id: ctx.customerId,
|
|
2311
|
+
trace_id: traceId,
|
|
2312
|
+
span_id: spanId,
|
|
2313
|
+
parent_span_id: parentSpanId,
|
|
2314
|
+
name: "generateContent",
|
|
2315
|
+
kind: "llm",
|
|
2316
|
+
model: model.model || "gemini",
|
|
2317
|
+
start_time: new Date(startTime).toISOString(),
|
|
2318
|
+
end_time: new Date(endTime).toISOString(),
|
|
2319
|
+
duration_ms: endTime - startTime,
|
|
2320
|
+
status: "ERROR",
|
|
2321
|
+
error_message: error instanceof Error ? error.message : String(error)
|
|
2322
|
+
}).catch(() => {
|
|
2323
|
+
});
|
|
2324
|
+
throw error;
|
|
2325
|
+
}
|
|
2326
|
+
};
|
|
2327
|
+
return model;
|
|
2328
|
+
}
|
|
2329
|
+
/**
|
|
2330
|
+
* Wrap the Vercel AI SDK module to automatically trace all calls.
|
|
2331
|
+
* Session context is baked in - no need for setSession().
|
|
2332
|
+
*/
|
|
2333
|
+
wrapAISDK(ai, options) {
|
|
2334
|
+
const ctx = this.ctx;
|
|
2335
|
+
const aiModule = ai;
|
|
2336
|
+
const debug = options?.debug ?? false;
|
|
2337
|
+
return {
|
|
2338
|
+
generateText: this._createGenerateTextWrapper(aiModule, ctx, debug),
|
|
2339
|
+
streamText: this._createStreamTextWrapper(aiModule, ctx),
|
|
2340
|
+
generateObject: aiModule.generateObject ? this._createGenerateObjectWrapper(aiModule, ctx) : void 0,
|
|
2341
|
+
streamObject: aiModule.streamObject ? this._createStreamObjectWrapper(aiModule, ctx) : void 0
|
|
2342
|
+
};
|
|
2343
|
+
}
|
|
2344
|
+
_createGenerateTextWrapper(aiModule, ctx, debug) {
|
|
2345
|
+
return async (...args) => {
|
|
2346
|
+
if (!initialized) {
|
|
2347
|
+
return aiModule.generateText(...args);
|
|
2348
|
+
}
|
|
2349
|
+
const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
|
|
2350
|
+
const traceId = traceCtx?.traceId || generateHexId(32);
|
|
2351
|
+
const spanId = generateHexId(16);
|
|
2352
|
+
const parentSpanId = traceCtx?.parentSpanId;
|
|
2353
|
+
const params = args[0] || {};
|
|
2354
|
+
const startTime = Date.now();
|
|
2355
|
+
try {
|
|
2356
|
+
const result = await aiModule.generateText(...args);
|
|
2357
|
+
const endTime = Date.now();
|
|
2358
|
+
if (debug) {
|
|
2359
|
+
console.log("\n\u{1F50D} [Fallom Debug] generateText result keys:", Object.keys(result || {}));
|
|
2360
|
+
console.log("\u{1F50D} [Fallom Debug] result.usage:", JSON.stringify(result?.usage, null, 2));
|
|
2361
|
+
console.log("\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:", JSON.stringify(result?.experimental_providerMetadata, null, 2));
|
|
2362
|
+
}
|
|
2363
|
+
const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
|
|
2364
|
+
const attributes = {};
|
|
2365
|
+
if (captureContent) {
|
|
2366
|
+
attributes["gen_ai.request.model"] = modelId;
|
|
2367
|
+
attributes["gen_ai.response.model"] = modelId;
|
|
2368
|
+
if (params?.prompt) {
|
|
2369
|
+
attributes["gen_ai.prompt.0.role"] = "user";
|
|
2370
|
+
attributes["gen_ai.prompt.0.content"] = params.prompt;
|
|
2371
|
+
}
|
|
2372
|
+
if (params?.messages) {
|
|
2373
|
+
params.messages.forEach((msg, i) => {
|
|
2374
|
+
attributes[`gen_ai.prompt.${i}.role`] = msg.role;
|
|
2375
|
+
attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
2376
|
+
});
|
|
2377
|
+
}
|
|
2378
|
+
if (result?.text) {
|
|
2379
|
+
attributes["gen_ai.completion.0.role"] = "assistant";
|
|
2380
|
+
attributes["gen_ai.completion.0.content"] = result.text;
|
|
2381
|
+
}
|
|
2382
|
+
if (result?.response?.id) {
|
|
2383
|
+
attributes["gen_ai.response.id"] = result.response.id;
|
|
2384
|
+
}
|
|
2385
|
+
attributes["fallom.raw.usage"] = JSON.stringify(result?.usage);
|
|
2386
|
+
attributes["fallom.raw.providerMetadata"] = JSON.stringify(result?.experimental_providerMetadata);
|
|
2387
|
+
attributes["gen_ai.response.finish_reason"] = result?.finishReason;
|
|
2388
|
+
}
|
|
2389
|
+
sendTrace({
|
|
2390
|
+
config_key: ctx.configKey,
|
|
2391
|
+
session_id: ctx.sessionId,
|
|
2392
|
+
customer_id: ctx.customerId,
|
|
2393
|
+
trace_id: traceId,
|
|
2394
|
+
span_id: spanId,
|
|
2395
|
+
parent_span_id: parentSpanId,
|
|
2396
|
+
name: "generateText",
|
|
2397
|
+
kind: "llm",
|
|
2398
|
+
model: modelId,
|
|
2399
|
+
start_time: new Date(startTime).toISOString(),
|
|
2400
|
+
end_time: new Date(endTime).toISOString(),
|
|
2401
|
+
duration_ms: endTime - startTime,
|
|
2402
|
+
status: "OK",
|
|
2403
|
+
prompt_tokens: result?.usage?.promptTokens,
|
|
2404
|
+
completion_tokens: result?.usage?.completionTokens,
|
|
2405
|
+
total_tokens: result?.usage?.totalTokens,
|
|
2406
|
+
attributes: captureContent ? attributes : void 0
|
|
2407
|
+
}).catch(() => {
|
|
2408
|
+
});
|
|
2409
|
+
return result;
|
|
2410
|
+
} catch (error) {
|
|
2411
|
+
const endTime = Date.now();
|
|
2412
|
+
sendTrace({
|
|
2413
|
+
config_key: ctx.configKey,
|
|
2414
|
+
session_id: ctx.sessionId,
|
|
2415
|
+
customer_id: ctx.customerId,
|
|
2416
|
+
trace_id: traceId,
|
|
2417
|
+
span_id: spanId,
|
|
2418
|
+
parent_span_id: parentSpanId,
|
|
2419
|
+
name: "generateText",
|
|
2420
|
+
kind: "llm",
|
|
2421
|
+
model: params?.model?.modelId || String(params?.model || "unknown"),
|
|
2422
|
+
start_time: new Date(startTime).toISOString(),
|
|
2423
|
+
end_time: new Date(endTime).toISOString(),
|
|
2424
|
+
duration_ms: endTime - startTime,
|
|
2425
|
+
status: "ERROR",
|
|
2426
|
+
error_message: error instanceof Error ? error.message : String(error)
|
|
2427
|
+
}).catch(() => {
|
|
2428
|
+
});
|
|
2429
|
+
throw error;
|
|
2430
|
+
}
|
|
2431
|
+
};
|
|
2432
|
+
}
|
|
2433
|
+
_createStreamTextWrapper(aiModule, ctx) {
|
|
2434
|
+
return async (...args) => {
|
|
2435
|
+
const params = args[0] || {};
|
|
2436
|
+
const startTime = Date.now();
|
|
2437
|
+
const result = await aiModule.streamText(...args);
|
|
2438
|
+
if (!initialized) {
|
|
2439
|
+
return result;
|
|
2440
|
+
}
|
|
2441
|
+
const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
|
|
2442
|
+
const traceId = traceCtx?.traceId || generateHexId(32);
|
|
2443
|
+
const spanId = generateHexId(16);
|
|
2444
|
+
const parentSpanId = traceCtx?.parentSpanId;
|
|
2445
|
+
const modelId = params?.model?.modelId || String(params?.model || "unknown");
|
|
2446
|
+
result.usage.then((usage) => {
|
|
2447
|
+
const endTime = Date.now();
|
|
2448
|
+
const attributes = {};
|
|
2449
|
+
if (captureContent) {
|
|
2450
|
+
attributes["gen_ai.request.model"] = modelId;
|
|
2451
|
+
attributes["gen_ai.response.model"] = modelId;
|
|
2452
|
+
if (params?.prompt) {
|
|
2453
|
+
attributes["gen_ai.prompt.0.role"] = "user";
|
|
2454
|
+
attributes["gen_ai.prompt.0.content"] = params.prompt;
|
|
2455
|
+
}
|
|
2456
|
+
attributes["fallom.raw.usage"] = JSON.stringify(usage);
|
|
2457
|
+
}
|
|
2458
|
+
sendTrace({
|
|
2459
|
+
config_key: ctx.configKey,
|
|
2460
|
+
session_id: ctx.sessionId,
|
|
2461
|
+
customer_id: ctx.customerId,
|
|
2462
|
+
trace_id: traceId,
|
|
2463
|
+
span_id: spanId,
|
|
2464
|
+
parent_span_id: parentSpanId,
|
|
2465
|
+
name: "streamText",
|
|
2466
|
+
kind: "llm",
|
|
2467
|
+
model: modelId,
|
|
2468
|
+
start_time: new Date(startTime).toISOString(),
|
|
2469
|
+
end_time: new Date(endTime).toISOString(),
|
|
2470
|
+
duration_ms: endTime - startTime,
|
|
2471
|
+
status: "OK",
|
|
2472
|
+
prompt_tokens: usage?.promptTokens,
|
|
2473
|
+
completion_tokens: usage?.completionTokens,
|
|
2474
|
+
total_tokens: usage?.totalTokens,
|
|
2475
|
+
is_streaming: true,
|
|
2476
|
+
attributes: captureContent ? attributes : void 0
|
|
2477
|
+
}).catch(() => {
|
|
2478
|
+
});
|
|
2479
|
+
}).catch(() => {
|
|
2480
|
+
});
|
|
2481
|
+
return result;
|
|
2482
|
+
};
|
|
2483
|
+
}
|
|
2484
|
+
_createGenerateObjectWrapper(aiModule, ctx) {
|
|
2485
|
+
return async (...args) => {
|
|
2486
|
+
if (!initialized) {
|
|
2487
|
+
return aiModule.generateObject(...args);
|
|
2488
|
+
}
|
|
2489
|
+
const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
|
|
2490
|
+
const traceId = traceCtx?.traceId || generateHexId(32);
|
|
2491
|
+
const spanId = generateHexId(16);
|
|
2492
|
+
const parentSpanId = traceCtx?.parentSpanId;
|
|
2493
|
+
const params = args[0] || {};
|
|
2494
|
+
const startTime = Date.now();
|
|
2495
|
+
try {
|
|
2496
|
+
const result = await aiModule.generateObject(...args);
|
|
2497
|
+
const endTime = Date.now();
|
|
2498
|
+
const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
|
|
2499
|
+
const attributes = {};
|
|
2500
|
+
if (captureContent) {
|
|
2501
|
+
attributes["gen_ai.request.model"] = modelId;
|
|
2502
|
+
attributes["gen_ai.response.model"] = modelId;
|
|
2503
|
+
if (params?.prompt) {
|
|
2504
|
+
attributes["gen_ai.prompt.0.role"] = "user";
|
|
2505
|
+
attributes["gen_ai.prompt.0.content"] = params.prompt;
|
|
2506
|
+
}
|
|
2507
|
+
if (result?.object) {
|
|
2508
|
+
attributes["gen_ai.completion.0.role"] = "assistant";
|
|
2509
|
+
attributes["gen_ai.completion.0.content"] = JSON.stringify(result.object);
|
|
2510
|
+
}
|
|
2511
|
+
attributes["fallom.raw.usage"] = JSON.stringify(result?.usage);
|
|
2512
|
+
attributes["fallom.raw.providerMetadata"] = JSON.stringify(result?.experimental_providerMetadata);
|
|
2513
|
+
}
|
|
2514
|
+
sendTrace({
|
|
2515
|
+
config_key: ctx.configKey,
|
|
2516
|
+
session_id: ctx.sessionId,
|
|
2517
|
+
customer_id: ctx.customerId,
|
|
2518
|
+
trace_id: traceId,
|
|
2519
|
+
span_id: spanId,
|
|
2520
|
+
parent_span_id: parentSpanId,
|
|
2521
|
+
name: "generateObject",
|
|
2522
|
+
kind: "llm",
|
|
2523
|
+
model: modelId,
|
|
2524
|
+
start_time: new Date(startTime).toISOString(),
|
|
2525
|
+
end_time: new Date(endTime).toISOString(),
|
|
2526
|
+
duration_ms: endTime - startTime,
|
|
2527
|
+
status: "OK",
|
|
2528
|
+
prompt_tokens: result?.usage?.promptTokens,
|
|
2529
|
+
completion_tokens: result?.usage?.completionTokens,
|
|
2530
|
+
total_tokens: result?.usage?.totalTokens,
|
|
2531
|
+
attributes: captureContent ? attributes : void 0
|
|
2532
|
+
}).catch(() => {
|
|
2533
|
+
});
|
|
2534
|
+
return result;
|
|
2535
|
+
} catch (error) {
|
|
2536
|
+
const endTime = Date.now();
|
|
2537
|
+
sendTrace({
|
|
2538
|
+
config_key: ctx.configKey,
|
|
2539
|
+
session_id: ctx.sessionId,
|
|
2540
|
+
customer_id: ctx.customerId,
|
|
2541
|
+
trace_id: traceId,
|
|
2542
|
+
span_id: spanId,
|
|
2543
|
+
parent_span_id: parentSpanId,
|
|
2544
|
+
name: "generateObject",
|
|
2545
|
+
kind: "llm",
|
|
2546
|
+
model: params?.model?.modelId || String(params?.model || "unknown"),
|
|
2547
|
+
start_time: new Date(startTime).toISOString(),
|
|
2548
|
+
end_time: new Date(endTime).toISOString(),
|
|
2549
|
+
duration_ms: endTime - startTime,
|
|
2550
|
+
status: "ERROR",
|
|
2551
|
+
error_message: error instanceof Error ? error.message : String(error)
|
|
2552
|
+
}).catch(() => {
|
|
2553
|
+
});
|
|
2554
|
+
throw error;
|
|
2555
|
+
}
|
|
2556
|
+
};
|
|
2557
|
+
}
|
|
2558
|
+
_createStreamObjectWrapper(aiModule, ctx) {
|
|
2559
|
+
return async (...args) => {
|
|
2560
|
+
const params = args[0] || {};
|
|
2561
|
+
const startTime = Date.now();
|
|
2562
|
+
const result = await aiModule.streamObject(...args);
|
|
2563
|
+
if (!initialized) {
|
|
2564
|
+
return result;
|
|
2565
|
+
}
|
|
2566
|
+
const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
|
|
2567
|
+
const traceId = traceCtx?.traceId || generateHexId(32);
|
|
2568
|
+
const spanId = generateHexId(16);
|
|
2569
|
+
const parentSpanId = traceCtx?.parentSpanId;
|
|
2570
|
+
const modelId = params?.model?.modelId || String(params?.model || "unknown");
|
|
2571
|
+
result.usage.then((usage) => {
|
|
2572
|
+
const endTime = Date.now();
|
|
2573
|
+
sendTrace({
|
|
2574
|
+
config_key: ctx.configKey,
|
|
2575
|
+
session_id: ctx.sessionId,
|
|
2576
|
+
customer_id: ctx.customerId,
|
|
2577
|
+
trace_id: traceId,
|
|
2578
|
+
span_id: spanId,
|
|
2579
|
+
parent_span_id: parentSpanId,
|
|
2580
|
+
name: "streamObject",
|
|
2581
|
+
kind: "llm",
|
|
2582
|
+
model: modelId,
|
|
2583
|
+
start_time: new Date(startTime).toISOString(),
|
|
2584
|
+
end_time: new Date(endTime).toISOString(),
|
|
2585
|
+
duration_ms: endTime - startTime,
|
|
2586
|
+
status: "OK",
|
|
2587
|
+
prompt_tokens: usage?.promptTokens,
|
|
2588
|
+
completion_tokens: usage?.completionTokens,
|
|
2589
|
+
total_tokens: usage?.totalTokens,
|
|
2590
|
+
is_streaming: true
|
|
2591
|
+
}).catch(() => {
|
|
2592
|
+
});
|
|
2593
|
+
}).catch(() => {
|
|
2594
|
+
});
|
|
2595
|
+
return result;
|
|
2596
|
+
};
|
|
2597
|
+
}
|
|
2598
|
+
/**
|
|
2599
|
+
* Wrap a Mastra agent to automatically trace all generate calls.
|
|
2600
|
+
* Session context is baked in - no need for setSession().
|
|
2601
|
+
*/
|
|
2602
|
+
wrapMastraAgent(agent) {
|
|
2603
|
+
const ctx = this.ctx;
|
|
2604
|
+
const originalGenerate = agent.generate.bind(agent);
|
|
2605
|
+
const agentName = agent.name || "unknown";
|
|
2606
|
+
agent.generate = async function(...args) {
|
|
2607
|
+
if (!initialized) {
|
|
2608
|
+
return originalGenerate(...args);
|
|
2609
|
+
}
|
|
2610
|
+
const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
|
|
2611
|
+
const traceId = traceCtx?.traceId || generateHexId(32);
|
|
2612
|
+
const spanId = generateHexId(16);
|
|
2613
|
+
const parentSpanId = traceCtx?.parentSpanId;
|
|
2614
|
+
const messages = args[0];
|
|
2615
|
+
const options = args[1] || {};
|
|
2616
|
+
const startTime = Date.now();
|
|
2617
|
+
try {
|
|
2618
|
+
const result = await originalGenerate(...args);
|
|
2619
|
+
const endTime = Date.now();
|
|
2620
|
+
const model = options?.model || result?.model || "unknown";
|
|
2621
|
+
const attributes = {};
|
|
2622
|
+
if (captureContent && Array.isArray(messages)) {
|
|
2623
|
+
messages.forEach((msg, i) => {
|
|
2624
|
+
attributes[`gen_ai.prompt.${i}.role`] = msg.role || "user";
|
|
2625
|
+
attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
2626
|
+
});
|
|
2627
|
+
if (result?.text) {
|
|
2628
|
+
attributes["gen_ai.completion.0.role"] = "assistant";
|
|
2629
|
+
attributes["gen_ai.completion.0.content"] = result.text;
|
|
2630
|
+
}
|
|
2631
|
+
if (result?.usage) {
|
|
2632
|
+
attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
|
|
2633
|
+
}
|
|
2634
|
+
}
|
|
2635
|
+
sendTrace({
|
|
2636
|
+
config_key: ctx.configKey,
|
|
2637
|
+
session_id: ctx.sessionId,
|
|
2638
|
+
customer_id: ctx.customerId,
|
|
2639
|
+
trace_id: traceId,
|
|
2640
|
+
span_id: spanId,
|
|
2641
|
+
parent_span_id: parentSpanId,
|
|
2642
|
+
name: `mastra.${agentName}.generate`,
|
|
2643
|
+
kind: "client",
|
|
2644
|
+
model,
|
|
2645
|
+
start_time: new Date(startTime).toISOString(),
|
|
2646
|
+
end_time: new Date(endTime).toISOString(),
|
|
2647
|
+
duration_ms: endTime - startTime,
|
|
2648
|
+
status: "OK",
|
|
2649
|
+
prompt_tokens: result?.usage?.promptTokens,
|
|
2650
|
+
completion_tokens: result?.usage?.completionTokens,
|
|
2651
|
+
total_tokens: result?.usage?.totalTokens,
|
|
2652
|
+
attributes: captureContent ? attributes : void 0
|
|
2653
|
+
}).catch(() => {
|
|
2654
|
+
});
|
|
2655
|
+
return result;
|
|
2656
|
+
} catch (error) {
|
|
2657
|
+
const endTime = Date.now();
|
|
2658
|
+
sendTrace({
|
|
2659
|
+
config_key: ctx.configKey,
|
|
2660
|
+
session_id: ctx.sessionId,
|
|
2661
|
+
customer_id: ctx.customerId,
|
|
2662
|
+
trace_id: traceId,
|
|
2663
|
+
span_id: spanId,
|
|
2664
|
+
parent_span_id: parentSpanId,
|
|
2665
|
+
name: `mastra.${agentName}.generate`,
|
|
2666
|
+
kind: "client",
|
|
2667
|
+
start_time: new Date(startTime).toISOString(),
|
|
2668
|
+
end_time: new Date(endTime).toISOString(),
|
|
2669
|
+
duration_ms: endTime - startTime,
|
|
2670
|
+
status: "ERROR",
|
|
2671
|
+
error_message: error instanceof Error ? error.message : String(error)
|
|
2672
|
+
}).catch(() => {
|
|
2673
|
+
});
|
|
2674
|
+
throw error;
|
|
2675
|
+
}
|
|
2676
|
+
};
|
|
2677
|
+
return agent;
|
|
2678
|
+
}
|
|
2679
|
+
};
|
|
2680
|
+
function session(options) {
|
|
2681
|
+
return new FallomSession(options);
|
|
2682
|
+
}
|
|
2683
|
+
|
|
2684
|
+
// src/models.ts
|
|
2685
|
+
var apiKey2 = null;
|
|
2686
|
+
var baseUrl2 = "https://configs.fallom.com";
|
|
2687
|
+
var initialized2 = false;
|
|
2688
|
+
var syncInterval = null;
|
|
2689
|
+
var debugMode2 = false;
|
|
2690
|
+
var configCache = /* @__PURE__ */ new Map();
|
|
2691
|
+
var SYNC_TIMEOUT = 2e3;
|
|
2692
|
+
var RECORD_TIMEOUT = 1e3;
|
|
2693
|
+
function log2(msg) {
|
|
2694
|
+
if (debugMode2) {
|
|
2695
|
+
console.log(`[Fallom] ${msg}`);
|
|
2696
|
+
}
|
|
2697
|
+
}
|
|
2698
|
+
function init2(options = {}) {
|
|
2699
|
+
apiKey2 = options.apiKey || process.env.FALLOM_API_KEY || null;
|
|
2700
|
+
baseUrl2 = options.baseUrl || process.env.FALLOM_CONFIGS_URL || process.env.FALLOM_BASE_URL || "https://configs.fallom.com";
|
|
2701
|
+
initialized2 = true;
|
|
2702
|
+
if (!apiKey2) {
|
|
2703
|
+
return;
|
|
2704
|
+
}
|
|
2705
|
+
fetchConfigs().catch(() => {
|
|
2706
|
+
});
|
|
2707
|
+
if (!syncInterval) {
|
|
2708
|
+
syncInterval = setInterval(() => {
|
|
2709
|
+
fetchConfigs().catch(() => {
|
|
2710
|
+
});
|
|
2711
|
+
}, 3e4);
|
|
2712
|
+
syncInterval.unref();
|
|
2713
|
+
}
|
|
2714
|
+
}
|
|
2715
|
+
function ensureInit() {
|
|
2716
|
+
if (!initialized2) {
|
|
2717
|
+
try {
|
|
2718
|
+
init2();
|
|
2719
|
+
} catch {
|
|
2720
|
+
}
|
|
2721
|
+
}
|
|
2722
|
+
}
|
|
2723
|
+
async function fetchConfigs(timeout = SYNC_TIMEOUT) {
|
|
2724
|
+
if (!apiKey2) {
|
|
2725
|
+
log2("_fetchConfigs: No API key, skipping");
|
|
2726
|
+
return;
|
|
2727
|
+
}
|
|
2728
|
+
try {
|
|
2729
|
+
log2(`Fetching configs from ${baseUrl2}/configs`);
|
|
2730
|
+
const controller = new AbortController();
|
|
2731
|
+
const timeoutId = setTimeout(() => controller.abort(), timeout);
|
|
2732
|
+
const resp = await fetch(`${baseUrl2}/configs`, {
|
|
2733
|
+
headers: { Authorization: `Bearer ${apiKey2}` },
|
|
2734
|
+
signal: controller.signal
|
|
2735
|
+
});
|
|
2736
|
+
clearTimeout(timeoutId);
|
|
2737
|
+
log2(`Response status: ${resp.status}`);
|
|
2738
|
+
if (resp.ok) {
|
|
2739
|
+
const data = await resp.json();
|
|
2740
|
+
const configs = data.configs || [];
|
|
2741
|
+
log2(`Got ${configs.length} configs: ${configs.map((c) => c.key)}`);
|
|
2742
|
+
for (const c of configs) {
|
|
2743
|
+
const key = c.key;
|
|
2744
|
+
const version = c.version || 1;
|
|
2745
|
+
log2(`Config '${key}' v${version}: ${JSON.stringify(c.variants)}`);
|
|
2746
|
+
if (!configCache.has(key)) {
|
|
2747
|
+
configCache.set(key, { versions: /* @__PURE__ */ new Map(), latest: null });
|
|
2748
|
+
}
|
|
2749
|
+
const cached = configCache.get(key);
|
|
2750
|
+
cached.versions.set(version, c);
|
|
2751
|
+
cached.latest = version;
|
|
2752
|
+
}
|
|
2753
|
+
} else {
|
|
2754
|
+
log2(`Fetch failed: ${resp.statusText}`);
|
|
2755
|
+
}
|
|
2756
|
+
} catch (e) {
|
|
2757
|
+
log2(`Fetch exception: ${e}`);
|
|
2758
|
+
}
|
|
2759
|
+
}
|
|
2760
|
+
async function fetchSpecificVersion(configKey, version, timeout = SYNC_TIMEOUT) {
|
|
2761
|
+
if (!apiKey2) return null;
|
|
2762
|
+
try {
|
|
2763
|
+
const controller = new AbortController();
|
|
2764
|
+
const timeoutId = setTimeout(() => controller.abort(), timeout);
|
|
2765
|
+
const resp = await fetch(
|
|
2766
|
+
`${baseUrl2}/configs/${configKey}/version/${version}`,
|
|
2767
|
+
{
|
|
2768
|
+
headers: { Authorization: `Bearer ${apiKey2}` },
|
|
2769
|
+
signal: controller.signal
|
|
2770
|
+
}
|
|
2771
|
+
);
|
|
2772
|
+
clearTimeout(timeoutId);
|
|
2773
|
+
if (resp.ok) {
|
|
2774
|
+
const config = await resp.json();
|
|
2775
|
+
if (!configCache.has(configKey)) {
|
|
2776
|
+
configCache.set(configKey, { versions: /* @__PURE__ */ new Map(), latest: null });
|
|
2777
|
+
}
|
|
2778
|
+
configCache.get(configKey).versions.set(version, config);
|
|
2779
|
+
return config;
|
|
2780
|
+
}
|
|
2781
|
+
} catch {
|
|
2782
|
+
}
|
|
2783
|
+
return null;
|
|
2784
|
+
}
|
|
2785
|
+
async function get(configKey, sessionId, options = {}) {
|
|
2786
|
+
const { version, fallback, debug = false } = options;
|
|
2787
|
+
debugMode2 = debug;
|
|
2788
|
+
ensureInit();
|
|
2789
|
+
log2(
|
|
2790
|
+
`get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`
|
|
2791
|
+
);
|
|
2792
|
+
try {
|
|
2793
|
+
let configData = configCache.get(configKey);
|
|
2794
|
+
log2(
|
|
2795
|
+
`Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`
|
|
2796
|
+
);
|
|
2797
|
+
if (!configData) {
|
|
2798
|
+
log2("Not in cache, fetching...");
|
|
2799
|
+
await fetchConfigs(SYNC_TIMEOUT);
|
|
2800
|
+
configData = configCache.get(configKey);
|
|
2801
|
+
log2(
|
|
2802
|
+
`After fetch, cache lookup: ${configData ? "found" : "still not found"}`
|
|
2803
|
+
);
|
|
2804
|
+
}
|
|
2805
|
+
if (!configData) {
|
|
2806
|
+
log2(`Config not found, using fallback: ${fallback}`);
|
|
2807
|
+
if (fallback) {
|
|
2808
|
+
console.warn(
|
|
2809
|
+
`[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`
|
|
2810
|
+
);
|
|
2811
|
+
return returnWithTrace(configKey, sessionId, fallback, 0);
|
|
2812
|
+
}
|
|
2813
|
+
throw new Error(
|
|
2814
|
+
`Config '${configKey}' not found. Check that it exists in your Fallom dashboard.`
|
|
2815
|
+
);
|
|
2816
|
+
}
|
|
2817
|
+
let config;
|
|
2818
|
+
let targetVersion;
|
|
2819
|
+
if (version !== void 0) {
|
|
2820
|
+
config = configData.versions.get(version);
|
|
2821
|
+
if (!config) {
|
|
2822
|
+
config = await fetchSpecificVersion(configKey, version, SYNC_TIMEOUT) || void 0;
|
|
2823
|
+
}
|
|
2824
|
+
if (!config) {
|
|
2825
|
+
if (fallback) {
|
|
2826
|
+
console.warn(
|
|
2827
|
+
`[Fallom WARNING] Config '${configKey}' version ${version} not found, using fallback: ${fallback}`
|
|
2828
|
+
);
|
|
2829
|
+
return returnWithTrace(configKey, sessionId, fallback, 0);
|
|
2830
|
+
}
|
|
2831
|
+
throw new Error(`Config '${configKey}' version ${version} not found.`);
|
|
2832
|
+
}
|
|
2833
|
+
targetVersion = version;
|
|
2834
|
+
} else {
|
|
2835
|
+
targetVersion = configData.latest;
|
|
2836
|
+
config = configData.versions.get(targetVersion);
|
|
2837
|
+
if (!config) {
|
|
2838
|
+
if (fallback) {
|
|
2839
|
+
console.warn(
|
|
2840
|
+
`[Fallom WARNING] Config '${configKey}' has no cached version, using fallback: ${fallback}`
|
|
2841
|
+
);
|
|
2842
|
+
return returnWithTrace(configKey, sessionId, fallback, 0);
|
|
2843
|
+
}
|
|
2844
|
+
throw new Error(`Config '${configKey}' has no cached version.`);
|
|
2845
|
+
}
|
|
2846
|
+
}
|
|
2847
|
+
const variantsRaw = config.variants;
|
|
2848
|
+
const configVersion = config.version || targetVersion;
|
|
2849
|
+
const variants = Array.isArray(variantsRaw) ? variantsRaw : Object.values(variantsRaw);
|
|
2850
|
+
log2(
|
|
2851
|
+
`Config found! Version: ${configVersion}, Variants: ${JSON.stringify(
|
|
2852
|
+
variants
|
|
2853
|
+
)}`
|
|
2854
|
+
);
|
|
2855
|
+
const hashBytes = createHash("md5").update(sessionId).digest();
|
|
2856
|
+
const hashVal = hashBytes.readUInt32BE(0) % 1e6;
|
|
2857
|
+
log2(`Session hash: ${hashVal} (out of 1,000,000)`);
|
|
2858
|
+
let cumulative = 0;
|
|
2859
|
+
let assignedModel = variants[variants.length - 1].model;
|
|
2860
|
+
for (const v of variants) {
|
|
2861
|
+
const oldCumulative = cumulative;
|
|
2862
|
+
cumulative += v.weight * 1e4;
|
|
2863
|
+
log2(
|
|
2864
|
+
`Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`
|
|
2865
|
+
);
|
|
2866
|
+
if (hashVal < cumulative) {
|
|
2867
|
+
assignedModel = v.model;
|
|
2868
|
+
break;
|
|
2869
|
+
}
|
|
2870
|
+
}
|
|
2871
|
+
log2(`\u2705 Assigned model: ${assignedModel}`);
|
|
2872
|
+
return returnWithTrace(configKey, sessionId, assignedModel, configVersion);
|
|
2873
|
+
} catch (e) {
|
|
2874
|
+
if (e instanceof Error && e.message.includes("not found")) {
|
|
2875
|
+
throw e;
|
|
2876
|
+
}
|
|
2877
|
+
if (fallback) {
|
|
2878
|
+
console.warn(
|
|
2879
|
+
`[Fallom WARNING] Error getting model for '${configKey}': ${e}. Using fallback: ${fallback}`
|
|
2880
|
+
);
|
|
2881
|
+
return returnWithTrace(configKey, sessionId, fallback, 0);
|
|
2882
|
+
}
|
|
2883
|
+
throw e;
|
|
2884
|
+
}
|
|
2885
|
+
}
|
|
2886
|
+
function returnWithTrace(configKey, sessionId, model, version) {
|
|
2887
|
+
try {
|
|
2888
|
+
setSession(configKey, sessionId);
|
|
2889
|
+
} catch {
|
|
2890
|
+
}
|
|
2891
|
+
if (version > 0) {
|
|
2892
|
+
recordSession(configKey, version, sessionId, model).catch(() => {
|
|
2893
|
+
});
|
|
2894
|
+
}
|
|
2895
|
+
return model;
|
|
2896
|
+
}
|
|
2897
|
+
async function recordSession(configKey, version, sessionId, model) {
|
|
2898
|
+
if (!apiKey2) return;
|
|
2899
|
+
try {
|
|
2900
|
+
const controller = new AbortController();
|
|
2901
|
+
const timeoutId = setTimeout(() => controller.abort(), RECORD_TIMEOUT);
|
|
2902
|
+
await fetch(`${baseUrl2}/sessions`, {
|
|
2903
|
+
method: "POST",
|
|
2904
|
+
headers: {
|
|
2905
|
+
Authorization: `Bearer ${apiKey2}`,
|
|
2906
|
+
"Content-Type": "application/json"
|
|
2907
|
+
},
|
|
2908
|
+
body: JSON.stringify({
|
|
2909
|
+
config_key: configKey,
|
|
2910
|
+
config_version: version,
|
|
2911
|
+
session_id: sessionId,
|
|
2912
|
+
assigned_model: model
|
|
2913
|
+
}),
|
|
2914
|
+
signal: controller.signal
|
|
2915
|
+
});
|
|
2916
|
+
clearTimeout(timeoutId);
|
|
2917
|
+
} catch {
|
|
2918
|
+
}
|
|
2919
|
+
}
|
|
2920
|
+
|
|
2921
|
+
export {
|
|
2922
|
+
init2 as init,
|
|
2923
|
+
get,
|
|
2924
|
+
models_exports,
|
|
2925
|
+
init as init2,
|
|
2926
|
+
getSession,
|
|
2927
|
+
FallomSession,
|
|
2928
|
+
session,
|
|
2929
|
+
trace_exports
|
|
2930
|
+
};
|