@fallom/trace 0.2.4 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,2237 +0,0 @@
1
- var __defProp = Object.defineProperty;
2
- var __export = (target, all) => {
3
- for (var name in all)
4
- __defProp(target, name, { get: all[name], enumerable: true });
5
- };
6
-
7
- // src/models.ts
8
- var models_exports = {};
9
- __export(models_exports, {
10
- get: () => get,
11
- init: () => init2
12
- });
13
- import { createHash } from "crypto";
14
-
15
- // src/trace.ts
16
- var trace_exports = {};
17
- __export(trace_exports, {
18
- FallomSession: () => FallomSession,
19
- clearSession: () => clearSession,
20
- getSession: () => getSession,
21
- init: () => init,
22
- runWithSession: () => runWithSession,
23
- sendTrace: () => sendTrace,
24
- session: () => session,
25
- setSession: () => setSession,
26
- shutdown: () => shutdown,
27
- span: () => span,
28
- wrapAISDK: () => wrapAISDK,
29
- wrapAnthropic: () => wrapAnthropic,
30
- wrapGoogleAI: () => wrapGoogleAI,
31
- wrapMastraAgent: () => wrapMastraAgent,
32
- wrapOpenAI: () => wrapOpenAI
33
- });
34
-
35
- // src/trace/core.ts
36
- import { AsyncLocalStorage } from "async_hooks";
37
- import { NodeSDK } from "@opentelemetry/sdk-node";
38
- import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-http";
39
-
40
- // node_modules/@opentelemetry/resources/build/esm/Resource.js
41
- import { diag } from "@opentelemetry/api";
42
-
43
- // node_modules/@opentelemetry/resources/node_modules/@opentelemetry/semantic-conventions/build/esm/resource/SemanticResourceAttributes.js
44
- var SemanticResourceAttributes = {
45
- /**
46
- * Name of the cloud provider.
47
- */
48
- CLOUD_PROVIDER: "cloud.provider",
49
- /**
50
- * The cloud account ID the resource is assigned to.
51
- */
52
- CLOUD_ACCOUNT_ID: "cloud.account.id",
53
- /**
54
- * The geographical region the resource is running. Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), or [Google Cloud regions](https://cloud.google.com/about/locations).
55
- */
56
- CLOUD_REGION: "cloud.region",
57
- /**
58
- * Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running.
59
- *
60
- * Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
61
- */
62
- CLOUD_AVAILABILITY_ZONE: "cloud.availability_zone",
63
- /**
64
- * The cloud platform in use.
65
- *
66
- * Note: The prefix of the service SHOULD match the one specified in `cloud.provider`.
67
- */
68
- CLOUD_PLATFORM: "cloud.platform",
69
- /**
70
- * The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
71
- */
72
- AWS_ECS_CONTAINER_ARN: "aws.ecs.container.arn",
73
- /**
74
- * The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
75
- */
76
- AWS_ECS_CLUSTER_ARN: "aws.ecs.cluster.arn",
77
- /**
78
- * The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task.
79
- */
80
- AWS_ECS_LAUNCHTYPE: "aws.ecs.launchtype",
81
- /**
82
- * The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
83
- */
84
- AWS_ECS_TASK_ARN: "aws.ecs.task.arn",
85
- /**
86
- * The task definition family this task definition is a member of.
87
- */
88
- AWS_ECS_TASK_FAMILY: "aws.ecs.task.family",
89
- /**
90
- * The revision for this task definition.
91
- */
92
- AWS_ECS_TASK_REVISION: "aws.ecs.task.revision",
93
- /**
94
- * The ARN of an EKS cluster.
95
- */
96
- AWS_EKS_CLUSTER_ARN: "aws.eks.cluster.arn",
97
- /**
98
- * The name(s) of the AWS log group(s) an application is writing to.
99
- *
100
- * Note: Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group.
101
- */
102
- AWS_LOG_GROUP_NAMES: "aws.log.group.names",
103
- /**
104
- * The Amazon Resource Name(s) (ARN) of the AWS log group(s).
105
- *
106
- * Note: See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
107
- */
108
- AWS_LOG_GROUP_ARNS: "aws.log.group.arns",
109
- /**
110
- * The name(s) of the AWS log stream(s) an application is writing to.
111
- */
112
- AWS_LOG_STREAM_NAMES: "aws.log.stream.names",
113
- /**
114
- * The ARN(s) of the AWS log stream(s).
115
- *
116
- * Note: See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream.
117
- */
118
- AWS_LOG_STREAM_ARNS: "aws.log.stream.arns",
119
- /**
120
- * Container name.
121
- */
122
- CONTAINER_NAME: "container.name",
123
- /**
124
- * Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated.
125
- */
126
- CONTAINER_ID: "container.id",
127
- /**
128
- * The container runtime managing this container.
129
- */
130
- CONTAINER_RUNTIME: "container.runtime",
131
- /**
132
- * Name of the image the container was built on.
133
- */
134
- CONTAINER_IMAGE_NAME: "container.image.name",
135
- /**
136
- * Container image tag.
137
- */
138
- CONTAINER_IMAGE_TAG: "container.image.tag",
139
- /**
140
- * Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier).
141
- */
142
- DEPLOYMENT_ENVIRONMENT: "deployment.environment",
143
- /**
144
- * A unique identifier representing the device.
145
- *
146
- * Note: The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence.
147
- */
148
- DEVICE_ID: "device.id",
149
- /**
150
- * The model identifier for the device.
151
- *
152
- * Note: It's recommended this value represents a machine readable version of the model identifier rather than the market or consumer-friendly name of the device.
153
- */
154
- DEVICE_MODEL_IDENTIFIER: "device.model.identifier",
155
- /**
156
- * The marketing name for the device model.
157
- *
158
- * Note: It's recommended this value represents a human readable version of the device model rather than a machine readable alternative.
159
- */
160
- DEVICE_MODEL_NAME: "device.model.name",
161
- /**
162
- * The name of the single function that this runtime instance executes.
163
- *
164
- * Note: This is the name of the function as configured/deployed on the FaaS platform and is usually different from the name of the callback function (which may be stored in the [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) span attributes).
165
- */
166
- FAAS_NAME: "faas.name",
167
- /**
168
- * The unique ID of the single function that this runtime instance executes.
169
- *
170
- * Note: Depending on the cloud provider, use:
171
-
172
- * **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
173
- Take care not to use the "invoked ARN" directly but replace any
174
- [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invokable with multiple
175
- different aliases.
176
- * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names)
177
- * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id).
178
-
179
- On some providers, it may not be possible to determine the full ID at startup,
180
- which is why this field cannot be made required. For example, on AWS the account ID
181
- part of the ARN is not available without calling another AWS API
182
- which may be deemed too slow for a short-running lambda function.
183
- As an alternative, consider setting `faas.id` as a span attribute instead.
184
- */
185
- FAAS_ID: "faas.id",
186
- /**
187
- * The immutable version of the function being executed.
188
- *
189
- * Note: Depending on the cloud provider and platform, use:
190
-
191
- * **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
192
- (an integer represented as a decimal string).
193
- * **Google Cloud Run:** The [revision](https://cloud.google.com/run/docs/managing/revisions)
194
- (i.e., the function name plus the revision suffix).
195
- * **Google Cloud Functions:** The value of the
196
- [`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
197
- * **Azure Functions:** Not applicable. Do not set this attribute.
198
- */
199
- FAAS_VERSION: "faas.version",
200
- /**
201
- * The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version.
202
- *
203
- * Note: * **AWS Lambda:** Use the (full) log stream name.
204
- */
205
- FAAS_INSTANCE: "faas.instance",
206
- /**
207
- * The amount of memory available to the serverless function in MiB.
208
- *
209
- * Note: It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
210
- */
211
- FAAS_MAX_MEMORY: "faas.max_memory",
212
- /**
213
- * Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider.
214
- */
215
- HOST_ID: "host.id",
216
- /**
217
- * Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user.
218
- */
219
- HOST_NAME: "host.name",
220
- /**
221
- * Type of host. For Cloud, this must be the machine type.
222
- */
223
- HOST_TYPE: "host.type",
224
- /**
225
- * The CPU architecture the host system is running on.
226
- */
227
- HOST_ARCH: "host.arch",
228
- /**
229
- * Name of the VM image or OS install the host was instantiated from.
230
- */
231
- HOST_IMAGE_NAME: "host.image.name",
232
- /**
233
- * VM image ID. For Cloud, this value is from the provider.
234
- */
235
- HOST_IMAGE_ID: "host.image.id",
236
- /**
237
- * The version string of the VM image as defined in [Version SpanAttributes](README.md#version-attributes).
238
- */
239
- HOST_IMAGE_VERSION: "host.image.version",
240
- /**
241
- * The name of the cluster.
242
- */
243
- K8S_CLUSTER_NAME: "k8s.cluster.name",
244
- /**
245
- * The name of the Node.
246
- */
247
- K8S_NODE_NAME: "k8s.node.name",
248
- /**
249
- * The UID of the Node.
250
- */
251
- K8S_NODE_UID: "k8s.node.uid",
252
- /**
253
- * The name of the namespace that the pod is running in.
254
- */
255
- K8S_NAMESPACE_NAME: "k8s.namespace.name",
256
- /**
257
- * The UID of the Pod.
258
- */
259
- K8S_POD_UID: "k8s.pod.uid",
260
- /**
261
- * The name of the Pod.
262
- */
263
- K8S_POD_NAME: "k8s.pod.name",
264
- /**
265
- * The name of the Container in a Pod template.
266
- */
267
- K8S_CONTAINER_NAME: "k8s.container.name",
268
- /**
269
- * The UID of the ReplicaSet.
270
- */
271
- K8S_REPLICASET_UID: "k8s.replicaset.uid",
272
- /**
273
- * The name of the ReplicaSet.
274
- */
275
- K8S_REPLICASET_NAME: "k8s.replicaset.name",
276
- /**
277
- * The UID of the Deployment.
278
- */
279
- K8S_DEPLOYMENT_UID: "k8s.deployment.uid",
280
- /**
281
- * The name of the Deployment.
282
- */
283
- K8S_DEPLOYMENT_NAME: "k8s.deployment.name",
284
- /**
285
- * The UID of the StatefulSet.
286
- */
287
- K8S_STATEFULSET_UID: "k8s.statefulset.uid",
288
- /**
289
- * The name of the StatefulSet.
290
- */
291
- K8S_STATEFULSET_NAME: "k8s.statefulset.name",
292
- /**
293
- * The UID of the DaemonSet.
294
- */
295
- K8S_DAEMONSET_UID: "k8s.daemonset.uid",
296
- /**
297
- * The name of the DaemonSet.
298
- */
299
- K8S_DAEMONSET_NAME: "k8s.daemonset.name",
300
- /**
301
- * The UID of the Job.
302
- */
303
- K8S_JOB_UID: "k8s.job.uid",
304
- /**
305
- * The name of the Job.
306
- */
307
- K8S_JOB_NAME: "k8s.job.name",
308
- /**
309
- * The UID of the CronJob.
310
- */
311
- K8S_CRONJOB_UID: "k8s.cronjob.uid",
312
- /**
313
- * The name of the CronJob.
314
- */
315
- K8S_CRONJOB_NAME: "k8s.cronjob.name",
316
- /**
317
- * The operating system type.
318
- */
319
- OS_TYPE: "os.type",
320
- /**
321
- * Human readable (not intended to be parsed) OS version information, like e.g. reported by `ver` or `lsb_release -a` commands.
322
- */
323
- OS_DESCRIPTION: "os.description",
324
- /**
325
- * Human readable operating system name.
326
- */
327
- OS_NAME: "os.name",
328
- /**
329
- * The version string of the operating system as defined in [Version SpanAttributes](../../resource/semantic_conventions/README.md#version-attributes).
330
- */
331
- OS_VERSION: "os.version",
332
- /**
333
- * Process identifier (PID).
334
- */
335
- PROCESS_PID: "process.pid",
336
- /**
337
- * The name of the process executable. On Linux based systems, can be set to the `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of `GetProcessImageFileNameW`.
338
- */
339
- PROCESS_EXECUTABLE_NAME: "process.executable.name",
340
- /**
341
- * The full path to the process executable. On Linux based systems, can be set to the target of `proc/[pid]/exe`. On Windows, can be set to the result of `GetProcessImageFileNameW`.
342
- */
343
- PROCESS_EXECUTABLE_PATH: "process.executable.path",
344
- /**
345
- * The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter extracted from `GetCommandLineW`.
346
- */
347
- PROCESS_COMMAND: "process.command",
348
- /**
349
- * The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of `GetCommandLineW`. Do not set this if you have to assemble it just for monitoring; use `process.command_args` instead.
350
- */
351
- PROCESS_COMMAND_LINE: "process.command_line",
352
- /**
353
- * All the command arguments (including the command/executable itself) as received by the process. On Linux-based systems (and some other Unixoid systems supporting procfs), can be set according to the list of null-delimited strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be the full argv vector passed to `main`.
354
- */
355
- PROCESS_COMMAND_ARGS: "process.command_args",
356
- /**
357
- * The username of the user that owns the process.
358
- */
359
- PROCESS_OWNER: "process.owner",
360
- /**
361
- * The name of the runtime of this process. For compiled native binaries, this SHOULD be the name of the compiler.
362
- */
363
- PROCESS_RUNTIME_NAME: "process.runtime.name",
364
- /**
365
- * The version of the runtime of this process, as returned by the runtime without modification.
366
- */
367
- PROCESS_RUNTIME_VERSION: "process.runtime.version",
368
- /**
369
- * An additional description about the runtime of the process, for example a specific vendor customization of the runtime environment.
370
- */
371
- PROCESS_RUNTIME_DESCRIPTION: "process.runtime.description",
372
- /**
373
- * Logical name of the service.
374
- *
375
- * Note: MUST be the same for all instances of horizontally scaled services. If the value was not specified, SDKs MUST fallback to `unknown_service:` concatenated with [`process.executable.name`](process.md#process), e.g. `unknown_service:bash`. If `process.executable.name` is not available, the value MUST be set to `unknown_service`.
376
- */
377
- SERVICE_NAME: "service.name",
378
- /**
379
- * A namespace for `service.name`.
380
- *
381
- * Note: A string value having a meaning that helps to distinguish a group of services, for example the team name that owns a group of services. `service.name` is expected to be unique within the same namespace. If `service.namespace` is not specified in the Resource then `service.name` is expected to be unique for all services that have no explicit namespace defined (so the empty/unspecified namespace is simply one more valid namespace). Zero-length namespace string is assumed equal to unspecified namespace.
382
- */
383
- SERVICE_NAMESPACE: "service.namespace",
384
- /**
385
- * The string ID of the service instance.
386
- *
387
- * Note: MUST be unique for each instance of the same `service.namespace,service.name` pair (in other words `service.namespace,service.name,service.instance.id` triplet MUST be globally unique). The ID helps to distinguish instances of the same service that exist at the same time (e.g. instances of a horizontally scaled service). It is preferable for the ID to be persistent and stay the same for the lifetime of the service instance, however it is acceptable that the ID is ephemeral and changes during important lifetime events for the service (e.g. service restarts). If the service has no inherent unique ID that can be used as the value of this attribute it is recommended to generate a random Version 1 or Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use Version 5, see RFC 4122 for more recommendations).
388
- */
389
- SERVICE_INSTANCE_ID: "service.instance.id",
390
- /**
391
- * The version string of the service API or implementation.
392
- */
393
- SERVICE_VERSION: "service.version",
394
- /**
395
- * The name of the telemetry SDK as defined above.
396
- */
397
- TELEMETRY_SDK_NAME: "telemetry.sdk.name",
398
- /**
399
- * The language of the telemetry SDK.
400
- */
401
- TELEMETRY_SDK_LANGUAGE: "telemetry.sdk.language",
402
- /**
403
- * The version string of the telemetry SDK.
404
- */
405
- TELEMETRY_SDK_VERSION: "telemetry.sdk.version",
406
- /**
407
- * The version string of the auto instrumentation agent, if used.
408
- */
409
- TELEMETRY_AUTO_VERSION: "telemetry.auto.version",
410
- /**
411
- * The name of the web engine.
412
- */
413
- WEBENGINE_NAME: "webengine.name",
414
- /**
415
- * The version of the web engine.
416
- */
417
- WEBENGINE_VERSION: "webengine.version",
418
- /**
419
- * Additional description of the web engine (e.g. detailed version and edition information).
420
- */
421
- WEBENGINE_DESCRIPTION: "webengine.description"
422
- };
423
-
424
- // node_modules/@opentelemetry/resources/build/esm/Resource.js
425
- import { SDK_INFO } from "@opentelemetry/core";
426
-
427
- // node_modules/@opentelemetry/resources/build/esm/platform/node/default-service-name.js
428
- function defaultServiceName() {
429
- return "unknown_service:" + process.argv0;
430
- }
431
-
432
- // node_modules/@opentelemetry/resources/build/esm/Resource.js
433
- var __assign = function() {
434
- __assign = Object.assign || function(t) {
435
- for (var s, i = 1, n = arguments.length; i < n; i++) {
436
- s = arguments[i];
437
- for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
438
- t[p] = s[p];
439
- }
440
- return t;
441
- };
442
- return __assign.apply(this, arguments);
443
- };
444
- var __awaiter = function(thisArg, _arguments, P, generator) {
445
- function adopt(value) {
446
- return value instanceof P ? value : new P(function(resolve) {
447
- resolve(value);
448
- });
449
- }
450
- return new (P || (P = Promise))(function(resolve, reject) {
451
- function fulfilled(value) {
452
- try {
453
- step(generator.next(value));
454
- } catch (e) {
455
- reject(e);
456
- }
457
- }
458
- function rejected(value) {
459
- try {
460
- step(generator["throw"](value));
461
- } catch (e) {
462
- reject(e);
463
- }
464
- }
465
- function step(result) {
466
- result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected);
467
- }
468
- step((generator = generator.apply(thisArg, _arguments || [])).next());
469
- });
470
- };
471
- var __generator = function(thisArg, body) {
472
- var _ = { label: 0, sent: function() {
473
- if (t[0] & 1) throw t[1];
474
- return t[1];
475
- }, trys: [], ops: [] }, f, y, t, g;
476
- return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() {
477
- return this;
478
- }), g;
479
- function verb(n) {
480
- return function(v) {
481
- return step([n, v]);
482
- };
483
- }
484
- function step(op) {
485
- if (f) throw new TypeError("Generator is already executing.");
486
- while (_) try {
487
- if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
488
- if (y = 0, t) op = [op[0] & 2, t.value];
489
- switch (op[0]) {
490
- case 0:
491
- case 1:
492
- t = op;
493
- break;
494
- case 4:
495
- _.label++;
496
- return { value: op[1], done: false };
497
- case 5:
498
- _.label++;
499
- y = op[1];
500
- op = [0];
501
- continue;
502
- case 7:
503
- op = _.ops.pop();
504
- _.trys.pop();
505
- continue;
506
- default:
507
- if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) {
508
- _ = 0;
509
- continue;
510
- }
511
- if (op[0] === 3 && (!t || op[1] > t[0] && op[1] < t[3])) {
512
- _.label = op[1];
513
- break;
514
- }
515
- if (op[0] === 6 && _.label < t[1]) {
516
- _.label = t[1];
517
- t = op;
518
- break;
519
- }
520
- if (t && _.label < t[2]) {
521
- _.label = t[2];
522
- _.ops.push(op);
523
- break;
524
- }
525
- if (t[2]) _.ops.pop();
526
- _.trys.pop();
527
- continue;
528
- }
529
- op = body.call(thisArg, _);
530
- } catch (e) {
531
- op = [6, e];
532
- y = 0;
533
- } finally {
534
- f = t = 0;
535
- }
536
- if (op[0] & 5) throw op[1];
537
- return { value: op[0] ? op[1] : void 0, done: true };
538
- }
539
- };
540
- var __read = function(o, n) {
541
- var m = typeof Symbol === "function" && o[Symbol.iterator];
542
- if (!m) return o;
543
- var i = m.call(o), r, ar = [], e;
544
- try {
545
- while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
546
- } catch (error) {
547
- e = { error };
548
- } finally {
549
- try {
550
- if (r && !r.done && (m = i["return"])) m.call(i);
551
- } finally {
552
- if (e) throw e.error;
553
- }
554
- }
555
- return ar;
556
- };
557
- var Resource = (
558
- /** @class */
559
- (function() {
560
- function Resource2(attributes, asyncAttributesPromise) {
561
- var _this = this;
562
- var _a;
563
- this._attributes = attributes;
564
- this.asyncAttributesPending = asyncAttributesPromise != null;
565
- this._syncAttributes = (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
566
- this._asyncAttributesPromise = asyncAttributesPromise === null || asyncAttributesPromise === void 0 ? void 0 : asyncAttributesPromise.then(function(asyncAttributes) {
567
- _this._attributes = Object.assign({}, _this._attributes, asyncAttributes);
568
- _this.asyncAttributesPending = false;
569
- return asyncAttributes;
570
- }, function(err) {
571
- diag.debug("a resource's async attributes promise rejected: %s", err);
572
- _this.asyncAttributesPending = false;
573
- return {};
574
- });
575
- }
576
- Resource2.empty = function() {
577
- return Resource2.EMPTY;
578
- };
579
- Resource2.default = function() {
580
- var _a;
581
- return new Resource2((_a = {}, _a[SemanticResourceAttributes.SERVICE_NAME] = defaultServiceName(), _a[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE], _a[SemanticResourceAttributes.TELEMETRY_SDK_NAME] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_NAME], _a[SemanticResourceAttributes.TELEMETRY_SDK_VERSION] = SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_VERSION], _a));
582
- };
583
- Object.defineProperty(Resource2.prototype, "attributes", {
584
- get: function() {
585
- var _a;
586
- if (this.asyncAttributesPending) {
587
- diag.error("Accessing resource attributes before async attributes settled");
588
- }
589
- return (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
590
- },
591
- enumerable: false,
592
- configurable: true
593
- });
594
- Resource2.prototype.waitForAsyncAttributes = function() {
595
- return __awaiter(this, void 0, void 0, function() {
596
- return __generator(this, function(_a) {
597
- switch (_a.label) {
598
- case 0:
599
- if (!this.asyncAttributesPending) return [3, 2];
600
- return [4, this._asyncAttributesPromise];
601
- case 1:
602
- _a.sent();
603
- _a.label = 2;
604
- case 2:
605
- return [
606
- 2
607
- /*return*/
608
- ];
609
- }
610
- });
611
- });
612
- };
613
- Resource2.prototype.merge = function(other) {
614
- var _this = this;
615
- var _a;
616
- if (!other)
617
- return this;
618
- var mergedSyncAttributes = __assign(__assign({}, this._syncAttributes), (_a = other._syncAttributes) !== null && _a !== void 0 ? _a : other.attributes);
619
- if (!this._asyncAttributesPromise && !other._asyncAttributesPromise) {
620
- return new Resource2(mergedSyncAttributes);
621
- }
622
- var mergedAttributesPromise = Promise.all([
623
- this._asyncAttributesPromise,
624
- other._asyncAttributesPromise
625
- ]).then(function(_a2) {
626
- var _b;
627
- var _c = __read(_a2, 2), thisAsyncAttributes = _c[0], otherAsyncAttributes = _c[1];
628
- return __assign(__assign(__assign(__assign({}, _this._syncAttributes), thisAsyncAttributes), (_b = other._syncAttributes) !== null && _b !== void 0 ? _b : other.attributes), otherAsyncAttributes);
629
- });
630
- return new Resource2(mergedSyncAttributes, mergedAttributesPromise);
631
- };
632
- Resource2.EMPTY = new Resource2({});
633
- return Resource2;
634
- })()
635
- );
636
-
637
- // src/trace/core.ts
638
- var sessionStorage = new AsyncLocalStorage();
639
- var fallbackSession = null;
640
- var traceContextStorage = new AsyncLocalStorage();
641
- var fallbackTraceContext = null;
642
- var apiKey = null;
643
- var baseUrl = "https://traces.fallom.com";
644
- var initialized = false;
645
- var captureContent = true;
646
- var debugMode = false;
647
- var sdk = null;
648
- function log(...args) {
649
- if (debugMode) console.log("[Fallom]", ...args);
650
- }
651
- function getSessionStorage() {
652
- return sessionStorage;
653
- }
654
- function getTraceContextStorage() {
655
- return traceContextStorage;
656
- }
657
- function getFallbackSession() {
658
- return fallbackSession;
659
- }
660
- function getFallbackTraceContext() {
661
- return fallbackTraceContext;
662
- }
663
- function isInitialized() {
664
- return initialized;
665
- }
666
- function shouldCaptureContent() {
667
- return captureContent;
668
- }
669
- function isDebugMode() {
670
- return debugMode;
671
- }
672
- var fallomSpanProcessor = {
673
- onStart(span2, _parentContext) {
674
- log("\u{1F4CD} Span started:", span2.name || "unknown");
675
- const ctx = sessionStorage.getStore() || fallbackSession;
676
- if (ctx) {
677
- span2.setAttribute("fallom.config_key", ctx.configKey);
678
- span2.setAttribute("fallom.session_id", ctx.sessionId);
679
- if (ctx.customerId) {
680
- span2.setAttribute("fallom.customer_id", ctx.customerId);
681
- }
682
- log(
683
- " Added session context:",
684
- ctx.configKey,
685
- ctx.sessionId,
686
- ctx.customerId
687
- );
688
- } else {
689
- log(" No session context available");
690
- }
691
- },
692
- onEnd(span2) {
693
- log("\u2705 Span ended:", span2.name, "duration:", span2.duration);
694
- },
695
- shutdown() {
696
- return Promise.resolve();
697
- },
698
- forceFlush() {
699
- return Promise.resolve();
700
- }
701
- };
702
- async function getInstrumentations() {
703
- const instrumentations = [];
704
- await tryAddInstrumentation(
705
- instrumentations,
706
- "@traceloop/instrumentation-openai",
707
- "OpenAIInstrumentation"
708
- );
709
- await tryAddInstrumentation(
710
- instrumentations,
711
- "@traceloop/instrumentation-anthropic",
712
- "AnthropicInstrumentation"
713
- );
714
- await tryAddInstrumentation(
715
- instrumentations,
716
- "@traceloop/instrumentation-cohere",
717
- "CohereInstrumentation"
718
- );
719
- await tryAddInstrumentation(
720
- instrumentations,
721
- "@traceloop/instrumentation-bedrock",
722
- "BedrockInstrumentation"
723
- );
724
- await tryAddInstrumentation(
725
- instrumentations,
726
- "@traceloop/instrumentation-google-generativeai",
727
- "GoogleGenerativeAIInstrumentation"
728
- );
729
- await tryAddInstrumentation(
730
- instrumentations,
731
- "@traceloop/instrumentation-azure",
732
- "AzureOpenAIInstrumentation"
733
- );
734
- await tryAddInstrumentation(
735
- instrumentations,
736
- "@traceloop/instrumentation-vertexai",
737
- "VertexAIInstrumentation"
738
- );
739
- return instrumentations;
740
- }
741
- async function tryAddInstrumentation(instrumentations, pkg, className) {
742
- try {
743
- const mod = await import(pkg);
744
- const InstrumentationClass = mod[className] || mod.default?.[className];
745
- if (InstrumentationClass) {
746
- instrumentations.push(
747
- new InstrumentationClass({ traceContent: captureContent })
748
- );
749
- log(` \u2705 Loaded ${pkg}`);
750
- } else {
751
- log(
752
- ` \u26A0\uFE0F ${pkg} loaded but ${className} not found. Available:`,
753
- Object.keys(mod)
754
- );
755
- }
756
- } catch {
757
- log(` \u274C ${pkg} not installed`);
758
- }
759
- }
760
/**
 * Initialize Fallom tracing.
 *
 * Resolves API key / base URL / content-capture from `options` and
 * environment variables, builds an OTLP-over-HTTP exporter, loads the
 * optional auto-instrumentations, and starts the OpenTelemetry NodeSDK.
 * Subsequent calls are no-ops once initialized.
 *
 * @param {object} [options]
 * @param {string}  [options.apiKey] - falls back to FALLOM_API_KEY
 * @param {string}  [options.baseUrl] - falls back to FALLOM_TRACES_URL / FALLOM_BASE_URL
 * @param {boolean} [options.debug] - enable verbose logging
 * @param {boolean} [options.captureContent] - capture prompts/completions (default true)
 * @throws {Error} if no API key can be resolved
 */
async function init(options = {}) {
  if (initialized) return;
  debugMode = options.debug ?? false;
  log("\u{1F680} Initializing Fallom tracing...");
  apiKey = options.apiKey || process.env.FALLOM_API_KEY || null;
  baseUrl = options.baseUrl || process.env.FALLOM_TRACES_URL || process.env.FALLOM_BASE_URL || "https://traces.fallom.com";
  // The env var acts as a kill switch: it overrides options.captureContent.
  const envCapture = process.env.FALLOM_CAPTURE_CONTENT?.toLowerCase();
  if (envCapture === "false" || envCapture === "0" || envCapture === "no") {
    captureContent = false;
  } else {
    captureContent = options.captureContent ?? true;
  }
  if (!apiKey) {
    throw new Error(
      "No API key provided. Set FALLOM_API_KEY environment variable or pass apiKey parameter."
    );
  }
  initialized = true;
  log("\u{1F4E1} Exporter URL:", `${baseUrl}/v1/traces`);
  const exporter = new OTLPTraceExporter({
    url: `${baseUrl}/v1/traces`,
    headers: {
      Authorization: `Bearer ${apiKey}`
    }
  });
  const instrumentations = await getInstrumentations();
  log("\u{1F527} Loaded instrumentations:", instrumentations.length);
  sdk = new NodeSDK({
    resource: new Resource({
      "service.name": "fallom-traced-app"
    }),
    traceExporter: exporter,
    spanProcessor: fallomSpanProcessor,
    instrumentations
  });
  sdk.start();
  log("\u2705 SDK started");
  // Flush pending spans on container/process termination.
  process.on("SIGTERM", () => {
    sdk?.shutdown().catch(console.error);
  });
}
801
/**
 * Flush pending spans and stop the OpenTelemetry SDK.
 * Resets the initialized flag so `init()` may be called again later.
 */
async function shutdown() {
  if (!sdk) return;
  await sdk.shutdown();
  initialized = false;
}
807
/**
 * Bind a session context for subsequent traced calls.
 * Updates the active AsyncLocalStorage store in place when one exists,
 * and always records a module-level fallback for code running outside
 * any async context.
 *
 * @param {string} configKey
 * @param {string} sessionId
 * @param {string} [customerId]
 */
function setSession(configKey, sessionId, customerId) {
  const active = sessionStorage.getStore();
  if (active) {
    active.configKey = configKey;
    active.sessionId = sessionId;
    active.customerId = customerId;
  }
  fallbackSession = { configKey, sessionId, customerId };
}
816
/**
 * Run `fn` with a session context scoped via AsyncLocalStorage.
 * Supports two call shapes:
 *   runWithSession(configKey, sessionId, fn)
 *   runWithSession(configKey, sessionId, customerId, fn)
 *
 * @returns whatever the callback returns
 */
function runWithSession(configKey, sessionId, customerIdOrFn, fn) {
  const isShortForm = typeof customerIdOrFn === "function";
  const callback = isShortForm ? customerIdOrFn : fn;
  const store = isShortForm
    ? { configKey, sessionId }
    : { configKey, sessionId, customerId: customerIdOrFn };
  return sessionStorage.run(store, callback);
}
825
/**
 * Return the current session context: the AsyncLocalStorage store when
 * inside one, otherwise the module-level fallback, otherwise undefined.
 */
function getSession() {
  const active = sessionStorage.getStore();
  if (active) return active;
  return fallbackSession ?? void 0;
}
828
/**
 * Clear the module-level fallback session. Note: does NOT clear an active
 * AsyncLocalStorage store — contexts created by runWithSession() end on
 * their own when the callback returns.
 */
function clearSession() {
  fallbackSession = null;
}
831
/**
 * Fire-and-forget delivery of one trace payload to the Fallom backend.
 * Never throws: non-2xx responses and network errors are logged (debug
 * mode only) and swallowed so tracing can never break the host app.
 *
 * Fix: clearTimeout() previously ran only on the success path inside the
 * try block, so a rejected fetch leaked a live 5s abort timer. The timer
 * is now cleared in `finally`.
 *
 * @param {object} trace - trace record (config_key, session_id, spans…)
 */
async function sendTrace(trace) {
  const url = `${baseUrl}/v1/traces`;
  log("\u{1F4E4} Sending trace to:", url);
  log(" Session:", trace.session_id, "Config:", trace.config_key);
  const controller = new AbortController();
  // Hard 5s cap so a slow collector cannot stall the event loop.
  const timeoutId = setTimeout(() => controller.abort(), 5e3);
  try {
    const response = await fetch(url, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify(trace),
      signal: controller.signal
    });
    if (!response.ok) {
      const text = await response.text();
      log("\u274C Trace send failed:", response.status, text);
    } else {
      log("\u2705 Trace sent:", trace.name, trace.model);
    }
  } catch (err) {
    // Deliberate best-effort: delivery failure is only surfaced via debug log.
    log("\u274C Trace send error:", err instanceof Error ? err.message : err);
  } finally {
    clearTimeout(timeoutId);
  }
}
858
/**
 * Record a custom span against the current session (or one supplied
 * explicitly via options). Delivery is fire-and-forget.
 *
 * @param {object} data - arbitrary span payload
 * @param {object} [options] - optional { configKey, sessionId } override
 * @throws {Error} when tracing is uninitialized or no session is resolvable
 */
function span(data, options = {}) {
  if (!initialized) {
    throw new Error("Fallom not initialized. Call trace.init() first.");
  }
  const active = sessionStorage.getStore() || fallbackSession;
  const configKey = options.configKey || active?.configKey;
  const sessionId = options.sessionId || active?.sessionId;
  if (!configKey || !sessionId) {
    throw new Error(
      "No session context. Either call setSession() first, or pass configKey and sessionId explicitly."
    );
  }
  // Fire-and-forget: a failed send must never affect the caller.
  sendSpan(configKey, sessionId, data).catch(() => {});
}
873
/**
 * Best-effort POST of a custom span to the Fallom backend. All failures
 * (network errors, aborts, non-2xx) are intentionally ignored.
 *
 * Fix: clearTimeout() previously ran only when fetch resolved, so a
 * rejected fetch leaked a live 5s abort timer. It now runs in `finally`.
 *
 * @param {string} configKey
 * @param {string} sessionId
 * @param {object} data - span payload
 */
async function sendSpan(configKey, sessionId, data) {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), 5e3);
  try {
    await fetch(`${baseUrl}/spans`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        config_key: configKey,
        session_id: sessionId,
        data
      }),
      signal: controller.signal
    });
  } catch {
    // Swallow: span delivery is fire-and-forget by design.
  } finally {
    clearTimeout(timeoutId);
  }
}
894
-
895
- // src/trace/utils.ts
896
- function generateHexId(length) {
897
- const bytes = new Uint8Array(length / 2);
898
- crypto.getRandomValues(bytes);
899
- return Array.from(bytes).map((b) => b.toString(16).padStart(2, "0")).join("");
900
- }
901
/**
 * Flatten chat messages and a completion into OpenTelemetry gen_ai.*
 * span attributes (gen_ai.prompt.N.role/content, gen_ai.completion.0.*,
 * gen_ai.request/response.model, gen_ai.response.id).
 *
 * Non-string message content is JSON-stringified.
 *
 * @param {Array<{role:string,content:any}>} [messages]
 * @param {{role:string,content:any,tool_calls?:any}} [completion]
 * @param {string} [model]
 * @param {string} [responseId]
 * @returns {Record<string,string>} flat attribute map
 */
function messagesToOtelAttributes(messages, completion, model, responseId) {
  const attrs = {};
  const asText = (content) =>
    typeof content === "string" ? content : JSON.stringify(content);
  if (model) {
    attrs["gen_ai.request.model"] = model;
    attrs["gen_ai.response.model"] = model;
  }
  if (responseId) {
    attrs["gen_ai.response.id"] = responseId;
  }
  if (messages) {
    let i = 0;
    for (const msg of messages) {
      attrs[`gen_ai.prompt.${i}.role`] = msg.role;
      attrs[`gen_ai.prompt.${i}.content`] = asText(msg.content);
      i += 1;
    }
  }
  if (completion) {
    attrs["gen_ai.completion.0.role"] = completion.role;
    attrs["gen_ai.completion.0.content"] = asText(completion.content);
    if (completion.tool_calls) {
      attrs["gen_ai.completion.0.tool_calls"] = JSON.stringify(
        completion.tool_calls
      );
    }
  }
  return attrs;
}
927
-
928
- // src/trace/wrappers/openai.ts
929
/**
 * Monkey-patch an OpenAI SDK client so every chat.completions.create call
 * emits a Fallom trace. The original method's behavior and return value
 * are unchanged; tracing is fire-and-forget.
 *
 * @param {object} client - OpenAI client instance (mutated in place)
 * @param {object} [sessionCtx] - explicit session; otherwise resolved from
 *   AsyncLocalStorage or the module fallback at call time
 * @returns the same client, for chaining
 */
function wrapOpenAI(client, sessionCtx) {
  const originalCreate = client.chat.completions.create.bind(
    client.chat.completions
  );
  client.chat.completions.create = async function(...args) {
    // Resolve session lazily per call so wrapping can happen before init().
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    if (!ctx || !isInitialized()) {
      return originalCreate(...args);
    }
    // Join an existing trace when inside one; otherwise start a new trace.
    const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    const captureContent2 = shouldCaptureContent();
    try {
      const response = await originalCreate(...args);
      const endTime = Date.now();
      // Prompt/completion content only when capture is enabled.
      const attributes = captureContent2 ? messagesToOtelAttributes(
        params?.messages,
        response?.choices?.[0]?.message,
        response?.model || params?.model,
        response?.id
      ) : {};
      if (response?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
      }
      if (response?.choices?.[0]?.finish_reason) {
        attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "chat.completions.create",
        kind: "llm",
        model: response?.model || params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: response?.usage?.prompt_tokens,
        completion_tokens: response?.usage?.completion_tokens,
        total_tokens: response?.usage?.total_tokens,
        attributes: Object.keys(attributes).length > 0 ? attributes : void 0
      }).catch(() => {
      });
      return response;
    } catch (error) {
      // Emit an ERROR trace, then rethrow the original error untouched.
      const endTime = Date.now();
      const attributes = captureContent2 ? messagesToOtelAttributes(
        params?.messages,
        void 0,
        params?.model,
        void 0
      ) : void 0;
      if (attributes) {
        attributes["error.message"] = error?.message;
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "chat.completions.create",
        kind: "llm",
        model: params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        attributes
      }).catch(() => {
      });
      throw error;
    }
  };
  return client;
}
1015
-
1016
- // src/trace/wrappers/anthropic.ts
1017
/**
 * Monkey-patch an Anthropic SDK client so every messages.create call emits
 * a Fallom trace. Mirrors wrapOpenAI but maps Anthropic's response shape
 * (content[0].text, stop_reason, input/output_tokens) onto gen_ai.* attrs.
 *
 * @param {object} client - Anthropic client instance (mutated in place)
 * @param {object} [sessionCtx] - explicit session override
 * @returns the same client, for chaining
 */
function wrapAnthropic(client, sessionCtx) {
  const originalCreate = client.messages.create.bind(client.messages);
  client.messages.create = async function(...args) {
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    if (!ctx || !isInitialized()) {
      return originalCreate(...args);
    }
    const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    const captureContent2 = shouldCaptureContent();
    try {
      const response = await originalCreate(...args);
      const endTime = Date.now();
      // Anthropic puts the completion text at content[0].text.
      const attributes = captureContent2 ? messagesToOtelAttributes(
        params?.messages,
        { role: "assistant", content: response?.content?.[0]?.text || "" },
        response?.model || params?.model,
        response?.id
      ) : {};
      // System prompt is a top-level param in the Anthropic API.
      if (params?.system) {
        attributes["gen_ai.system_prompt"] = params.system;
      }
      if (response?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
      }
      if (response?.stop_reason) {
        attributes["gen_ai.response.finish_reason"] = response.stop_reason;
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "messages.create",
        kind: "llm",
        model: response?.model || params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: response?.usage?.input_tokens,
        completion_tokens: response?.usage?.output_tokens,
        // Anthropic reports no total; derive it from input + output.
        total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
        attributes: Object.keys(attributes).length > 0 ? attributes : void 0
      }).catch(() => {
      });
      return response;
    } catch (error) {
      const endTime = Date.now();
      const attributes = captureContent2 ? messagesToOtelAttributes(
        params?.messages,
        void 0,
        params?.model,
        void 0
      ) : void 0;
      if (attributes) {
        attributes["error.message"] = error?.message;
        if (params?.system) {
          attributes["gen_ai.system_prompt"] = params.system;
        }
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "messages.create",
        kind: "llm",
        model: params?.model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        attributes
      }).catch(() => {
      });
      throw error;
    }
  };
  return client;
}
1107
-
1108
- // src/trace/wrappers/google-ai.ts
1109
/**
 * Monkey-patch a Google Generative AI model so every generateContent call
 * emits a Fallom trace. Handles both string prompts and { contents: [...] }
 * inputs; usage comes from response.usageMetadata.
 *
 * @param {object} model - GenerativeModel instance (mutated in place)
 * @param {object} [sessionCtx] - explicit session override
 * @returns the same model, for chaining
 */
function wrapGoogleAI(model, sessionCtx) {
  const originalGenerate = model.generateContent.bind(model);
  model.generateContent = async function(...args) {
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    if (!ctx || !isInitialized()) {
      return originalGenerate(...args);
    }
    const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const startTime = Date.now();
    const captureContent2 = shouldCaptureContent();
    try {
      const response = await originalGenerate(...args);
      const endTime = Date.now();
      // Google nests the actual result under `.response`.
      const result = response?.response;
      const usage = result?.usageMetadata;
      const modelName = model?.model || "gemini";
      const attributes = {};
      if (captureContent2) {
        attributes["gen_ai.request.model"] = modelName;
        attributes["gen_ai.response.model"] = modelName;
        // Input is either a bare string or a { contents: [...] } request.
        const input = args[0];
        if (typeof input === "string") {
          attributes["gen_ai.prompt.0.role"] = "user";
          attributes["gen_ai.prompt.0.content"] = input;
        } else if (input?.contents) {
          input.contents.forEach((content, i) => {
            attributes[`gen_ai.prompt.${i}.role`] = content.role || "user";
            attributes[`gen_ai.prompt.${i}.content`] = content.parts?.[0]?.text || JSON.stringify(content.parts);
          });
        }
        // result.text() is a method on the Google response object.
        const outputText = result?.text?.();
        if (outputText) {
          attributes["gen_ai.completion.0.role"] = "assistant";
          attributes["gen_ai.completion.0.content"] = outputText;
        }
      }
      if (usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(usage);
      }
      const candidate = result?.candidates?.[0];
      if (candidate?.finishReason) {
        attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateContent",
        kind: "llm",
        model: modelName,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: usage?.promptTokenCount,
        completion_tokens: usage?.candidatesTokenCount,
        total_tokens: usage?.totalTokenCount,
        attributes: Object.keys(attributes).length > 0 ? attributes : void 0
      }).catch(() => {
      });
      return response;
    } catch (error) {
      const endTime = Date.now();
      const modelName = model?.model || "gemini";
      const attributes = {};
      if (captureContent2) {
        attributes["gen_ai.request.model"] = modelName;
        attributes["error.message"] = error?.message;
        const input = args[0];
        if (typeof input === "string") {
          attributes["gen_ai.prompt.0.role"] = "user";
          attributes["gen_ai.prompt.0.content"] = input;
        }
      }
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateContent",
        kind: "llm",
        model: modelName,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message,
        attributes: captureContent2 ? attributes : void 0
      }).catch(() => {
      });
      throw error;
    }
  };
  return model;
}
1212
-
1213
- // src/trace/wrappers/vercel-ai/utils.ts
1214
/**
 * Normalize token usage out of a Vercel AI SDK result.
 *
 * Reads `directUsage` when provided (stream case), else `result.usage`,
 * treating null/undefined/NaN counts as absent. Falls back field-by-field
 * to OpenRouter's provider metadata, which is also the only source of
 * `cost`. When no total is reported, derives it from prompt + completion.
 *
 * @param {object} [result] - AI SDK result (may carry experimental_providerMetadata)
 * @param {object} [directUsage] - usage object that overrides result.usage
 * @returns {{promptTokens?:number, completionTokens?:number, totalTokens?:number, cost?:number}}
 */
function extractUsageFromResult(result, directUsage) {
  const num = (v) =>
    v === null || v === void 0 || Number.isNaN(v) ? void 0 : v;
  const usage = directUsage ?? result?.usage;
  let promptTokens = num(usage?.promptTokens);
  let completionTokens = num(usage?.completionTokens);
  let totalTokens = num(usage?.totalTokens);
  let cost;
  const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
  if (orUsage) {
    promptTokens = promptTokens ?? num(orUsage.promptTokens);
    completionTokens = completionTokens ?? num(orUsage.completionTokens);
    totalTokens = totalTokens ?? num(orUsage.totalTokens);
    cost = num(orUsage.cost);
  }
  if (totalTokens === void 0 && (promptTokens !== void 0 || completionTokens !== void 0)) {
    totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
  }
  return { promptTokens, completionTokens, totalTokens, cost };
}
1241
-
1242
- // src/trace/wrappers/vercel-ai/generate-text.ts
1243
/**
 * Build a traced drop-in replacement for the Vercel AI SDK's generateText.
 * The wrapper delegates to aiModule.generateText unchanged, then emits a
 * Fallom trace with model, content (when capture is on), raw usage and
 * provider metadata.
 *
 * @param {object} aiModule - the imported `ai` module
 * @param {object} [sessionCtx] - explicit session override
 * @param {boolean} [debug=false] - extra console diagnostics per call
 * @returns {Function} async wrapper with generateText's signature
 */
function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  return async (...args) => {
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    if (!ctx || !isInitialized()) {
      return aiModule.generateText(...args);
    }
    const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    const captureContent2 = shouldCaptureContent();
    try {
      const result = await aiModule.generateText(...args);
      const endTime = Date.now();
      if (debug || isDebugMode()) {
        console.log(
          "\n\u{1F50D} [Fallom Debug] generateText result keys:",
          Object.keys(result || {})
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.usage:",
          JSON.stringify(result?.usage, null, 2)
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
          JSON.stringify(result?.experimental_providerMetadata, null, 2)
        );
      }
      // Prefer the model the provider actually answered with.
      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
      const attributes = {};
      if (captureContent2) {
        attributes["gen_ai.request.model"] = modelId;
        attributes["gen_ai.response.model"] = modelId;
        // generateText accepts either a flat `prompt` or a `messages` array.
        if (params?.prompt) {
          attributes["gen_ai.prompt.0.role"] = "user";
          attributes["gen_ai.prompt.0.content"] = params.prompt;
        }
        if (params?.messages) {
          params.messages.forEach((msg, i) => {
            attributes[`gen_ai.prompt.${i}.role`] = msg.role;
            attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
          });
        }
        if (result?.text) {
          attributes["gen_ai.completion.0.role"] = "assistant";
          attributes["gen_ai.completion.0.content"] = result.text;
        }
        if (result?.response?.id) {
          attributes["gen_ai.response.id"] = result.response.id;
        }
      }
      if (result?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
      }
      if (result?.experimental_providerMetadata) {
        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
          result.experimental_providerMetadata
        );
      }
      if (result?.finishReason) {
        attributes["gen_ai.response.finish_reason"] = result.finishReason;
      }
      const usage = extractUsageFromResult(result);
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateText",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: usage.promptTokens,
        completion_tokens: usage.completionTokens,
        total_tokens: usage.totalTokens,
        attributes: captureContent2 ? attributes : void 0
      }).catch(() => {
      });
      return result;
    } catch (error) {
      const endTime = Date.now();
      const modelId = params?.model?.modelId || String(params?.model || "unknown");
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateText",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message
      }).catch(() => {
      });
      throw error;
    }
  };
}
1353
-
1354
- // src/trace/wrappers/vercel-ai/stream-text.ts
1355
// Debug-gated console logger for the streamText wrapper.
function log2(...args) {
  if (!isDebugMode()) return;
  console.log("[Fallom]", ...args);
}
1358
/**
 * Build a traced drop-in replacement for the Vercel AI SDK's streamText.
 * The stream starts immediately; the trace is emitted asynchronously when
 * result.usage resolves (i.e. when the stream finishes). textStream is
 * proxied only to record time-to-first-token.
 *
 * @param {object} aiModule - the imported `ai` module
 * @param {object} [sessionCtx] - explicit session override
 * @param {boolean} [debug=false] - extra console diagnostics per call
 * @returns {Function} async wrapper with streamText's signature
 */
function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  return async (...args) => {
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    const params = args[0] || {};
    const startTime = Date.now();
    const captureContent2 = shouldCaptureContent();
    // Start the stream first — tracing must never delay first output.
    const result = await aiModule.streamText(...args);
    if (!ctx || !isInitialized()) {
      return result;
    }
    const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    let firstTokenTime = null;
    const modelId = params?.model?.modelId || String(params?.model || "unknown");
    if (result?.usage) {
      // result.usage is a promise that settles when the stream completes.
      result.usage.then(async (rawUsage) => {
        const endTime = Date.now();
        if (debug || isDebugMode()) {
          console.log(
            "\n\u{1F50D} [Fallom Debug] streamText usage:",
            JSON.stringify(rawUsage, null, 2)
          );
        }
        log2("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
        // providerMetadata may itself be a promise; resolve it defensively.
        let providerMetadata = result?.experimental_providerMetadata;
        if (providerMetadata && typeof providerMetadata.then === "function") {
          try {
            providerMetadata = await providerMetadata;
          } catch {
            providerMetadata = void 0;
          }
        }
        const usage = extractUsageFromResult(
          { experimental_providerMetadata: providerMetadata },
          rawUsage
        );
        const attributes = {};
        if (captureContent2) {
          attributes["gen_ai.request.model"] = modelId;
          if (params?.prompt) {
            attributes["gen_ai.prompt.0.role"] = "user";
            attributes["gen_ai.prompt.0.content"] = params.prompt;
          }
        }
        if (firstTokenTime) {
          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
        }
        if (rawUsage) {
          attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
        }
        if (providerMetadata) {
          attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
        }
        const tracePayload = {
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamText",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "OK",
          prompt_tokens: usage.promptTokens,
          completion_tokens: usage.completionTokens,
          total_tokens: usage.totalTokens,
          time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
          attributes: captureContent2 ? attributes : void 0
        };
        sendTrace(tracePayload).catch(() => {
        });
      }).catch((error) => {
        // Stream failed mid-flight: record an ERROR trace instead.
        const endTime = Date.now();
        log2("\u274C streamText error:", error?.message);
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamText",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "ERROR",
          error_message: error?.message
        }).catch(() => {
        });
      });
    }
    if (result?.textStream) {
      // Wrap the text stream only to timestamp the first emitted chunk.
      const originalTextStream = result.textStream;
      const wrappedTextStream = (async function* () {
        for await (const chunk of originalTextStream) {
          if (!firstTokenTime) {
            firstTokenTime = Date.now();
            log2("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
          }
          yield chunk;
        }
      })();
      // Proxy so every other property still comes from the live result.
      return new Proxy(result, {
        get(target, prop) {
          if (prop === "textStream") {
            return wrappedTextStream;
          }
          return target[prop];
        }
      });
    }
    return result;
  };
}
1480
-
1481
- // src/trace/wrappers/vercel-ai/generate-object.ts
1482
/**
 * Build a traced drop-in replacement for the Vercel AI SDK's generateObject.
 * Delegates to aiModule.generateObject unchanged, then emits a Fallom trace;
 * the structured result object is JSON-stringified as the completion content
 * when capture is enabled.
 *
 * @param {object} aiModule - the imported `ai` module
 * @param {object} [sessionCtx] - explicit session override
 * @param {boolean} [debug=false] - extra console diagnostics per call
 * @returns {Function} async wrapper with generateObject's signature
 */
function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  return async (...args) => {
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    if (!ctx || !isInitialized()) {
      return aiModule.generateObject(...args);
    }
    const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    const params = args[0] || {};
    const startTime = Date.now();
    const captureContent2 = shouldCaptureContent();
    try {
      const result = await aiModule.generateObject(...args);
      const endTime = Date.now();
      if (debug || isDebugMode()) {
        console.log(
          "\n\u{1F50D} [Fallom Debug] generateObject result keys:",
          Object.keys(result || {})
        );
        console.log(
          "\u{1F50D} [Fallom Debug] result.usage:",
          JSON.stringify(result?.usage, null, 2)
        );
      }
      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
      const attributes = {};
      if (captureContent2) {
        attributes["gen_ai.request.model"] = modelId;
        attributes["gen_ai.response.model"] = modelId;
        if (result?.object) {
          attributes["gen_ai.completion.0.role"] = "assistant";
          attributes["gen_ai.completion.0.content"] = JSON.stringify(
            result.object
          );
        }
      }
      if (result?.usage) {
        attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
      }
      if (result?.experimental_providerMetadata) {
        attributes["fallom.raw.providerMetadata"] = JSON.stringify(
          result.experimental_providerMetadata
        );
      }
      if (result?.finishReason) {
        attributes["gen_ai.response.finish_reason"] = result.finishReason;
      }
      const usage = extractUsageFromResult(result);
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateObject",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: usage.promptTokens,
        completion_tokens: usage.completionTokens,
        total_tokens: usage.totalTokens,
        attributes: captureContent2 ? attributes : void 0
      }).catch(() => {
      });
      return result;
    } catch (error) {
      const endTime = Date.now();
      const modelId = params?.model?.modelId || String(params?.model || "unknown");
      sendTrace({
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        parent_span_id: parentSpanId,
        name: "generateObject",
        kind: "llm",
        model: modelId,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error?.message
      }).catch(() => {
      });
      throw error;
    }
  };
}
1577
-
1578
- // src/trace/wrappers/vercel-ai/stream-object.ts
1579
// Debug-gated console logger for the streamObject wrapper.
function log3(...args) {
  if (!isDebugMode()) return;
  console.log("[Fallom]", ...args);
}
1582
/**
 * Build a traced drop-in replacement for the Vercel AI SDK's streamObject.
 * Mirrors createStreamTextWrapper: the trace is emitted when result.usage
 * resolves, and partialObjectStream is proxied only to record
 * time-to-first-token.
 *
 * @param {object} aiModule - the imported `ai` module
 * @param {object} [sessionCtx] - explicit session override
 * @param {boolean} [debug=false] - extra console diagnostics per call
 * @returns {Function} async wrapper with streamObject's signature
 */
function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  return async (...args) => {
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    const params = args[0] || {};
    const startTime = Date.now();
    const captureContent2 = shouldCaptureContent();
    // Start the stream first — tracing must never delay first output.
    const result = await aiModule.streamObject(...args);
    log3("\u{1F50D} streamObject result keys:", Object.keys(result || {}));
    if (!ctx || !isInitialized()) {
      return result;
    }
    const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
    const traceId = traceCtx?.traceId || generateHexId(32);
    const spanId = generateHexId(16);
    const parentSpanId = traceCtx?.parentSpanId;
    let firstTokenTime = null;
    const modelId = params?.model?.modelId || String(params?.model || "unknown");
    if (result?.usage) {
      // result.usage settles when the stream completes.
      result.usage.then(async (rawUsage) => {
        const endTime = Date.now();
        if (debug || isDebugMode()) {
          console.log(
            "\n\u{1F50D} [Fallom Debug] streamObject usage:",
            JSON.stringify(rawUsage, null, 2)
          );
        }
        log3("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
        // providerMetadata may itself be a promise; resolve it defensively.
        let providerMetadata = result?.experimental_providerMetadata;
        if (providerMetadata && typeof providerMetadata.then === "function") {
          try {
            providerMetadata = await providerMetadata;
          } catch {
            providerMetadata = void 0;
          }
        }
        const usage = extractUsageFromResult(
          { experimental_providerMetadata: providerMetadata },
          rawUsage
        );
        const attributes = {};
        if (captureContent2) {
          attributes["gen_ai.request.model"] = modelId;
        }
        if (firstTokenTime) {
          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
        }
        if (rawUsage) {
          attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
        }
        if (providerMetadata) {
          attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
        }
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamObject",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "OK",
          prompt_tokens: usage.promptTokens,
          completion_tokens: usage.completionTokens,
          total_tokens: usage.totalTokens,
          attributes: captureContent2 ? attributes : void 0
        }).catch(() => {
        });
      }).catch((error) => {
        // Stream failed mid-flight: record an ERROR trace instead.
        const endTime = Date.now();
        sendTrace({
          config_key: ctx.configKey,
          session_id: ctx.sessionId,
          customer_id: ctx.customerId,
          trace_id: traceId,
          span_id: spanId,
          parent_span_id: parentSpanId,
          name: "streamObject",
          kind: "llm",
          model: modelId,
          start_time: new Date(startTime).toISOString(),
          end_time: new Date(endTime).toISOString(),
          duration_ms: endTime - startTime,
          status: "ERROR",
          error_message: error?.message
        }).catch(() => {
        });
      });
    }
    if (result?.partialObjectStream) {
      // Wrap only to timestamp the first emitted partial object.
      const originalStream = result.partialObjectStream;
      const wrappedStream = (async function* () {
        for await (const chunk of originalStream) {
          if (!firstTokenTime) {
            firstTokenTime = Date.now();
            log3("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
          }
          yield chunk;
        }
      })();
      return new Proxy(result, {
        get(target, prop) {
          if (prop === "partialObjectStream") {
            return wrappedStream;
          }
          return target[prop];
        }
      });
    }
    return result;
  };
}
1698
-
1699
// src/trace/wrappers/vercel-ai/index.ts
/**
 * Wrap the Vercel AI SDK entry points so every call emits a Fallom trace.
 *
 * `generateText` and `streamText` are always wrapped. Object helpers are
 * only wrapped when the provided SDK object actually exposes them (older
 * SDK versions may not); otherwise those keys are left `undefined`, which
 * matches the shape callers already expect.
 *
 * @param ai         The imported Vercel AI SDK module/object.
 * @param sessionCtx Optional explicit session context; falls back to
 *                   ambient session storage inside the wrappers.
 * @param options    `{ debug?: boolean }` — enables wrapper debug logging.
 */
function wrapAISDK(ai, sessionCtx, options) {
  const debug = options?.debug ?? false;
  const wrapped = {
    generateText: createGenerateTextWrapper(ai, sessionCtx, debug),
    streamText: createStreamTextWrapper(ai, sessionCtx, debug),
    generateObject: void 0,
    streamObject: void 0
  };
  if (ai.generateObject) {
    wrapped.generateObject = createGenerateObjectWrapper(ai, sessionCtx, debug);
  }
  if (ai.streamObject) {
    wrapped.streamObject = createStreamObjectWrapper(ai, sessionCtx, debug);
  }
  return wrapped;
}
1709
-
1710
// src/trace/wrappers/mastra.ts
/**
 * Monkey-patches a Mastra agent's `generate` method (in place) so every call
 * sends a Fallom trace, then returns the same agent instance.
 *
 * Behavior:
 *  - If no session context is resolvable or tracing is not initialized, the
 *    original `generate` is invoked untouched.
 *  - On success, token usage, prompt/completion content, and any tool calls
 *    found in `result.steps` are flattened into OpenTelemetry-style gen_ai
 *    attributes and sent via `sendTrace` (fire-and-forget).
 *  - On failure, an ERROR trace is sent and the original error is rethrown.
 *
 * NOTE(review): tool calls are paired with tool results by array index
 * (`step.toolResults?.[i]`) — assumes Mastra keeps both arrays aligned;
 * verify against the Mastra result shape.
 *
 * @param agent      A Mastra agent exposing `generate` (and optionally `name`).
 * @param sessionCtx Optional explicit session context; falls back to ambient
 *                   async-local session storage, then the fallback session.
 * @returns The same (mutated) agent.
 */
function wrapMastraAgent(agent, sessionCtx) {
  const originalGenerate = agent.generate.bind(agent);
  const agentName = agent.name || "MastraAgent";
  agent.generate = async function(...args) {
    // Resolve session: explicit ctx wins, then async-local store, then fallback.
    const ctx = sessionCtx || getSessionStorage().getStore() || getFallbackSession();
    if (!ctx || !isInitialized()) {
      // Tracing unavailable — behave exactly like the unwrapped agent.
      return originalGenerate(...args);
    }
    const traceId = generateHexId(32);
    const spanId = generateHexId(16);
    const startTime = Date.now();
    // First positional argument is treated as the message list (may be a string
    // or other shape — the Array.isArray check below guards attribute capture).
    const messages = args[0] || [];
    try {
      const result = await originalGenerate(...args);
      const endTime = Date.now();
      const model = result?.model?.modelId || "unknown";
      // Collect tool invocations across all steps, pairing each call with the
      // result at the same index.
      const toolCalls = [];
      if (result?.steps?.length) {
        for (const step of result.steps) {
          if (step.toolCalls?.length) {
            for (let i = 0; i < step.toolCalls.length; i++) {
              const tc = step.toolCalls[i];
              const tr = step.toolResults?.[i];
              toolCalls.push({
                name: tc.toolName,
                arguments: tc.args,
                result: tr?.result
              });
            }
          }
        }
      }
      // Base attributes follow the gen_ai semantic-convention key naming.
      const attributes = {
        "gen_ai.system": "Mastra",
        "gen_ai.request.model": model,
        "gen_ai.response.model": model,
        "fallom.source": "mastra-agent",
        "llm.request.type": "chat"
      };
      // Capture each prompt message; non-string content is JSON-serialized.
      if (Array.isArray(messages)) {
        messages.forEach((msg, i) => {
          attributes[`gen_ai.prompt.${i}.role`] = msg.role || "user";
          attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
        });
      }
      if (result?.text) {
        attributes["gen_ai.completion.0.role"] = "assistant";
        attributes["gen_ai.completion.0.content"] = result.text;
        attributes["gen_ai.completion.0.finish_reason"] = "stop";
      }
      if (toolCalls.length > 0) {
        // Full structured copy plus per-call flattened attributes.
        attributes["fallom.tool_calls"] = JSON.stringify(toolCalls);
        toolCalls.forEach((tc, i) => {
          attributes[`gen_ai.completion.0.tool_calls.${i}.name`] = tc.name;
          attributes[`gen_ai.completion.0.tool_calls.${i}.type`] = "function";
          attributes[`gen_ai.completion.0.tool_calls.${i}.arguments`] = JSON.stringify(tc.arguments);
        });
      }
      if (result?.usage) {
        attributes["gen_ai.usage.prompt_tokens"] = result.usage.promptTokens;
        attributes["gen_ai.usage.completion_tokens"] = result.usage.completionTokens;
        attributes["llm.usage.total_tokens"] = result.usage.totalTokens;
      }
      const traceData = {
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        name: `mastra.${agentName}.generate`,
        kind: "client",
        model,
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "OK",
        prompt_tokens: result?.usage?.promptTokens,
        completion_tokens: result?.usage?.completionTokens,
        total_tokens: result?.usage?.totalTokens,
        attributes
      };
      // Fire-and-forget: tracing must never break the agent call.
      sendTrace(traceData).catch(() => {
      });
      return result;
    } catch (error) {
      const endTime = Date.now();
      // Error path: send a minimal ERROR trace, then rethrow the original error.
      const traceData = {
        config_key: ctx.configKey,
        session_id: ctx.sessionId,
        customer_id: ctx.customerId,
        trace_id: traceId,
        span_id: spanId,
        name: `mastra.${agentName}.generate`,
        kind: "client",
        start_time: new Date(startTime).toISOString(),
        end_time: new Date(endTime).toISOString(),
        duration_ms: endTime - startTime,
        status: "ERROR",
        error_message: error instanceof Error ? error.message : String(error)
      };
      sendTrace(traceData).catch(() => {
      });
      throw error;
    }
  };
  return agent;
}
1818
-
1819
// src/trace/session.ts
/**
 * A session handle binding a config key / session id / customer id triple.
 * Provides A/B model resolution (`getModel`) and client-wrapping helpers
 * that tag every traced call with this session's context.
 */
var FallomSession = class {
  /**
   * @param options `{ configKey, sessionId, customerId }` — copied into an
   *                internal context object; the original is not retained.
   */
  constructor(options) {
    this.ctx = {
      configKey: options.configKey,
      sessionId: options.sessionId,
      customerId: options.customerId
    };
  }
  /** Get the session context. Returns a shallow copy so callers cannot mutate internal state. */
  getContext() {
    return { ...this.ctx };
  }
  /**
   * Get model assignment for this session (A/B testing).
   *
   * Two call shapes are supported:
   *   getModel("config-key", opts?)  — explicit config key
   *   getModel(opts?)                — uses this session's configKey
   *
   * The models module is loaded lazily via dynamic import of a
   * bundler-split chunk — presumably to defer/decouple the models code
   * from this chunk; verify the chunk name stays in sync with the build.
   */
  async getModel(configKeyOrOptions, options) {
    let configKey;
    let opts;
    if (typeof configKeyOrOptions === "string") {
      configKey = configKeyOrOptions;
      opts = options || {};
    } else {
      configKey = this.ctx.configKey;
      opts = configKeyOrOptions || {};
    }
    const { get: get2 } = await import("./models-JIO5LVMB.mjs");
    return get2(configKey, this.ctx.sessionId, opts);
  }
  /**
   * Wrap a Vercel AI SDK model to trace all calls.
   *
   * Returns a new object whose prototype is the original model
   * (Object.create), with `doGenerate`/`doStream` shadowed by tracing
   * versions when present. The original model object is not mutated.
   */
  traceModel(model) {
    const ctx = this.ctx;
    const tracedModel = Object.create(model);
    if (model.doGenerate) {
      const originalDoGenerate = model.doGenerate.bind(model);
      tracedModel.doGenerate = async function(...args) {
        // If tracing was never initialized, pass through untouched.
        if (!isInitialized()) return originalDoGenerate(...args);
        // Join an ambient trace if one exists; otherwise start a new trace id.
        const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
        const traceId = traceCtx?.traceId || generateHexId(32);
        const spanId = generateHexId(16);
        const startTime = Date.now();
        try {
          const result = await originalDoGenerate(...args);
          const endTime = Date.now();
          const modelId = model.modelId || "unknown";
          // Usage may live at top level or under rawResponse depending on provider.
          const usage = result?.usage || result?.rawResponse?.usage;
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: traceCtx?.parentSpanId,
            name: "generateText",
            kind: "llm",
            model: modelId,
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(endTime).toISOString(),
            duration_ms: endTime - startTime,
            status: "OK",
            prompt_tokens: usage?.promptTokens,
            completion_tokens: usage?.completionTokens,
            total_tokens: usage?.totalTokens,
            // Raw usage is only attached when content capture is enabled.
            attributes: shouldCaptureContent() && usage ? { "fallom.raw.usage": JSON.stringify(usage) } : void 0
          }).catch(() => {
          });
          return result;
        } catch (error) {
          const endTime = Date.now();
          // Error path: emit an ERROR trace, then rethrow for the caller.
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: traceCtx?.parentSpanId,
            name: "generateText",
            kind: "llm",
            model: model.modelId || "unknown",
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(endTime).toISOString(),
            duration_ms: endTime - startTime,
            status: "ERROR",
            error_message: error instanceof Error ? error.message : String(error)
          }).catch(() => {
          });
          throw error;
        }
      };
    }
    if (model.doStream) {
      const originalDoStream = model.doStream.bind(model);
      tracedModel.doStream = async function(...args) {
        if (!isInitialized()) return originalDoStream(...args);
        const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
        const traceId = traceCtx?.traceId || generateHexId(32);
        const spanId = generateHexId(16);
        const startTime = Date.now();
        const modelId = model.modelId || "unknown";
        try {
          const result = await originalDoStream(...args);
          // NOTE(review): the trace is sent as soon as the stream handle is
          // returned, so duration here covers setup only, not the full stream,
          // and no token usage is recorded for streaming calls.
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: traceCtx?.parentSpanId,
            name: "streamText",
            kind: "llm",
            model: modelId,
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(Date.now()).toISOString(),
            duration_ms: Date.now() - startTime,
            status: "OK",
            is_streaming: true
          }).catch(() => {
          });
          return result;
        } catch (error) {
          sendTrace({
            config_key: ctx.configKey,
            session_id: ctx.sessionId,
            customer_id: ctx.customerId,
            trace_id: traceId,
            span_id: spanId,
            parent_span_id: traceCtx?.parentSpanId,
            name: "streamText",
            kind: "llm",
            model: modelId,
            start_time: new Date(startTime).toISOString(),
            end_time: new Date(Date.now()).toISOString(),
            duration_ms: Date.now() - startTime,
            status: "ERROR",
            error_message: error instanceof Error ? error.message : String(error),
            is_streaming: true
          }).catch(() => {
          });
          throw error;
        }
      };
    }
    return tracedModel;
  }
  /** Wrap OpenAI client. Delegates to shared wrapper. */
  wrapOpenAI(client) {
    return wrapOpenAI(client, this.ctx);
  }
  /** Wrap Anthropic client. Delegates to shared wrapper. */
  wrapAnthropic(client) {
    return wrapAnthropic(client, this.ctx);
  }
  /** Wrap Google AI model. Delegates to shared wrapper. */
  wrapGoogleAI(model) {
    return wrapGoogleAI(model, this.ctx);
  }
  /** Wrap Vercel AI SDK. Delegates to shared wrapper. */
  wrapAISDK(ai, options) {
    return wrapAISDK(ai, this.ctx, options);
  }
  /** Wrap Mastra agent. Delegates to shared wrapper. */
  wrapMastraAgent(agent) {
    return wrapMastraAgent(agent, this.ctx);
  }
};
1986
/**
 * Factory for {@link FallomSession}.
 *
 * @param options `{ configKey, sessionId, customerId }`
 * @returns A new session handle bound to those identifiers.
 */
function session(options) {
  return new FallomSession(options);
}
1989
-
1990
// src/models.ts
// Module-level state for the models (A/B config) client. These bindings are
// mutated by init2()/get() and read throughout this module.
var apiKey2 = null; // Fallom API key; stays null until init2() resolves one
var baseUrl2 = "https://configs.fallom.com"; // configs service base URL (overridable via init2/env)
var initialized2 = false; // set by init2(); ensureInit() checks it to lazy-init
var syncInterval = null; // handle for the 30s background config re-fetch timer
var debugMode2 = false; // toggled per-call by get({ debug }); gates log4 output
var configCache = /* @__PURE__ */ new Map(); // configKey -> { versions: Map<version, config>, latest: version|null }
var SYNC_TIMEOUT = 2e3; // ms budget for config fetches before aborting
var RECORD_TIMEOUT = 1e3; // ms budget for recording a session assignment
/**
 * Debug logger for this module; prints only when debugMode2 is enabled.
 * @param {string} msg - Message to log (prefixed with "[Fallom]").
 */
function log4(msg) {
  if (debugMode2) {
    console.log(`[Fallom] ${msg}`);
  }
}
2004
/**
 * Initialize the models client: resolve the API key and base URL, kick off
 * an immediate (best-effort) config fetch, and start a 30-second background
 * refresh timer. The timer is unref'd so it never keeps the process alive.
 *
 * Safe to call more than once — the refresh timer is only created the first
 * time. Without an API key this only marks the module as initialized.
 *
 * @param options `{ apiKey?, baseUrl? }`; env vars FALLOM_API_KEY,
 *                FALLOM_CONFIGS_URL / FALLOM_BASE_URL are used as fallbacks.
 */
function init2(options = {}) {
  apiKey2 = options.apiKey || process.env.FALLOM_API_KEY || null;
  baseUrl2 = options.baseUrl || process.env.FALLOM_CONFIGS_URL || process.env.FALLOM_BASE_URL || "https://configs.fallom.com";
  initialized2 = true;
  if (!apiKey2) {
    return;
  }
  // Prime the cache right away; failures are tolerated (fetch retries on the timer).
  fetchConfigs().catch(() => {
  });
  if (syncInterval) {
    return;
  }
  syncInterval = setInterval(() => {
    fetchConfigs().catch(() => {
    });
  }, 3e4);
  // Don't let the background refresh hold the event loop open.
  syncInterval.unref();
}
2021
/**
 * Lazily initialize the models client if init2() was never called.
 * Initialization errors are swallowed deliberately — resolution then
 * proceeds with whatever state exists (typically falling back later).
 */
function ensureInit() {
  if (initialized2) {
    return;
  }
  try {
    init2();
  } catch {
    // best-effort: callers handle the uninitialized case downstream
  }
}
2029
/**
 * Fetch all configs from the Fallom configs service and merge them into
 * configCache (keyed by config key, then by version; `latest` tracks the
 * most recently seen version). No-op without an API key. All network and
 * parse failures are logged (in debug mode) and swallowed — this function
 * never throws.
 *
 * Fix: clearTimeout now runs in a `finally` block. Previously, if fetch
 * rejected (network error / abort), the abort timer was never cleared and
 * the live, non-unref'd timer could keep the event loop alive for up to
 * `timeout` ms.
 *
 * @param {number} timeout - Abort budget in ms (default SYNC_TIMEOUT).
 */
async function fetchConfigs(timeout = SYNC_TIMEOUT) {
  if (!apiKey2) {
    log4("_fetchConfigs: No API key, skipping");
    return;
  }
  try {
    log4(`Fetching configs from ${baseUrl2}/configs`);
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), timeout);
    let resp;
    try {
      resp = await fetch(`${baseUrl2}/configs`, {
        headers: { Authorization: `Bearer ${apiKey2}` },
        signal: controller.signal
      });
    } finally {
      // Always disarm the abort timer, even when fetch rejects.
      clearTimeout(timeoutId);
    }
    log4(`Response status: ${resp.status}`);
    if (resp.ok) {
      const data = await resp.json();
      const configs = data.configs || [];
      log4(`Got ${configs.length} configs: ${configs.map((c) => c.key)}`);
      for (const c of configs) {
        const key = c.key;
        const version = c.version || 1;
        log4(`Config '${key}' v${version}: ${JSON.stringify(c.variants)}`);
        if (!configCache.has(key)) {
          configCache.set(key, { versions: /* @__PURE__ */ new Map(), latest: null });
        }
        const cached = configCache.get(key);
        cached.versions.set(version, c);
        cached.latest = version;
      }
    } else {
      log4(`Fetch failed: ${resp.statusText}`);
    }
  } catch (e) {
    log4(`Fetch exception: ${e}`);
  }
}
2066
/**
 * Fetch one specific version of a config and cache it under that version.
 * Returns the config object on success, or null when there is no API key,
 * the request fails, or the server responds non-OK. Never throws.
 *
 * Fix: clearTimeout now runs in a `finally` block so the abort timer is
 * disarmed even when fetch rejects; previously a rejected fetch left a
 * live, non-unref'd timer that could keep the event loop alive for up to
 * `timeout` ms.
 *
 * @param {string} configKey - Config key to fetch.
 * @param {number} version   - Exact version to fetch.
 * @param {number} timeout   - Abort budget in ms (default SYNC_TIMEOUT).
 * @returns {Promise<object|null>} The fetched config, or null.
 */
async function fetchSpecificVersion(configKey, version, timeout = SYNC_TIMEOUT) {
  if (!apiKey2) return null;
  try {
    const controller = new AbortController();
    const timeoutId = setTimeout(() => controller.abort(), timeout);
    let resp;
    try {
      resp = await fetch(
        `${baseUrl2}/configs/${configKey}/version/${version}`,
        {
          headers: { Authorization: `Bearer ${apiKey2}` },
          signal: controller.signal
        }
      );
    } finally {
      // Always disarm the abort timer, even when fetch rejects.
      clearTimeout(timeoutId);
    }
    if (resp.ok) {
      const config = await resp.json();
      if (!configCache.has(configKey)) {
        configCache.set(configKey, { versions: /* @__PURE__ */ new Map(), latest: null });
      }
      configCache.get(configKey).versions.set(version, config);
      return config;
    }
  } catch {
    // best-effort: fall through to null
  }
  return null;
}
2091
/**
 * Resolve the model assigned to `sessionId` for `configKey` (A/B testing).
 *
 * Resolution order: in-memory cache -> network fetch -> `fallback` model ->
 * throw. Assignment is deterministic: the session id is hashed (md5, first
 * 4 bytes, mod 1,000,000) and mapped onto the variants' cumulative weight
 * ranges, so the same session always gets the same variant for a given
 * config version.
 *
 * Fix: a config whose variants list is empty previously crashed with an
 * opaque TypeError (`variants[variants.length - 1].model` on []), bypassing
 * `fallback`. It now honors `fallback` or throws a descriptive error.
 *
 * @param {string} configKey - Config to resolve.
 * @param {string} sessionId - Stable session identifier to hash.
 * @param {{version?: number, fallback?: string, debug?: boolean}} options
 * @returns {Promise<string>} The assigned (or fallback) model name.
 * @throws {Error} When the config/version cannot be found or has no
 *                 variants and no fallback was provided.
 */
async function get(configKey, sessionId, options = {}) {
  const { version, fallback, debug = false } = options;
  debugMode2 = debug;
  ensureInit();
  log4(
    `get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`
  );
  try {
    let configData = configCache.get(configKey);
    log4(
      `Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`
    );
    if (!configData) {
      log4("Not in cache, fetching...");
      await fetchConfigs(SYNC_TIMEOUT);
      configData = configCache.get(configKey);
      log4(
        `After fetch, cache lookup: ${configData ? "found" : "still not found"}`
      );
    }
    if (!configData) {
      log4(`Config not found, using fallback: ${fallback}`);
      if (fallback) {
        console.warn(
          `[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`
        );
        return returnWithTrace(configKey, sessionId, fallback, 0);
      }
      throw new Error(
        `Config '${configKey}' not found. Check that it exists in your Fallom dashboard.`
      );
    }
    let config;
    let targetVersion;
    if (version !== void 0) {
      // Explicit version requested: try cache, then a targeted fetch.
      config = configData.versions.get(version);
      if (!config) {
        config = await fetchSpecificVersion(configKey, version, SYNC_TIMEOUT) || void 0;
      }
      if (!config) {
        if (fallback) {
          console.warn(
            `[Fallom WARNING] Config '${configKey}' version ${version} not found, using fallback: ${fallback}`
          );
          return returnWithTrace(configKey, sessionId, fallback, 0);
        }
        throw new Error(`Config '${configKey}' version ${version} not found.`);
      }
      targetVersion = version;
    } else {
      targetVersion = configData.latest;
      config = configData.versions.get(targetVersion);
      if (!config) {
        if (fallback) {
          console.warn(
            `[Fallom WARNING] Config '${configKey}' has no cached version, using fallback: ${fallback}`
          );
          return returnWithTrace(configKey, sessionId, fallback, 0);
        }
        throw new Error(`Config '${configKey}' has no cached version.`);
      }
    }
    const variantsRaw = config.variants;
    const configVersion = config.version || targetVersion;
    // Variants may arrive as an array or a keyed object; normalize to array.
    const variants = Array.isArray(variantsRaw) ? variantsRaw : Object.values(variantsRaw);
    if (variants.length === 0) {
      // Guard: an empty variants list would otherwise crash below.
      if (fallback) {
        console.warn(
          `[Fallom WARNING] Config '${configKey}' has no variants, using fallback: ${fallback}`
        );
        return returnWithTrace(configKey, sessionId, fallback, 0);
      }
      throw new Error(`Config '${configKey}' has no variants.`);
    }
    log4(
      `Config found! Version: ${configVersion}, Variants: ${JSON.stringify(
        variants
      )}`
    );
    // Deterministic bucket in [0, 1e6) from the session id.
    const hashBytes = createHash("md5").update(sessionId).digest();
    const hashVal = hashBytes.readUInt32BE(0) % 1e6;
    log4(`Session hash: ${hashVal} (out of 1,000,000)`);
    let cumulative = 0;
    // Default to the last variant so rounding gaps in weights still assign.
    let assignedModel = variants[variants.length - 1].model;
    for (const v of variants) {
      const oldCumulative = cumulative;
      cumulative += v.weight * 1e4; // weight is a percentage; scale to 1e6 space
      log4(
        `Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`
      );
      if (hashVal < cumulative) {
        assignedModel = v.model;
        break;
      }
    }
    log4(`\u2705 Assigned model: ${assignedModel}`);
    return returnWithTrace(configKey, sessionId, assignedModel, configVersion);
  } catch (e) {
    // "not found" errors above are only thrown when no fallback exists, so
    // rethrow them as-is rather than masking with the fallback path.
    if (e instanceof Error && e.message.includes("not found")) {
      throw e;
    }
    if (fallback) {
      console.warn(
        `[Fallom WARNING] Error getting model for '${configKey}': ${e}. Using fallback: ${fallback}`
      );
      return returnWithTrace(configKey, sessionId, fallback, 0);
    }
    throw e;
  }
}
2192
/**
 * Finalize a model resolution: bind the trace session, record the
 * assignment server-side (only for real config versions, i.e. version > 0 —
 * fallback results use version 0 and are not recorded), and hand the model
 * name back to the caller. Both side effects are best-effort.
 *
 * @param {string} configKey - Config the assignment belongs to.
 * @param {string} sessionId - Session the model was assigned to.
 * @param {string} model     - Assigned (or fallback) model name.
 * @param {number} version   - Config version, or 0 for fallback results.
 * @returns {string} The model name, unchanged.
 */
function returnWithTrace(configKey, sessionId, model, version) {
  try {
    setSession(configKey, sessionId);
  } catch {
    // session binding is best-effort; never block model resolution
  }
  const isRealAssignment = version > 0;
  if (isRealAssignment) {
    // Fire-and-forget; recording failures must not affect the caller.
    recordSession(configKey, version, sessionId, model).catch(() => {
    });
  }
  return model;
}
2203
/**
 * POST a session's model assignment to the Fallom sessions endpoint.
 * Best-effort: no-ops without an API key and swallows all errors —
 * recording must never disturb the caller.
 *
 * Fix: clearTimeout now runs in a `finally` block. Previously a rejected
 * fetch skipped clearTimeout, leaving a live, non-unref'd abort timer that
 * could keep the event loop alive for up to RECORD_TIMEOUT ms.
 *
 * @param {string} configKey - Config the assignment belongs to.
 * @param {number} version   - Config version used for the assignment.
 * @param {string} sessionId - Session that received the assignment.
 * @param {string} model     - Model that was assigned.
 */
async function recordSession(configKey, version, sessionId, model) {
  if (!apiKey2) return;
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), RECORD_TIMEOUT);
  try {
    await fetch(`${baseUrl2}/sessions`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${apiKey2}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        config_key: configKey,
        config_version: version,
        session_id: sessionId,
        assigned_model: model
      }),
      signal: controller.signal
    });
  } catch {
    // best-effort: recording failures are intentionally ignored
  } finally {
    // Always disarm the abort timer, even on rejection.
    clearTimeout(timeoutId);
  }
}
2226
-
2227
- export {
2228
- __export,
2229
- init,
2230
- getSession,
2231
- init2,
2232
- get,
2233
- models_exports,
2234
- FallomSession,
2235
- session,
2236
- trace_exports
2237
- };