@kopai/core 0.0.0

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,971 @@
1
+ import { t as __exportAll } from "./chunk-Bo1DHCg-.mjs";
2
+ import z$1, { z } from "zod";
3
+
4
+ //#region src/denormalized-signals-zod.ts
5
+ var denormalized_signals_zod_exports = /* @__PURE__ */ __exportAll({
6
+ otelExponentialHistogramSchema: () => otelExponentialHistogramSchema,
7
+ otelGaugeSchema: () => otelGaugeSchema,
8
+ otelHistogramSchema: () => otelHistogramSchema,
9
+ otelLogsSchema: () => otelLogsSchema,
10
+ otelMetricsSchema: () => otelMetricsSchema,
11
+ otelSumSchema: () => otelSumSchema,
12
+ otelSummarySchema: () => otelSummarySchema,
13
+ otelTracesSchema: () => otelTracesSchema
14
+ });
15
+ const attributeValue = z.union([
16
+ z.string(),
17
+ z.number(),
18
+ z.boolean()
19
+ ]);
20
+ const otelTracesSchema = z.object({
21
+ SpanId: z.string().describe("Unique identifier for a span within a trace. The ID is an 8-byte array."),
22
+ Timestamp: z.string().describe("Start time of the span. UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Expressed as string in JSON."),
23
+ TraceId: z.string().describe("Unique identifier for a trace. All spans from the same trace share the same trace_id. The ID is a 16-byte array."),
24
+ Duration: z.string().optional().describe("Duration of the span in nanoseconds (end_time - start_time). Expressed as string in JSON."),
25
+ "Events.Attributes": z.array(z.record(z.string(), attributeValue)).optional().describe("Attribute key/value pairs on the event (one object per event)."),
26
+ "Events.Name": z.array(z.string()).optional().describe("Name of the event. Semantically required to be non-empty."),
27
+ "Events.Timestamp": z.array(z.string()).optional().describe("Time the event occurred (nanoseconds). Expressed as strings in JSON."),
28
+ "Links.Attributes": z.array(z.record(z.string(), attributeValue)).optional().describe("Attribute key/value pairs on the link (one object per link)."),
29
+ "Links.SpanId": z.array(z.string()).optional().describe("Unique identifier for the linked span. The ID is an 8-byte array."),
30
+ "Links.TraceId": z.array(z.string()).optional().describe("Unique identifier of a trace that the linked span is part of. The ID is a 16-byte array."),
31
+ "Links.TraceState": z.array(z.string()).optional().describe("The trace_state associated with the link."),
32
+ ParentSpanId: z.string().optional().describe("The span_id of this span's parent span. Empty if this is a root span."),
33
+ ResourceAttributes: z.record(z.string(), attributeValue).optional().describe("Attributes that describe the resource."),
34
+ ScopeName: z.string().optional().describe("Name denoting the instrumentation scope."),
35
+ ScopeVersion: z.string().optional().describe("Version of the instrumentation scope."),
36
+ ServiceName: z.string().optional().describe("Service name from resource attributes (service.name)."),
37
+ SpanAttributes: z.record(z.string(), attributeValue).optional().describe("Key/value pairs describing the span."),
38
+ SpanKind: z.string().optional().describe("Type of span (INTERNAL, SERVER, CLIENT, PRODUCER, CONSUMER). Used to identify relationships between spans."),
39
+ SpanName: z.string().optional().describe("Description of the span's operation. E.g., qualified method name or file name with line number."),
40
+ StatusCode: z.string().optional().describe("Status code (UNSET, OK, ERROR)."),
41
+ StatusMessage: z.string().optional().describe("Developer-facing human readable error message."),
42
+ TraceState: z.string().optional().describe("Conveys information about request position in multiple distributed tracing graphs. W3C trace-context format.")
43
+ });
44
+ const otelLogsSchema = z.object({
45
+ Timestamp: z.string().describe("Time when the event occurred. UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Expressed as string in JSON."),
46
+ Body: z.string().optional().describe("Body of the log record. Can be a human-readable string message or structured data."),
47
+ LogAttributes: z.record(z.string(), attributeValue).optional().describe("Additional attributes that describe the specific event occurrence."),
48
+ ResourceAttributes: z.record(z.string(), attributeValue).optional().describe("Attributes that describe the resource."),
49
+ ResourceSchemaUrl: z.string().optional().describe("Schema URL for the resource data."),
50
+ ScopeAttributes: z.record(z.string(), attributeValue).optional().describe("Attributes of the instrumentation scope."),
51
+ ScopeName: z.string().optional().describe("Name denoting the instrumentation scope."),
52
+ ScopeSchemaUrl: z.string().optional().describe("Schema URL for the scope data."),
53
+ ScopeVersion: z.string().optional().describe("Version of the instrumentation scope."),
54
+ ServiceName: z.string().optional().describe("Service name from resource attributes (service.name)."),
55
+ SeverityNumber: z.number().optional().describe("Numerical value of the severity, normalized to values described in Log Data Model."),
56
+ SeverityText: z.string().optional().describe("Severity text (also known as log level). Original string representation as known at the source."),
57
+ SpanId: z.string().optional().describe("Unique identifier for a span within a trace. The ID is an 8-byte array."),
58
+ TraceFlags: z.number().optional().describe("Bit field. 8 least significant bits are trace flags as defined in W3C Trace Context."),
59
+ TraceId: z.string().optional().describe("Unique identifier for a trace. All logs from the same trace share the same trace_id. The ID is a 16-byte array.")
60
+ });
61
+ const metricsBaseSchema = z.object({
62
+ TimeUnix: z.string().describe("Time when the data point was recorded. UNIX Epoch time in nanoseconds. Expressed as string in JSON."),
63
+ StartTimeUnix: z.string().describe("Start time for cumulative/delta metrics. UNIX Epoch time in nanoseconds. Expressed as string in JSON."),
64
+ Attributes: z.record(z.string(), attributeValue).optional().describe("Key/value pairs that uniquely identify the timeseries."),
65
+ MetricName: z.string().optional().describe("The name of the metric."),
66
+ MetricDescription: z.string().optional().describe("A description of the metric, which can be used in documentation."),
67
+ MetricUnit: z.string().optional().describe("The unit in which the metric value is reported (UCUM format)."),
68
+ ResourceAttributes: z.record(z.string(), attributeValue).optional().describe("Attributes that describe the resource."),
69
+ ResourceSchemaUrl: z.string().optional().describe("Schema URL for the resource data."),
70
+ ScopeAttributes: z.record(z.string(), attributeValue).optional().describe("Attributes of the instrumentation scope."),
71
+ ScopeDroppedAttrCount: z.number().optional().describe("Number of attributes dropped from the scope."),
72
+ ScopeName: z.string().optional().describe("Name denoting the instrumentation scope."),
73
+ ScopeSchemaUrl: z.string().optional().describe("Schema URL for the scope data."),
74
+ ScopeVersion: z.string().optional().describe("Version of the instrumentation scope."),
75
+ ServiceName: z.string().optional().describe("Service name from resource attributes (service.name)."),
76
+ "Exemplars.FilteredAttributes": z.array(z.record(z.string(), attributeValue)).optional().describe("Filtered attributes of exemplars."),
77
+ "Exemplars.SpanId": z.array(z.string()).optional().describe("Span IDs associated with exemplars."),
78
+ "Exemplars.TimeUnix": z.array(z.string()).optional().describe("Timestamps of exemplars (nanoseconds). Expressed as strings in JSON."),
79
+ "Exemplars.TraceId": z.array(z.string()).optional().describe("Trace IDs associated with exemplars."),
80
+ "Exemplars.Value": z.array(z.number()).optional().describe("Values of exemplars.")
81
+ });
82
+ const otelGaugeSchema = metricsBaseSchema.extend({
83
+ MetricType: z.literal("Gauge").describe("Gauge metric type."),
84
+ Value: z.number().describe("Current scalar value."),
85
+ Flags: z.number().optional().describe("Flags that apply to this data point (see DataPointFlags).")
86
+ });
87
+ const otelSumSchema = metricsBaseSchema.extend({
88
+ MetricType: z.literal("Sum").describe("Sum metric type."),
89
+ Value: z.number().describe("Scalar sum value."),
90
+ Flags: z.number().optional().describe("Flags that apply to this data point (see DataPointFlags)."),
91
+ AggTemporality: z.string().optional().describe("Aggregation temporality (DELTA or CUMULATIVE)."),
92
+ IsMonotonic: z.number().optional().describe("Whether the sum is monotonic (0 = false, 1 = true).")
93
+ });
94
+ const otelHistogramSchema = metricsBaseSchema.extend({
95
+ MetricType: z.literal("Histogram").describe("Histogram metric type."),
96
+ Count: z.number().optional().describe("Number of values in the histogram."),
97
+ Sum: z.number().optional().describe("Sum of all values."),
98
+ Min: z.number().nullable().optional().describe("Minimum value recorded."),
99
+ Max: z.number().nullable().optional().describe("Maximum value recorded."),
100
+ BucketCounts: z.array(z.number()).optional().describe("Count of values in each bucket."),
101
+ ExplicitBounds: z.array(z.number()).optional().describe("Bucket boundaries."),
102
+ AggTemporality: z.string().optional().describe("Aggregation temporality (DELTA or CUMULATIVE).")
103
+ });
104
+ const otelExponentialHistogramSchema = metricsBaseSchema.extend({
105
+ MetricType: z.literal("ExponentialHistogram").describe("Exponential histogram metric type."),
106
+ Count: z.number().optional().describe("Number of values in the histogram."),
107
+ Sum: z.number().optional().describe("Sum of all values."),
108
+ Min: z.number().nullable().optional().describe("Minimum value recorded."),
109
+ Max: z.number().nullable().optional().describe("Maximum value recorded."),
110
+ Scale: z.number().optional().describe("Resolution of the histogram. Boundaries are at powers of base."),
111
+ ZeroCount: z.number().optional().describe("Count of values that are exactly zero."),
112
+ PositiveBucketCounts: z.array(z.number()).optional().describe("Counts for positive value buckets."),
113
+ PositiveOffset: z.number().optional().describe("Offset for positive bucket indices."),
114
+ NegativeBucketCounts: z.array(z.number()).optional().describe("Counts for negative value buckets."),
115
+ NegativeOffset: z.number().optional().describe("Offset for negative bucket indices."),
116
+ AggTemporality: z.string().optional().describe("Aggregation temporality (DELTA or CUMULATIVE).")
117
+ });
118
+ const otelSummarySchema = metricsBaseSchema.extend({
119
+ MetricType: z.literal("Summary").describe("Summary metric type."),
120
+ Count: z.number().optional().describe("Number of values in the summary."),
121
+ Sum: z.number().optional().describe("Sum of all values."),
122
+ "ValueAtQuantiles.Quantile": z.array(z.number()).optional().describe("Quantile values (0.0 to 1.0)."),
123
+ "ValueAtQuantiles.Value": z.array(z.number()).optional().describe("Values at each quantile.")
124
+ });
125
+ const otelMetricsSchema = z.discriminatedUnion("MetricType", [
126
+ otelGaugeSchema,
127
+ otelSumSchema,
128
+ otelHistogramSchema,
129
+ otelExponentialHistogramSchema,
130
+ otelSummarySchema
131
+ ]);
132
+
133
+ //#endregion
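For illustration, a minimal usage sketch of the denormalized schemas above, assuming the package entry resolves to this file and is imported as `@kopai/core` (the namespace names come from the export statement at the bottom of this file); the row values are made up:

import { denormalizedSignals } from "@kopai/core";

const { otelTracesSchema, otelMetricsSchema } = denormalizedSignals;

// Validate a hypothetical denormalized span row (SpanId, Timestamp and TraceId are the required fields).
const span = otelTracesSchema.safeParse({
  SpanId: "0123456789abcdef",
  TraceId: "0123456789abcdef0123456789abcdef",
  Timestamp: "1700000000000000000",
  SpanKind: "SERVER",
  StatusCode: "OK",
});
console.log(span.success); // true

// otelMetricsSchema is a discriminated union on MetricType, so parsing narrows the row shape.
const metric = otelMetricsSchema.parse({
  MetricType: "Gauge",
  TimeUnix: "1700000000000000000",
  StartTimeUnix: "1700000000000000000",
  Value: 42,
});
console.log(metric.MetricType); // "Gauge"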
134
+ //#region src/telemetry-datasource.ts
135
+ var telemetry_datasource_exports = {};
136
+
137
+ //#endregion
138
+ //#region src/otlp-generated.ts
139
+ var otlp_generated_exports = /* @__PURE__ */ __exportAll({
140
+ AggregationTemporality: () => AggregationTemporality,
141
+ DataPointFlags: () => DataPointFlags,
142
+ LogRecordFlags: () => LogRecordFlags,
143
+ SeverityNumber: () => SeverityNumber,
144
+ SpanFlags: () => SpanFlags,
145
+ SpanKind: () => SpanKind,
146
+ StatusCode: () => StatusCode
147
+ });
148
+ /**
149
+ * SpanFlags represents constants used to interpret the
150
+ * Span.flags field, which is protobuf 'fixed32' type and is to
151
+ * be used as bit-fields. Each non-zero value defined in this enum is
152
+ * a bit-mask. To extract the bit-field, for example, use an
153
+ * expression like:
154
+ *
155
+ * (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
156
+ *
157
+ * See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
158
+ *
159
+ * Note that Span flags were introduced in version 1.1 of the
160
+ * OpenTelemetry protocol. Older Span producers do not set this
161
+ * field, consequently consumers should not rely on the absence of a
162
+ * particular flag bit to indicate the presence of a particular feature.
163
+ */
164
+ let SpanFlags = /* @__PURE__ */ function(SpanFlags) {
165
+ /**
166
+ * SPAN_FLAGS_DO_NOT_USE - The zero value for the enum. Should not be used for comparisons.
167
+ * Instead use bitwise "and" with the appropriate mask as shown above.
168
+ */
169
+ SpanFlags[SpanFlags["SPAN_FLAGS_DO_NOT_USE"] = 0] = "SPAN_FLAGS_DO_NOT_USE";
170
+ /** SPAN_FLAGS_TRACE_FLAGS_MASK - Bits 0-7 are used for trace flags. */
171
+ SpanFlags[SpanFlags["SPAN_FLAGS_TRACE_FLAGS_MASK"] = 255] = "SPAN_FLAGS_TRACE_FLAGS_MASK";
172
+ /**
173
+ * SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK - Bits 8 and 9 are used to indicate that the parent span or link span is remote.
174
+ * Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
175
+ * Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
176
+ */
177
+ SpanFlags[SpanFlags["SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK"] = 256] = "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK";
178
+ SpanFlags[SpanFlags["SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK"] = 512] = "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK";
179
+ SpanFlags[SpanFlags["UNRECOGNIZED"] = -1] = "UNRECOGNIZED";
180
+ return SpanFlags;
181
+ }({});
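A small sketch of the bit-field extraction the comment above describes (the `flags` value is invented for illustration; `SpanFlags` is re-exported below under the `otlp` namespace):

import { otlp } from "@kopai/core";

const { SpanFlags } = otlp;

const flags = 0x301; // hypothetical Span.flags: trace-flags bit 0 set, remote-known and remote bits set

// Bits 0-7 carry the W3C trace flags.
const traceFlags = flags & SpanFlags.SPAN_FLAGS_TRACE_FLAGS_MASK;

// Bit 8 says whether the remote bit is known; bit 9 is the remote bit itself.
const isRemoteKnown = (flags & SpanFlags.SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) !== 0;
const isRemote = isRemoteKnown && (flags & SpanFlags.SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) !== 0;

console.log({ traceFlags, isRemoteKnown, isRemote }); // { traceFlags: 1, isRemoteKnown: true, isRemote: true }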
182
+ /**
183
+ * SpanKind is the type of span. Can be used to specify additional relationships between spans
184
+ * in addition to a parent/child relationship.
185
+ */
186
+ let SpanKind = /* @__PURE__ */ function(SpanKind) {
187
+ /**
188
+ * SPAN_KIND_UNSPECIFIED - Unspecified. Do NOT use as default.
189
+ * Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
190
+ */
191
+ SpanKind[SpanKind["SPAN_KIND_UNSPECIFIED"] = 0] = "SPAN_KIND_UNSPECIFIED";
192
+ /**
193
+ * SPAN_KIND_INTERNAL - Indicates that the span represents an internal operation within an application,
194
+ * as opposed to an operation happening at the boundaries. Default value.
195
+ */
196
+ SpanKind[SpanKind["SPAN_KIND_INTERNAL"] = 1] = "SPAN_KIND_INTERNAL";
197
+ /**
198
+ * SPAN_KIND_SERVER - Indicates that the span covers server-side handling of an RPC or other
199
+ * remote network request.
200
+ */
201
+ SpanKind[SpanKind["SPAN_KIND_SERVER"] = 2] = "SPAN_KIND_SERVER";
202
+ /** SPAN_KIND_CLIENT - Indicates that the span describes a request to some remote service. */
203
+ SpanKind[SpanKind["SPAN_KIND_CLIENT"] = 3] = "SPAN_KIND_CLIENT";
204
+ /**
205
+ * SPAN_KIND_PRODUCER - Indicates that the span describes a producer sending a message to a broker.
206
+ * Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
207
+ * between producer and consumer spans. A PRODUCER span ends when the message was accepted
208
+ * by the broker while the logical processing of the message might span a much longer time.
209
+ */
210
+ SpanKind[SpanKind["SPAN_KIND_PRODUCER"] = 4] = "SPAN_KIND_PRODUCER";
211
+ /**
212
+ * SPAN_KIND_CONSUMER - Indicates that the span describes a consumer receiving a message from a broker.
213
+ * Like the PRODUCER kind, there is often no direct critical path latency relationship
214
+ * between producer and consumer spans.
215
+ */
216
+ SpanKind[SpanKind["SPAN_KIND_CONSUMER"] = 5] = "SPAN_KIND_CONSUMER";
217
+ SpanKind[SpanKind["UNRECOGNIZED"] = -1] = "UNRECOGNIZED";
218
+ return SpanKind;
219
+ }({});
220
+ /**
221
+ * For the semantics of status codes see
222
+ * https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
223
+ */
224
+ let StatusCode = /* @__PURE__ */ function(StatusCode) {
225
+ /** STATUS_CODE_UNSET - The default status. */
226
+ StatusCode[StatusCode["STATUS_CODE_UNSET"] = 0] = "STATUS_CODE_UNSET";
227
+ /**
228
+ * STATUS_CODE_OK - The Span has been validated by an Application developer or Operator to
229
+ * have completed successfully.
230
+ */
231
+ StatusCode[StatusCode["STATUS_CODE_OK"] = 1] = "STATUS_CODE_OK";
232
+ /** STATUS_CODE_ERROR - The Span contains an error. */
233
+ StatusCode[StatusCode["STATUS_CODE_ERROR"] = 2] = "STATUS_CODE_ERROR";
234
+ StatusCode[StatusCode["UNRECOGNIZED"] = -1] = "UNRECOGNIZED";
235
+ return StatusCode;
236
+ }({});
237
+ /** Possible values for LogRecord.SeverityNumber. */
238
+ let SeverityNumber = /* @__PURE__ */ function(SeverityNumber) {
239
+ /** SEVERITY_NUMBER_UNSPECIFIED - UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. */
240
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_UNSPECIFIED"] = 0] = "SEVERITY_NUMBER_UNSPECIFIED";
241
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_TRACE"] = 1] = "SEVERITY_NUMBER_TRACE";
242
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_TRACE2"] = 2] = "SEVERITY_NUMBER_TRACE2";
243
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_TRACE3"] = 3] = "SEVERITY_NUMBER_TRACE3";
244
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_TRACE4"] = 4] = "SEVERITY_NUMBER_TRACE4";
245
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_DEBUG"] = 5] = "SEVERITY_NUMBER_DEBUG";
246
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_DEBUG2"] = 6] = "SEVERITY_NUMBER_DEBUG2";
247
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_DEBUG3"] = 7] = "SEVERITY_NUMBER_DEBUG3";
248
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_DEBUG4"] = 8] = "SEVERITY_NUMBER_DEBUG4";
249
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_INFO"] = 9] = "SEVERITY_NUMBER_INFO";
250
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_INFO2"] = 10] = "SEVERITY_NUMBER_INFO2";
251
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_INFO3"] = 11] = "SEVERITY_NUMBER_INFO3";
252
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_INFO4"] = 12] = "SEVERITY_NUMBER_INFO4";
253
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_WARN"] = 13] = "SEVERITY_NUMBER_WARN";
254
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_WARN2"] = 14] = "SEVERITY_NUMBER_WARN2";
255
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_WARN3"] = 15] = "SEVERITY_NUMBER_WARN3";
256
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_WARN4"] = 16] = "SEVERITY_NUMBER_WARN4";
257
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_ERROR"] = 17] = "SEVERITY_NUMBER_ERROR";
258
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_ERROR2"] = 18] = "SEVERITY_NUMBER_ERROR2";
259
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_ERROR3"] = 19] = "SEVERITY_NUMBER_ERROR3";
260
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_ERROR4"] = 20] = "SEVERITY_NUMBER_ERROR4";
261
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_FATAL"] = 21] = "SEVERITY_NUMBER_FATAL";
262
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_FATAL2"] = 22] = "SEVERITY_NUMBER_FATAL2";
263
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_FATAL3"] = 23] = "SEVERITY_NUMBER_FATAL3";
264
+ SeverityNumber[SeverityNumber["SEVERITY_NUMBER_FATAL4"] = 24] = "SEVERITY_NUMBER_FATAL4";
265
+ SeverityNumber[SeverityNumber["UNRECOGNIZED"] = -1] = "UNRECOGNIZED";
266
+ return SeverityNumber;
267
+ }({});
268
+ /**
269
+ * LogRecordFlags represents constants used to interpret the
270
+ * LogRecord.flags field, which is protobuf 'fixed32' type and is to
271
+ * be used as bit-fields. Each non-zero value defined in this enum is
272
+ * a bit-mask. To extract the bit-field, for example, use an
273
+ * expression like:
274
+ *
275
+ * (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK)
276
+ */
277
+ let LogRecordFlags = /* @__PURE__ */ function(LogRecordFlags) {
278
+ /**
279
+ * LOG_RECORD_FLAGS_DO_NOT_USE - The zero value for the enum. Should not be used for comparisons.
280
+ * Instead use bitwise "and" with the appropriate mask as shown above.
281
+ */
282
+ LogRecordFlags[LogRecordFlags["LOG_RECORD_FLAGS_DO_NOT_USE"] = 0] = "LOG_RECORD_FLAGS_DO_NOT_USE";
283
+ /** LOG_RECORD_FLAGS_TRACE_FLAGS_MASK - Bits 0-7 are used for trace flags. */
284
+ LogRecordFlags[LogRecordFlags["LOG_RECORD_FLAGS_TRACE_FLAGS_MASK"] = 255] = "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK";
285
+ LogRecordFlags[LogRecordFlags["UNRECOGNIZED"] = -1] = "UNRECOGNIZED";
286
+ return LogRecordFlags;
287
+ }({});
288
+ /**
289
+ * AggregationTemporality defines how a metric aggregator reports aggregated
290
+ * values. It describes how those values relate to the time interval over
291
+ * which they are aggregated.
292
+ */
293
+ let AggregationTemporality = /* @__PURE__ */ function(AggregationTemporality) {
294
+ /** AGGREGATION_TEMPORALITY_UNSPECIFIED - UNSPECIFIED is the default AggregationTemporality, it MUST not be used. */
295
+ AggregationTemporality[AggregationTemporality["AGGREGATION_TEMPORALITY_UNSPECIFIED"] = 0] = "AGGREGATION_TEMPORALITY_UNSPECIFIED";
296
+ /**
297
+ * AGGREGATION_TEMPORALITY_DELTA - DELTA is an AggregationTemporality for a metric aggregator which reports
298
+ * changes since last report time. Successive metrics contain aggregation of
299
+ * values from continuous and non-overlapping intervals.
300
+ *
301
+ * The values for a DELTA metric are based only on the time interval
302
+ * associated with one measurement cycle. There is no dependency on
303
+ * previous measurements like is the case for CUMULATIVE metrics.
304
+ *
305
+ * For example, consider a system measuring the number of requests that
306
+ * it receives and reports the sum of these requests every second as a
307
+ * DELTA metric:
308
+ *
309
+ * 1. The system starts receiving at time=t_0.
310
+ * 2. A request is received, the system measures 1 request.
311
+ * 3. A request is received, the system measures 1 request.
312
+ * 4. A request is received, the system measures 1 request.
313
+ * 5. The 1 second collection cycle ends. A metric is exported for the
314
+ * number of requests received over the interval of time t_0 to
315
+ * t_0+1 with a value of 3.
316
+ * 6. A request is received, the system measures 1 request.
317
+ * 7. A request is received, the system measures 1 request.
318
+ * 8. The 1 second collection cycle ends. A metric is exported for the
319
+ * number of requests received over the interval of time t_0+1 to
320
+ * t_0+2 with a value of 2.
321
+ */
322
+ AggregationTemporality[AggregationTemporality["AGGREGATION_TEMPORALITY_DELTA"] = 1] = "AGGREGATION_TEMPORALITY_DELTA";
323
+ /**
324
+ * AGGREGATION_TEMPORALITY_CUMULATIVE - CUMULATIVE is an AggregationTemporality for a metric aggregator which
325
+ * reports changes since a fixed start time. This means that current values
326
+ * of a CUMULATIVE metric depend on all previous measurements since the
327
+ * start time. Because of this, the sender is required to retain this state
328
+ * in some form. If this state is lost or invalidated, the CUMULATIVE metric
329
+ * values MUST be reset and a new fixed start time following the last
330
+ * reported measurement time sent MUST be used.
331
+ *
332
+ * For example, consider a system measuring the number of requests that
333
+ * it receives and reports the sum of these requests every second as a
334
+ * CUMULATIVE metric:
335
+ *
336
+ * 1. The system starts receiving at time=t_0.
337
+ * 2. A request is received, the system measures 1 request.
338
+ * 3. A request is received, the system measures 1 request.
339
+ * 4. A request is received, the system measures 1 request.
340
+ * 5. The 1 second collection cycle ends. A metric is exported for the
341
+ * number of requests received over the interval of time t_0 to
342
+ * t_0+1 with a value of 3.
343
+ * 6. A request is received, the system measures 1 request.
344
+ * 7. A request is received, the system measures 1 request.
345
+ * 8. The 1 second collection cycle ends. A metric is exported for the
346
+ * number of requests received over the interval of time t_0 to
347
+ * t_0+2 with a value of 5.
348
+ * 9. The system experiences a fault and loses state.
349
+ * 10. The system recovers and resumes receiving at time=t_1.
350
+ * 11. A request is received, the system measures 1 request.
351
+ * 12. The 1 second collection cycle ends. A metric is exported for the
352
+ * number of requests received over the interval of time t_1 to
353
+ * t_0+1 with a value of 1.
354
+ *
355
+ * Note: Even though, when reporting changes since last report time, using
356
+ * CUMULATIVE is valid, it is not recommended. This may cause problems for
357
+ * systems that do not use start_time to determine when the aggregation
358
+ * value was reset (e.g. Prometheus).
359
+ */
360
+ AggregationTemporality[AggregationTemporality["AGGREGATION_TEMPORALITY_CUMULATIVE"] = 2] = "AGGREGATION_TEMPORALITY_CUMULATIVE";
361
+ AggregationTemporality[AggregationTemporality["UNRECOGNIZED"] = -1] = "UNRECOGNIZED";
362
+ return AggregationTemporality;
363
+ }({});
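The DELTA vs. CUMULATIVE walkthrough above boils down to whether each point stands alone or carries a running total since a fixed start time; a sketch with invented per-second counts:

// Per-interval request counts, as a DELTA exporter would report them.
const deltaPoints = [3, 2, 4];

// The CUMULATIVE view of the same stream keeps a running total since the fixed start time.
const cumulativePoints: number[] = [];
for (const d of deltaPoints) {
  cumulativePoints.push((cumulativePoints[cumulativePoints.length - 1] ?? 0) + d);
}

console.log(deltaPoints);      // [3, 2, 4]
console.log(cumulativePoints); // [3, 5, 9]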
364
+ /**
365
+ * DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
366
+ * bit-field representing 32 distinct boolean flags. Each flag defined in this
367
+ * enum is a bit-mask. To test the presence of a single flag in the flags of
368
+ * a data point, for example, use an expression like:
369
+ *
370
+ * (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
371
+ */
372
+ let DataPointFlags = /* @__PURE__ */ function(DataPointFlags) {
373
+ /**
374
+ * DATA_POINT_FLAGS_DO_NOT_USE - The zero value for the enum. Should not be used for comparisons.
375
+ * Instead use bitwise "and" with the appropriate mask as shown above.
376
+ */
377
+ DataPointFlags[DataPointFlags["DATA_POINT_FLAGS_DO_NOT_USE"] = 0] = "DATA_POINT_FLAGS_DO_NOT_USE";
378
+ /**
379
+ * DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK - This DataPoint is valid but has no recorded value. This value
380
+ * SHOULD be used to reflect explicitly missing data in a series, as
381
+ * for an equivalent to the Prometheus "staleness marker".
382
+ */
383
+ DataPointFlags[DataPointFlags["DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK"] = 1] = "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK";
384
+ DataPointFlags[DataPointFlags["UNRECOGNIZED"] = -1] = "UNRECOGNIZED";
385
+ return DataPointFlags;
386
+ }({});
387
+
388
+ //#endregion
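A sketch of the staleness-marker check described in the DataPointFlags comment above (the flags value is invented; `DataPointFlags` is re-exported below under the `otlp` namespace):

import { otlp } from "@kopai/core";

const { DataPointFlags } = otlp;

const pointFlags = 1; // hypothetical DataPoint.flags value

// Test the bit with a mask, as the comment shows, rather than comparing the whole value.
const noRecordedValue =
  (pointFlags & DataPointFlags.DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) ===
  DataPointFlags.DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK;

console.log(noRecordedValue); // true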
389
+ //#region src/otlp-metrics-generated.ts
390
+ var otlp_metrics_generated_exports = {};
391
+
392
+ //#endregion
393
+ //#region src/otlp-zod.ts
394
+ var otlp_zod_exports = /* @__PURE__ */ __exportAll({
395
+ aggregationTemporalitySchema: () => aggregationTemporalitySchema,
396
+ anyValueSchema: () => anyValueSchema,
397
+ arrayValueSchema: () => arrayValueSchema,
398
+ dataPointFlagsSchema: () => dataPointFlagsSchema,
399
+ entityRefSchema: () => entityRefSchema,
400
+ exponentialHistogramDataPointBucketsSchema: () => exponentialHistogramDataPointBucketsSchema$1,
401
+ instrumentationScopeSchema: () => instrumentationScopeSchema,
402
+ keyValueListSchema: () => keyValueListSchema,
403
+ keyValueSchema: () => keyValueSchema,
404
+ logRecordFlagsSchema: () => logRecordFlagsSchema,
405
+ logRecordSchema: () => logRecordSchema,
406
+ logsDataSchema: () => logsDataSchema,
407
+ resourceLogsSchema: () => resourceLogsSchema,
408
+ resourceSchema: () => resourceSchema,
409
+ resourceSpansSchema: () => resourceSpansSchema,
410
+ scopeLogsSchema: () => scopeLogsSchema,
411
+ scopeSpansSchema: () => scopeSpansSchema,
412
+ severityNumberSchema: () => severityNumberSchema,
413
+ spanEventSchema: () => spanEventSchema,
414
+ spanFlagsSchema: () => spanFlagsSchema,
415
+ spanKindSchema: () => spanKindSchema,
416
+ spanLinkSchema: () => spanLinkSchema,
417
+ spanSchema: () => spanSchema,
418
+ statusCodeSchema: () => statusCodeSchema,
419
+ statusSchema: () => statusSchema,
420
+ summaryDataPointSchema: () => summaryDataPointSchema$1,
421
+ summaryDataPointValueAtQuantileSchema: () => summaryDataPointValueAtQuantileSchema$1,
422
+ summarySchema: () => summarySchema$1,
423
+ tracesDataSchema: () => tracesDataSchema
424
+ });
425
+ const entityRefSchema = z.object({
426
+ schemaUrl: z.union([z.string(), z.undefined()]).optional(),
427
+ type: z.union([z.string(), z.undefined()]).optional(),
428
+ idKeys: z.union([z.array(z.string()), z.undefined()]).optional(),
429
+ descriptionKeys: z.union([z.array(z.string()), z.undefined()]).optional()
430
+ });
431
+ const spanFlagsSchema = z.enum(SpanFlags);
432
+ const spanKindSchema = z.enum(SpanKind);
433
+ const statusCodeSchema = z.enum(StatusCode);
434
+ const severityNumberSchema = z.enum(SeverityNumber);
435
+ const logRecordFlagsSchema = z.enum(LogRecordFlags);
436
+ const aggregationTemporalitySchema = z.union([z.enum(AggregationTemporality), z.number()]);
437
+ const dataPointFlagsSchema = z.union([z.enum(DataPointFlags), z.number()]);
438
+ const exponentialHistogramDataPointBucketsSchema$1 = z.object({
439
+ offset: z.union([z.number(), z.undefined()]).optional(),
440
+ bucketCounts: z.union([z.array(z.union([z.string(), z.number()])), z.undefined()]).optional()
441
+ });
442
+ const summaryDataPointValueAtQuantileSchema$1 = z.object({
443
+ quantile: z.union([z.number(), z.undefined()]).optional(),
444
+ value: z.union([z.number(), z.undefined()]).optional()
445
+ });
446
+ const statusSchema = z.object({
447
+ message: z.union([z.string(), z.undefined()]).optional(),
448
+ code: z.union([statusCodeSchema, z.undefined()]).optional()
449
+ });
450
+ const anyValueSchema = z.lazy(() => z.object({
451
+ stringValue: z.union([z.string(), z.undefined()]).optional(),
452
+ boolValue: z.union([z.boolean(), z.undefined()]).optional(),
453
+ intValue: z.union([
454
+ z.string(),
455
+ z.number(),
456
+ z.undefined()
457
+ ]).optional(),
458
+ doubleValue: z.union([z.number(), z.undefined()]).optional(),
459
+ arrayValue: z.union([arrayValueSchema, z.undefined()]).optional(),
460
+ kvlistValue: z.union([keyValueListSchema, z.undefined()]).optional(),
461
+ bytesValue: z.union([z.string(), z.undefined()]).optional()
462
+ }));
463
+ const arrayValueSchema = z.lazy(() => z.object({ values: z.union([z.array(anyValueSchema), z.undefined()]).optional() }));
464
+ const keyValueListSchema = z.lazy(() => z.object({ values: z.union([z.array(keyValueSchema), z.undefined()]).optional() }));
465
+ const keyValueSchema = z.lazy(() => z.object({
466
+ key: z.union([z.string(), z.undefined()]).optional(),
467
+ value: z.union([anyValueSchema, z.undefined()]).optional()
468
+ }));
469
+ const instrumentationScopeSchema = z.lazy(() => z.object({
470
+ name: z.union([z.string(), z.undefined()]).optional(),
471
+ version: z.union([z.string(), z.undefined()]).optional(),
472
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
473
+ droppedAttributesCount: z.union([z.number(), z.undefined()]).optional()
474
+ }));
475
+ const resourceSchema = z.lazy(() => z.object({
476
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
477
+ droppedAttributesCount: z.union([z.number(), z.undefined()]).optional(),
478
+ entityRefs: z.union([z.array(entityRefSchema), z.undefined()]).optional()
479
+ }));
480
+ const tracesDataSchema = z.lazy(() => z.object({ resourceSpans: z.union([z.array(resourceSpansSchema), z.undefined()]).optional() }));
481
+ const resourceSpansSchema = z.lazy(() => z.object({
482
+ resource: z.union([resourceSchema, z.undefined()]).optional(),
483
+ scopeSpans: z.union([z.array(scopeSpansSchema), z.undefined()]).optional(),
484
+ schemaUrl: z.union([z.string(), z.undefined()]).optional()
485
+ }));
486
+ const scopeSpansSchema = z.lazy(() => z.object({
487
+ scope: z.union([instrumentationScopeSchema, z.undefined()]).optional(),
488
+ spans: z.union([z.array(spanSchema), z.undefined()]).optional(),
489
+ schemaUrl: z.union([z.string(), z.undefined()]).optional()
490
+ }));
491
+ const spanSchema = z.lazy(() => z.object({
492
+ traceId: z.union([z.string(), z.undefined()]).optional(),
493
+ spanId: z.union([z.string(), z.undefined()]).optional(),
494
+ traceState: z.union([z.string(), z.undefined()]).optional(),
495
+ parentSpanId: z.union([z.string(), z.undefined()]).optional(),
496
+ flags: z.union([z.number(), z.undefined()]).optional(),
497
+ name: z.union([z.string(), z.undefined()]).optional(),
498
+ kind: z.union([spanKindSchema, z.undefined()]).optional(),
499
+ startTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
500
+ endTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
501
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
502
+ droppedAttributesCount: z.union([z.number(), z.undefined()]).optional(),
503
+ events: z.union([z.array(spanEventSchema), z.undefined()]).optional(),
504
+ droppedEventsCount: z.union([z.number(), z.undefined()]).optional(),
505
+ links: z.union([z.array(spanLinkSchema), z.undefined()]).optional(),
506
+ droppedLinksCount: z.union([z.number(), z.undefined()]).optional(),
507
+ status: z.union([statusSchema, z.undefined()]).optional()
508
+ }));
509
+ const spanEventSchema = z.lazy(() => z.object({
510
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
511
+ name: z.union([z.string(), z.undefined()]).optional(),
512
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
513
+ droppedAttributesCount: z.union([z.number(), z.undefined()]).optional()
514
+ }));
515
+ const spanLinkSchema = z.lazy(() => z.object({
516
+ traceId: z.union([z.string(), z.undefined()]).optional(),
517
+ spanId: z.union([z.string(), z.undefined()]).optional(),
518
+ traceState: z.union([z.string(), z.undefined()]).optional(),
519
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
520
+ droppedAttributesCount: z.union([z.number(), z.undefined()]).optional(),
521
+ flags: z.union([z.number(), z.undefined()]).optional()
522
+ }));
523
+ const logsDataSchema = z.lazy(() => z.object({ resourceLogs: z.union([z.array(resourceLogsSchema), z.undefined()]).optional() }));
524
+ const resourceLogsSchema = z.lazy(() => z.object({
525
+ resource: z.union([resourceSchema, z.undefined()]).optional(),
526
+ scopeLogs: z.union([z.array(scopeLogsSchema), z.undefined()]).optional(),
527
+ schemaUrl: z.union([z.string(), z.undefined()]).optional()
528
+ }));
529
+ const scopeLogsSchema = z.lazy(() => z.object({
530
+ scope: z.union([instrumentationScopeSchema, z.undefined()]).optional(),
531
+ logRecords: z.union([z.array(logRecordSchema), z.undefined()]).optional(),
532
+ schemaUrl: z.union([z.string(), z.undefined()]).optional()
533
+ }));
534
+ const logRecordSchema = z.lazy(() => z.object({
535
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
536
+ observedTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
537
+ severityNumber: z.union([severityNumberSchema, z.undefined()]).optional(),
538
+ severityText: z.union([z.string(), z.undefined()]).optional(),
539
+ body: z.union([anyValueSchema, z.undefined()]).optional(),
540
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
541
+ droppedAttributesCount: z.union([z.number(), z.undefined()]).optional(),
542
+ flags: z.union([z.number(), z.undefined()]).optional(),
543
+ traceId: z.union([z.string(), z.undefined()]).optional(),
544
+ spanId: z.union([z.string(), z.undefined()]).optional(),
545
+ eventName: z.union([z.string(), z.undefined()]).optional()
546
+ }));
547
+ const summarySchema$1 = z.lazy(() => z.object({ dataPoints: z.union([z.array(summaryDataPointSchema$1), z.undefined()]).optional() }));
548
+ const summaryDataPointSchema$1 = z.lazy(() => z.object({
549
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
550
+ startTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
551
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
552
+ count: z.union([
553
+ z.string(),
554
+ z.number(),
555
+ z.undefined()
556
+ ]).optional(),
557
+ sum: z.union([z.number(), z.undefined()]).optional(),
558
+ quantileValues: z.union([z.array(summaryDataPointValueAtQuantileSchema$1), z.undefined()]).optional(),
559
+ flags: z.union([z.number(), z.undefined()]).optional()
560
+ }));
561
+
562
+ //#endregion
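For illustration, a minimal OTLP/JSON traces payload run through the schemas above (imported under the `otlpZod` namespace exported at the bottom of this file; the payload values are made up):

import { otlpZod } from "@kopai/core";

const payload = {
  resourceSpans: [{
    resource: { attributes: [{ key: "service.name", value: { stringValue: "checkout" } }] },
    scopeSpans: [{
      scope: { name: "example-instrumentation" },
      spans: [{
        traceId: "0123456789abcdef0123456789abcdef",
        spanId: "0123456789abcdef",
        name: "GET /health",
        startTimeUnixNano: "1700000000000000000",
        endTimeUnixNano: "1700000000500000000",
      }],
    }],
  }],
};

console.log(otlpZod.tracesDataSchema.safeParse(payload).success); // true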
563
+ //#region src/otlp-metrics-zod.ts
564
+ var otlp_metrics_zod_exports = /* @__PURE__ */ __exportAll({
565
+ exemplarSchema: () => exemplarSchema,
566
+ exponentialHistogramDataPointBucketsSchema: () => exponentialHistogramDataPointBucketsSchema,
567
+ exponentialHistogramDataPointSchema: () => exponentialHistogramDataPointSchema,
568
+ exponentialHistogramSchema: () => exponentialHistogramSchema,
569
+ gaugeSchema: () => gaugeSchema,
570
+ histogramDataPointSchema: () => histogramDataPointSchema,
571
+ histogramSchema: () => histogramSchema,
572
+ metricSchema: () => metricSchema,
573
+ metricsDataSchema: () => metricsDataSchema,
574
+ numberDataPointSchema: () => numberDataPointSchema,
575
+ resourceMetricsSchema: () => resourceMetricsSchema,
576
+ scopeMetricsSchema: () => scopeMetricsSchema,
577
+ sumSchema: () => sumSchema,
578
+ summaryDataPointSchema: () => summaryDataPointSchema,
579
+ summaryDataPointValueAtQuantileSchema: () => summaryDataPointValueAtQuantileSchema,
580
+ summarySchema: () => summarySchema
581
+ });
582
+ /**
583
+ * Represents the value at a given quantile of a distribution.
584
+ *
585
+ * To record Min and Max values following conventions are used:
586
+ * - The 1.0 quantile is equivalent to the maximum value observed.
587
+ * - The 0.0 quantile is equivalent to the minimum value observed.
588
+ *
589
+ * See the following issue for more context:
590
+ * https://github.com/open-telemetry/opentelemetry-proto/issues/125
591
+ */
592
+ const summaryDataPointValueAtQuantileSchema = z.object({
593
+ quantile: z.union([z.number(), z.undefined()]).optional(),
594
+ value: z.union([z.number(), z.undefined()]).optional()
595
+ });
596
+ /**
597
+ * Buckets are a set of bucket counts, encoded in a contiguous array
598
+ * of counts.
599
+ */
600
+ const exponentialHistogramDataPointBucketsSchema = z.object({
601
+ offset: z.union([z.number(), z.undefined()]).optional(),
602
+ bucketCounts: z.union([z.array(z.union([z.string(), z.number()])), z.undefined()]).optional()
603
+ });
604
+ /**
605
+ * A representation of an exemplar, which is a sample input measurement.
606
+ * Exemplars also hold information about the environment when the measurement
607
+ * was recorded, for example the span and trace ID of the active span when the
608
+ * exemplar was recorded.
609
+ */
610
+ const exemplarSchema = z.lazy(() => z.object({
611
+ filteredAttributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
612
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
613
+ asDouble: z.union([z.number(), z.undefined()]).optional(),
614
+ asInt: z.union([z.string(), z.undefined()]).optional(),
615
+ spanId: z.union([z.string(), z.undefined()]).optional(),
616
+ traceId: z.union([z.instanceof(Uint8Array), z.undefined()]).optional()
617
+ }));
618
+ /**
619
+ * NumberDataPoint is a single data point in a timeseries that describes the
620
+ * time-varying scalar value of a metric.
621
+ */
622
+ const numberDataPointSchema = z.lazy(() => z.object({
623
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
624
+ startTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
625
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
626
+ asDouble: z.union([z.number(), z.undefined()]).optional(),
627
+ asInt: z.union([z.string(), z.undefined()]).optional(),
628
+ exemplars: z.union([z.array(exemplarSchema), z.undefined()]).optional(),
629
+ flags: z.union([z.number(), z.undefined()]).optional()
630
+ }));
631
+ /**
632
+ * HistogramDataPoint is a single data point in a timeseries that describes the
633
+ * time-varying values of a Histogram. A Histogram contains summary statistics
634
+ * for a population of values, it may optionally contain the distribution of
635
+ * those values across a set of buckets.
636
+ *
637
+ * If the histogram contains the distribution of values, then both
638
+ * "explicit_bounds" and "bucket counts" fields must be defined.
639
+ * If the histogram does not contain the distribution of values, then both
640
+ * "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
641
+ * "sum" are known.
642
+ */
643
+ const histogramDataPointSchema = z.lazy(() => z.object({
644
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
645
+ startTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
646
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
647
+ count: z.union([
648
+ z.string(),
649
+ z.number(),
650
+ z.undefined()
651
+ ]).optional(),
652
+ sum: z.union([z.number(), z.undefined()]).optional(),
653
+ bucketCounts: z.union([z.array(z.union([z.string(), z.number()])), z.undefined()]).optional(),
654
+ explicitBounds: z.union([z.array(z.number()), z.undefined()]).optional(),
655
+ exemplars: z.union([z.array(exemplarSchema), z.undefined()]).optional(),
656
+ flags: z.union([z.number(), z.undefined()]).optional(),
657
+ min: z.union([z.number(), z.undefined()]).optional(),
658
+ max: z.union([z.number(), z.undefined()]).optional()
659
+ }));
660
+ /**
661
+ * ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
662
+ * time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains
663
+ * summary statistics for a population of values, it may optionally contain the
664
+ * distribution of those values across a set of buckets.
665
+ */
666
+ const exponentialHistogramDataPointSchema = z.lazy(() => z.object({
667
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
668
+ startTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
669
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
670
+ count: z.union([
671
+ z.string(),
672
+ z.number(),
673
+ z.undefined()
674
+ ]).optional(),
675
+ sum: z.union([z.number(), z.undefined()]).optional(),
676
+ scale: z.union([z.number(), z.undefined()]).optional(),
677
+ zeroCount: z.union([
678
+ z.string(),
679
+ z.number(),
680
+ z.undefined()
681
+ ]).optional(),
682
+ positive: z.union([exponentialHistogramDataPointBucketsSchema, z.undefined()]).optional(),
683
+ negative: z.union([exponentialHistogramDataPointBucketsSchema, z.undefined()]).optional(),
684
+ flags: z.union([z.number(), z.undefined()]).optional(),
685
+ exemplars: z.union([z.array(exemplarSchema), z.undefined()]).optional(),
686
+ min: z.union([z.number(), z.undefined()]).optional(),
687
+ max: z.union([z.number(), z.undefined()]).optional(),
688
+ zeroThreshold: z.union([z.number(), z.undefined()]).optional()
689
+ }));
690
+ /**
691
+ * SummaryDataPoint is a single data point in a timeseries that describes the
692
+ * time-varying values of a Summary metric. The count and sum fields represent
693
+ * cumulative values.
694
+ */
695
+ const summaryDataPointSchema = z.lazy(() => z.object({
696
+ attributes: z.union([z.array(keyValueSchema), z.undefined()]).optional(),
697
+ startTimeUnixNano: z.union([z.string(), z.undefined()]).optional(),
698
+ timeUnixNano: z.union([z.string(), z.undefined()]).optional(),
699
+ count: z.union([
700
+ z.string(),
701
+ z.number(),
702
+ z.undefined()
703
+ ]).optional(),
704
+ sum: z.union([z.number(), z.undefined()]).optional(),
705
+ quantileValues: z.union([z.array(summaryDataPointValueAtQuantileSchema), z.undefined()]).optional(),
706
+ flags: z.union([z.number(), z.undefined()]).optional()
707
+ }));
708
+ /**
709
+ * Gauge represents the type of a scalar metric that always exports the
710
+ * "current value" for every data point. It should be used for an "unknown"
711
+ * aggregation.
712
+ *
713
+ * A Gauge does not support different aggregation temporalities. Given the
714
+ * aggregation is unknown, points cannot be combined using the same
715
+ * aggregation, regardless of aggregation temporalities. Therefore,
716
+ * AggregationTemporality is not included. Consequently, this also means
717
+ * "StartTimeUnixNano" is ignored for all data points.
718
+ */
719
+ const gaugeSchema = z.lazy(() => z.object({ dataPoints: z.union([z.array(numberDataPointSchema), z.undefined()]).optional() }));
720
+ /**
721
+ * Sum represents the type of a scalar metric that is calculated as a sum of all
722
+ * reported measurements over a time interval.
723
+ */
724
+ const sumSchema = z.lazy(() => z.object({
725
+ dataPoints: z.union([z.array(numberDataPointSchema), z.undefined()]).optional(),
726
+ aggregationTemporality: z.union([aggregationTemporalitySchema, z.undefined()]).optional(),
727
+ isMonotonic: z.union([z.boolean(), z.undefined()]).optional()
728
+ }));
729
+ /**
730
+ * Histogram represents the type of a metric that is calculated by aggregating
731
+ * as a Histogram of all reported measurements over a time interval.
732
+ */
733
+ const histogramSchema = z.lazy(() => z.object({
734
+ dataPoints: z.union([z.array(histogramDataPointSchema), z.undefined()]).optional(),
735
+ aggregationTemporality: z.union([aggregationTemporalitySchema, z.undefined()]).optional()
736
+ }));
737
+ /**
738
+ * ExponentialHistogram represents the type of a metric that is calculated by aggregating
739
+ * as a ExponentialHistogram of all reported double measurements over a time interval.
740
+ */
741
+ const exponentialHistogramSchema = z.lazy(() => z.object({
742
+ dataPoints: z.union([z.array(exponentialHistogramDataPointSchema), z.undefined()]).optional(),
743
+ aggregationTemporality: z.union([aggregationTemporalitySchema, z.undefined()]).optional()
744
+ }));
745
+ /**
746
+ * Summary metric data are used to convey quantile summaries,
747
+ * a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
748
+ * and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
749
+ * data type. These data points cannot always be merged in a meaningful way.
750
+ * While they can be useful in some applications, histogram data points are
751
+ * recommended for new applications.
752
+ * Summary metrics do not have an aggregation temporality field. This is
753
+ * because the count and sum fields of a SummaryDataPoint are assumed to be
754
+ * cumulative values.
755
+ */
756
+ const summarySchema = z.lazy(() => z.object({ dataPoints: z.union([z.array(summaryDataPointSchema), z.undefined()]).optional() }));
757
+ /**
758
+ * Defines a Metric which has one or more timeseries. The following is a
759
+ * brief summary of the Metric data model. For more details, see:
760
+ *
761
+ * https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
762
+ *
763
+ * The data model and relation between entities is shown in the
764
+ * diagram below. Here, "DataPoint" is the term used to refer to any
765
+ * one of the specific data point value types, and "points" is the term used
766
+ * to refer to any one of the lists of points contained in the Metric.
767
+ *
768
+ * - Metric is composed of a metadata and data.
769
+ * - Metadata part contains a name, description, unit.
770
+ * - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
771
+ * - DataPoint contains timestamps, attributes, and one of the possible value type
772
+ * fields.
773
+ *
774
+ * Metric
775
+ * +------------+
776
+ * |name |
777
+ * |description |
778
+ * |unit | +------------------------------------+
779
+ * |data |---> |Gauge, Sum, Histogram, Summary, ... |
780
+ * +------------+ +------------------------------------+
781
+ *
782
+ * Data [One of Gauge, Sum, Histogram, Summary, ...]
783
+ * +-----------+
784
+ * |... | // Metadata about the Data.
785
+ * |points |--+
786
+ * +-----------+ |
787
+ * | +---------------------------+
788
+ * | |DataPoint 1 |
789
+ * v |+------+------+ +------+ |
790
+ * +-----+ ||label |label |...|label | |
791
+ * | 1 |-->||value1|value2|...|valueN| |
792
+ * +-----+ |+------+------+ +------+ |
793
+ * | . | |+-----+ |
794
+ * | . | ||value| |
795
+ * | . | |+-----+ |
796
+ * | . | +---------------------------+
797
+ * | . | .
798
+ * | . | .
799
+ * | . | .
800
+ * | . | +---------------------------+
801
+ * | . | |DataPoint M |
802
+ * +-----+ |+------+------+ +------+ |
803
+ * | M |-->||label |label |...|label | |
804
+ * +-----+ ||value1|value2|...|valueN| |
805
+ * |+------+------+ +------+ |
806
+ * |+-----+ |
807
+ * ||value| |
808
+ * |+-----+ |
809
+ * +---------------------------+
810
+ *
811
+ * Each distinct type of DataPoint represents the output of a specific
812
+ * aggregation function, the result of applying the DataPoint's
813
+ * associated function to one or more measurements.
814
+ *
815
+ * All DataPoint types have three common fields:
816
+ * - Attributes includes key-value pairs associated with the data point
817
+ * - TimeUnixNano is required, set to the end time of the aggregation
818
+ * - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
819
+ * having an AggregationTemporality field, as discussed below.
820
+ *
821
+ * Both TimeUnixNano and StartTimeUnixNano values are expressed as
822
+ * UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
823
+ *
824
+ * # TimeUnixNano
825
+ *
826
+ * This field is required, having consistent interpretation across
827
+ * DataPoint types. TimeUnixNano is the moment corresponding to when
828
+ * the data point's aggregate value was captured.
829
+ *
830
+ * Data points with the 0 value for TimeUnixNano SHOULD be rejected
831
+ * by consumers.
832
+ *
833
+ * # StartTimeUnixNano
834
+ *
835
+ * StartTimeUnixNano in general allows detecting when a sequence of
836
+ * observations is unbroken. This field indicates to consumers the
837
+ * start time for points with cumulative and delta
838
+ * AggregationTemporality, and it should be included whenever possible
839
+ * to support correct rate calculation. Although it may be omitted
840
+ * when the start time is truly unknown, setting StartTimeUnixNano is
841
+ * strongly encouraged.
842
+ */
843
+ const metricSchema = z.lazy(() => z.object({
844
+ name: z.union([z.string(), z.undefined()]).optional(),
845
+ description: z.union([z.string(), z.undefined()]).optional(),
846
+ unit: z.union([z.string(), z.undefined()]).optional(),
847
+ gauge: z.union([gaugeSchema, z.undefined()]).optional(),
848
+ sum: z.union([sumSchema, z.undefined()]).optional(),
849
+ histogram: z.union([histogramSchema, z.undefined()]).optional(),
850
+ exponentialHistogram: z.union([exponentialHistogramSchema, z.undefined()]).optional(),
851
+ summary: z.union([summarySchema, z.undefined()]).optional(),
852
+ metadata: z.union([z.array(keyValueSchema), z.undefined()]).optional()
853
+ }));
854
+ /** A collection of Metrics produced by an Scope. */
855
+ const scopeMetricsSchema = z.lazy(() => z.object({
856
+ scope: z.union([instrumentationScopeSchema, z.undefined()]).optional(),
857
+ metrics: z.union([z.array(metricSchema), z.undefined()]).optional(),
858
+ schemaUrl: z.union([z.string(), z.undefined()]).optional()
859
+ }));
860
+ /** A collection of ScopeMetrics from a Resource. */
861
+ const resourceMetricsSchema = z.lazy(() => z.object({
862
+ resource: z.union([resourceSchema, z.undefined()]).optional(),
863
+ scopeMetrics: z.union([z.array(scopeMetricsSchema), z.undefined()]).optional(),
864
+ schemaUrl: z.union([z.string(), z.undefined()]).optional()
865
+ }));
866
+ /**
867
+ * MetricsData represents the metrics data that can be stored in a persistent
868
+ * storage, OR can be embedded by other protocols that transfer OTLP metrics
869
+ * data but do not implement the OTLP protocol.
870
+ *
871
+ * MetricsData
872
+ * └─── ResourceMetrics
873
+ * ├── Resource
874
+ * ├── SchemaURL
875
+ * └── ScopeMetrics
876
+ * ├── Scope
877
+ * ├── SchemaURL
878
+ * └── Metric
879
+ * ├── Name
880
+ * ├── Description
881
+ * ├── Unit
882
+ * └── data
883
+ * ├── Gauge
884
+ * ├── Sum
885
+ * ├── Histogram
886
+ * ├── ExponentialHistogram
887
+ * └── Summary
888
+ *
889
+ * The main difference between this message and collector protocol is that
890
+ * in this message there will not be any "control" or "metadata" specific to
891
+ * OTLP protocol.
892
+ *
893
+ * When new fields are added into this message, the OTLP request MUST be updated
894
+ * as well.
895
+ */
896
+ const metricsDataSchema = z.lazy(() => z.object({ resourceMetrics: z.union([z.array(resourceMetricsSchema), z.undefined()]).optional() }));
897
+
898
+ //#endregion
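And the metrics counterpart: a minimal OTLP/JSON MetricsData payload with a single gauge, validated with the schemas above (imported under the `otlpMetricsZod` namespace; values are made up):

import { otlpMetricsZod } from "@kopai/core";

const payload = {
  resourceMetrics: [{
    resource: { attributes: [{ key: "service.name", value: { stringValue: "checkout" } }] },
    scopeMetrics: [{
      scope: { name: "example-instrumentation" },
      metrics: [{
        name: "process.memory.usage",
        unit: "By",
        gauge: { dataPoints: [{ timeUnixNano: "1700000000000000000", asInt: "52428800" }] },
      }],
    }],
  }],
};

console.log(otlpMetricsZod.metricsDataSchema.safeParse(payload).success); // true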
899
+ //#region src/data-filters-zod.ts
900
+ var data_filters_zod_exports = /* @__PURE__ */ __exportAll({
901
+ logsDataFilterSchema: () => logsDataFilterSchema,
902
+ metricsDataFilterSchema: () => metricsDataFilterSchema,
903
+ tracesDataFilterSchema: () => tracesDataFilterSchema
904
+ });
905
+ const tracesDataFilterSchema = z$1.object({
906
+ traceId: z$1.string().optional().describe("Unique identifier for a trace. All spans from the same trace share the same trace_id. The ID is a 16-byte array."),
907
+ spanId: z$1.string().optional().describe("Unique identifier for a span within a trace. The ID is an 8-byte array."),
908
+ parentSpanId: z$1.string().optional().describe("The span_id of this span's parent span. Empty if this is a root span."),
909
+ serviceName: z$1.string().optional().describe("Service name from resource attributes (service.name)."),
910
+ spanName: z$1.string().optional().describe("Description of the span's operation. E.g., qualified method name or file name with line number."),
911
+ spanKind: z$1.string().optional().describe("Type of span (INTERNAL, SERVER, CLIENT, PRODUCER, CONSUMER). Used to identify relationships between spans."),
912
+ statusCode: z$1.string().optional().describe("Status code (UNSET, OK, ERROR)."),
913
+ scopeName: z$1.string().optional().describe("Name denoting the instrumentation scope."),
914
+ timestampMin: z$1.string().optional().describe("Minimum start time of the span. UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Expressed as string in JSON."),
915
+ timestampMax: z$1.string().optional().describe("Maximum start time of the span. UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Expressed as string in JSON."),
916
+ durationMin: z$1.string().optional().describe("Minimum duration of the span in nanoseconds (end_time - start_time). Expressed as string in JSON."),
917
+ durationMax: z$1.string().optional().describe("Maximum duration of the span in nanoseconds (end_time - start_time). Expressed as string in JSON."),
918
+ spanAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Key/value pairs describing the span."),
919
+ resourceAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Attributes that describe the resource."),
920
+ eventsAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Attribute key/value pairs on the event."),
921
+ linksAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Attribute key/value pairs on the link."),
922
+ limit: z$1.number().int().positive().max(1e3).optional().describe("Max items to return. Default determined by datasource."),
923
+ cursor: z$1.string().optional().describe("Opaque cursor from previous response for next page."),
924
+ sortOrder: z$1.enum(["ASC", "DESC"]).optional().describe("Sort by timestamp. ASC = oldest first, DESC = newest first.")
925
+ });
926
+ const logsDataFilterSchema = z$1.object({
927
+ traceId: z$1.string().optional().describe("Unique identifier for a trace. All logs from the same trace share the same trace_id. The ID is a 16-byte array."),
928
+ spanId: z$1.string().optional().describe("Unique identifier for a span within a trace. The ID is an 8-byte array."),
929
+ serviceName: z$1.string().optional().describe("Service name from resource attributes (service.name)."),
930
+ scopeName: z$1.string().optional().describe("Name denoting the instrumentation scope."),
931
+ severityText: z$1.string().optional().describe("Severity text (also known as log level). Original string representation as known at the source."),
932
+ severityNumberMin: z$1.number().optional().describe("Minimum severity number (inclusive). Normalized to values described in Log Data Model."),
933
+ severityNumberMax: z$1.number().optional().describe("Maximum severity number (inclusive). Normalized to values described in Log Data Model."),
934
+ bodyContains: z$1.string().optional().describe("Filter logs where body contains this substring."),
935
+ timestampMin: z$1.string().optional().describe("Minimum time when the event occurred. UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Expressed as string in JSON."),
936
+ timestampMax: z$1.string().optional().describe("Maximum time when the event occurred. UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. Expressed as string in JSON."),
937
+ logAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Additional attributes that describe the specific event occurrence."),
938
+ resourceAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Attributes that describe the resource."),
939
+ scopeAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Attributes of the instrumentation scope."),
940
+ limit: z$1.number().int().positive().max(1e3).optional().describe("Max items to return. Default determined by datasource."),
941
+ cursor: z$1.string().optional().describe("Opaque cursor from previous response for next page."),
942
+ sortOrder: z$1.enum(["ASC", "DESC"]).optional().describe("Sort by timestamp. ASC = oldest first, DESC = newest first.")
943
+ });
944
+ const metricsDataFilterSchema = z$1.object({
945
+ metricType: z$1.enum([
946
+ "Gauge",
947
+ "Sum",
948
+ "Histogram",
949
+ "ExponentialHistogram",
950
+ "Summary"
951
+ ]).describe("Metric type to query."),
952
+ metricName: z$1.string().optional().describe("The name of the metric."),
953
+ serviceName: z$1.string().optional().describe("Service name from resource attributes (service.name)."),
954
+ scopeName: z$1.string().optional().describe("Name denoting the instrumentation scope."),
955
+ timeUnixMin: z$1.string().optional().describe("Minimum time when the data point was recorded. UNIX Epoch time in nanoseconds. Expressed as string in JSON."),
956
+ timeUnixMax: z$1.string().optional().describe("Maximum time when the data point was recorded. UNIX Epoch time in nanoseconds. Expressed as string in JSON."),
957
+ attributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Key/value pairs that uniquely identify the timeseries."),
958
+ resourceAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Attributes that describe the resource."),
959
+ scopeAttributes: z$1.record(z$1.string(), z$1.string()).optional().describe("Attributes of the instrumentation scope."),
960
+ limit: z$1.number().int().positive().max(1e3).optional().describe("Max items to return. Default determined by datasource."),
961
+ cursor: z$1.string().optional().describe("Opaque cursor from previous response for next page."),
962
+ sortOrder: z$1.enum(["ASC", "DESC"]).optional().describe("Sort by timestamp. ASC = oldest first, DESC = newest first.")
963
+ });
964
+
965
+ //#endregion
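A sketch of building a validated traces filter with the schema above (imported under the `dataFilterSchemas` namespace; the filter values are made up):

import { dataFilterSchemas } from "@kopai/core";

// Hypothetical query: error spans for one service in a six-minute window, newest first.
const filter = dataFilterSchemas.tracesDataFilterSchema.parse({
  serviceName: "checkout",
  statusCode: "ERROR",
  timestampMin: "1700000000000000000",
  timestampMax: "1700000360000000000",
  limit: 100,
  sortOrder: "DESC",
});

console.log(filter.limit, filter.sortOrder); // 100 "DESC"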
966
+ //#region src/index.ts
967
+ const name = "@kopai/core";
968
+
969
+ //#endregion
970
+ export { data_filters_zod_exports as dataFilterSchemas, telemetry_datasource_exports as datasource, denormalized_signals_zod_exports as denormalizedSignals, name, otlp_generated_exports as otlp, otlp_metrics_generated_exports as otlpMetrics, otlp_metrics_zod_exports as otlpMetricsZod, otlp_zod_exports as otlpZod };
971
+ //# sourceMappingURL=index.mjs.map
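Taken together, the module's public surface is the set of namespaces in the export statement above; a sketch of importing them (assuming the package entry resolves to this file):

import {
  name,                // the literal string "@kopai/core"
  denormalizedSignals, // otelTracesSchema, otelLogsSchema, otelMetricsSchema, ...
  otlp,                // generated enums: SpanFlags, SpanKind, StatusCode, SeverityNumber, ...
  otlpZod,             // OTLP traces/logs schemas
  otlpMetricsZod,      // OTLP metrics schemas
  dataFilterSchemas,   // tracesDataFilterSchema, logsDataFilterSchema, metricsDataFilterSchema
  datasource,          // empty object in this build
  otlpMetrics,         // empty object in this build
} from "@kopai/core";

console.log(name); // "@kopai/core"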