@rheonic/sdk 0.1.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md ADDED
@@ -0,0 +1,21 @@
1
+ # Changelog
2
+
3
+ All notable changes to `@rheonic/sdk` will be documented in this file.
4
+
5
+ ## Unreleased
6
+
7
+ - Publish-ready changelog entries will be added here for the next release.
8
+
9
+ ## 0.1.0
10
+
11
+ ### Added
12
+ - Initial public beta release of the Rheonic Node SDK.
13
+ - Manual event capture and protect preflight client APIs.
14
+ - OpenAI, Anthropic, and Google provider instrumentation helpers.
15
+ - Structured SDK logging with trace correlation.
16
+
17
+ ### Changed
18
+ - Packaged contents are limited to the compiled SDK, README, and changelog for clean npm publishing.
19
+
20
+ ### Docs
21
+ - Added install, configuration, and minimal integration guidance for beta users.
package/README.md ADDED
@@ -0,0 +1,150 @@
1
+ # Rheonic Node SDK
2
+
3
+ The Rheonic Node SDK runs inside your app process, captures provider telemetry, and can request protect preflight decisions before provider calls.
4
+
5
+ ## Install
6
+
7
+ Beta prerelease install:
8
+
9
+ ```bash
10
+ npm install @rheonic/sdk@next
11
+ ```
12
+
13
+ Compatibility:
14
+ - Node.js 18+
15
+ - One of the supported provider SDKs: `openai`, `@anthropic-ai/sdk`, or `@google/generative-ai`
16
+
17
+ Beta note:
18
+ - Public beta releases may add guardrail fields and provider wrappers before `1.0.0`.
19
+
20
+ ## Configuration
21
+
22
+ - Required: `ingestKey`
23
+ - Optional for local development: `baseUrl` (defaults to `RHEONIC_BASE_URL`, else `http://localhost:8000`)
24
+ - Optional: `environment` (default `dev`)
25
+
26
+ For hosted beta, staging, or production deployments, set `RHEONIC_BASE_URL` or pass `baseUrl` explicitly. The localhost default is intended only for local development.
27
+
28
+ Provider/model validation: SDK wrappers fail fast with `RHEONICValidationError` when provider is missing/unsupported or model is missing/empty. Supported providers are `openai`, `anthropic`, and `google`. Model naming is not pattern-validated so future vendor naming changes remain compatible.
29
+
30
+ ## Integration Recommendation
31
+
32
+ Create one long-lived SDK client at app startup and reuse it for all provider calls. The SDK prewarms tokenizer state and the backend connection on client initialization, so reusing a single client avoids paying protect cold-start cost on every request.
33
+
34
+ ## Logging
35
+
36
+ The SDK emits structured JSON logs to stdout. You do not need to configure file logging.
37
+
38
+ Example log:
39
+
40
+ ```json
41
+ {
42
+ "timestamp": "2026-03-18T09:20:15.145102+00:00",
43
+ "level": "info",
44
+ "service": "sdk-node",
45
+ "env": "staging",
46
+ "trace_id": "f4ac8b6b-6f8d-4f4c-b54f-3c2c2f76a27b",
47
+ "span_id": "9f12db3a1d204f8f",
48
+ "event": "sdk_client_initialized",
49
+ "message": "SDK client initialized",
50
+ "metadata": {}
51
+ }
52
+ ```
53
+
54
+ Notes:
55
+ - backend requests automatically include `X-Trace-ID`,
56
+ - SDK logs share that `trace_id` so you can correlate SDK, backend, worker, and webhook activity,
57
+ - sensitive fields such as API keys and tokens are redacted.
58
+
59
+ ## Integration Path 1: Manual Capture (generic)
60
+
61
+ ```ts
62
+ import { buildEvent, createClient } from "@rheonic/sdk";
63
+
64
+ const client = createClient({
65
+ baseUrl: process.env.RHEONIC_BASE_URL!,
66
+ ingestKey: process.env.RHEONIC_INGEST_KEY!,
67
+ });
68
+
69
+ await client.captureEvent(
70
+ buildEvent({
71
+ provider: "openai",
72
+ model: "gpt-4o-mini",
73
+ request: { endpoint: "/chat", input_tokens: 12 },
74
+ response: { total_tokens: 32, latency_ms: 140, http_status: 200 },
75
+ }),
76
+ );
77
+ ```
78
+
79
+ Initialize `client` once during app startup, then reuse that same instance for manual capture and provider instrumentation.
80
+
81
+ Minimal protect preflight usage:
82
+
83
+ ```ts
84
+ const decision = await client.protect({
85
+ provider: "openai",
86
+ model: "gpt-4o-mini",
87
+ feature: "assistant",
88
+ inputTokensEstimate: 32,
89
+ maxOutputTokens: 256,
90
+ });
91
+ ```
92
+
93
+ ## Integration Path 2: OpenAI instrumentation (convenience wrapper)
94
+
95
+ ```ts
96
+ import OpenAI from "openai";
97
+ import { createClient, instrumentOpenAI } from "@rheonic/sdk";
98
+
99
+ const rheonicClient = createClient({
100
+ baseUrl: process.env.RHEONIC_BASE_URL!,
101
+ ingestKey: process.env.RHEONIC_INGEST_KEY!,
102
+ });
103
+ const openai = instrumentOpenAI(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }), {
104
+ client: rheonicClient,
105
+ endpoint: "/chat/completions",
106
+ feature: "assistant",
107
+ });
108
+ ```
109
+
110
+ ## Integration Path 3: Anthropic and Google wrappers
111
+
112
+ ```ts
113
+ import Anthropic from "@anthropic-ai/sdk";
114
+ import { GoogleGenerativeAI } from "@google/generative-ai";
115
+ import { createClient } from "@rheonic/sdk";
116
+
117
+ const client = createClient({
118
+ baseUrl: process.env.RHEONIC_BASE_URL!,
119
+ ingestKey: process.env.RHEONIC_INGEST_KEY!,
120
+ });
121
+
122
+ const anthropic = client.instrumentAnthropic(new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY }));
123
+ await anthropic.messages.create({
124
+ model: "claude-3-5-sonnet-latest",
125
+ max_tokens: 256,
126
+ messages: [{ role: "user", content: "Hello Claude" }],
127
+ });
128
+
129
+ const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);
130
+ const googleModel = client.instrumentGoogle(genAI.getGenerativeModel({ model: "gemini-1.5-pro" }));
131
+ await googleModel.generateContent("Hello Google model");
132
+ ```
133
+
134
+ Runtime call path:
135
+ - SDK instrumentation calls `POST /api/v1/protect/decision` then `POST /api/v1/events`.
136
+ - Project mode in dashboard controls decision behavior:
137
+ - Observe: allow only.
138
+ - Protect: allow/warn/block with cooldown.
139
+
140
+ ## Provider SDKs
141
+
142
+ Install only the provider SDKs you actually use alongside `@rheonic/sdk`:
143
+
144
+ ```bash
145
+ npm install openai
146
+ npm install @anthropic-ai/sdk
147
+ npm install @google/generative-ai
148
+ ```
149
+
150
+ Beta prereleases use semver prerelease format such as `0.2.0-beta.1` and are published under the `next` tag.
@@ -0,0 +1,59 @@
1
+ import { type ProtectContext, type ProtectEvaluation, type ProtectFailMode } from "./protectEngine.js";
2
+ import { type AnthropicInstrumentationOptions } from "./providers/anthropicAdapter.js";
3
+ import { type GoogleInstrumentationOptions } from "./providers/googleAdapter.js";
4
+ import { type OpenAIInstrumentationOptions } from "./providers/openaiAdapter.js";
5
+ import type { EventPayload } from "./eventBuilder.js";
6
+ export type OverflowPolicy = "drop_oldest" | "drop_newest";
7
+ export interface ClientStats {
8
+ queued: number;
9
+ dropped: number;
10
+ sent: number;
11
+ failed: number;
12
+ }
13
+ export interface ClientConfig {
14
+ baseUrl?: string;
15
+ ingestKey: string;
16
+ environment?: string;
17
+ flushIntervalMs?: number;
18
+ maxQueueSize?: number;
19
+ overflowPolicy?: OverflowPolicy;
20
+ requestTimeoutMs?: number;
21
+ protectFailMode?: ProtectFailMode;
22
+ debug?: boolean;
23
+ }
24
+ export declare class Client {
25
+ readonly baseUrl: string;
26
+ readonly ingestKey: string;
27
+ readonly environment: string;
28
+ private readonly flushIntervalMs;
29
+ private readonly maxQueueSize;
30
+ private readonly overflowPolicy;
31
+ private readonly requestTimeoutMs;
32
+ private readonly protectEngine;
33
+ private readonly debug;
34
+ private queue;
35
+ private isFlushing;
36
+ private timer;
37
+ private dropped;
38
+ private sent;
39
+ private failed;
40
+ private isClosed;
41
+ private warmupPromise;
42
+ private warmupCompleted;
43
+ constructor(config: ClientConfig);
44
+ captureEvent(event: EventPayload): Promise<void>;
45
+ getStats(): ClientStats;
46
+ flush(): Promise<void>;
47
+ evaluateProtectDecision(context: ProtectContext): Promise<ProtectEvaluation>;
48
+ warmConnections(): Promise<void>;
49
+ private ensureWarmup;
50
+ private runWarmup;
51
+ instrumentOpenAI<T extends Record<string, any>>(openaiClient: T, options?: Omit<OpenAIInstrumentationOptions, "client">): T;
52
+ instrumentAnthropic<T extends Record<string, any>>(anthropicClient: T, options?: Omit<AnthropicInstrumentationOptions, "client">): T;
53
+ instrumentGoogle<T extends Record<string, any>>(googleModel: T, options?: Omit<GoogleInstrumentationOptions, "client">): T;
54
+ flushWithTimeout(timeoutMs?: number): Promise<void>;
55
+ close(): void;
56
+ private sendEvent;
57
+ private sendEventOnce;
58
+ debugLog(message: string, meta?: Record<string, unknown>): void;
59
+ }
package/dist/client.js ADDED
@@ -0,0 +1,305 @@
1
+ import { sdkNodeConfig } from "./config.js";
2
+ import { requestJson } from "./httpTransport.js";
3
+ import { bindTraceContext, emitLog, generateSpanId, generateTraceId, getSpanId, getTraceId } from "./logger.js";
4
+ import { ProtectEngine } from "./protectEngine.js";
5
+ import { instrumentAnthropic as instrumentAnthropicProvider } from "./providers/anthropicAdapter.js";
6
+ import { instrumentGoogle as instrumentGoogleProvider } from "./providers/googleAdapter.js";
7
+ import { instrumentOpenAI as instrumentOpenAIProvider } from "./providers/openaiAdapter.js";
8
+ import { prewarmTokenEstimator } from "./tokenEstimator.js";
9
+ const CLIENT_REGISTRY = new Set();
10
+ let EXIT_HOOKS_REGISTERED = false;
11
+ export class Client {
12
+ baseUrl;
13
+ ingestKey;
14
+ environment;
15
+ flushIntervalMs;
16
+ maxQueueSize;
17
+ overflowPolicy;
18
+ requestTimeoutMs;
19
+ protectEngine;
20
+ debug;
21
+ queue = [];
22
+ isFlushing = false;
23
+ timer = null;
24
+ dropped = 0;
25
+ sent = 0;
26
+ failed = 0;
27
+ isClosed = false;
28
+ warmupPromise = null;
29
+ warmupCompleted = false;
30
+ constructor(config) {
31
+ this.baseUrl = config.baseUrl ?? process.env.RHEONIC_BASE_URL ?? sdkNodeConfig.defaultBaseUrl;
32
+ this.ingestKey = config.ingestKey;
33
+ this.environment = config.environment ?? sdkNodeConfig.defaultEnvironment;
34
+ this.flushIntervalMs = config.flushIntervalMs ?? sdkNodeConfig.defaultFlushIntervalMs;
35
+ this.maxQueueSize = config.maxQueueSize ?? sdkNodeConfig.defaultMaxQueueSize;
36
+ this.overflowPolicy = config.overflowPolicy ?? "drop_oldest";
37
+ this.requestTimeoutMs = config.requestTimeoutMs ?? sdkNodeConfig.defaultRequestTimeoutMs;
38
+ const initialFailMode = config.protectFailMode ?? sdkNodeConfig.defaultProtectFailMode;
39
+ const envDebug = process.env.RHEONIC_DEBUG === "1" || process.env.RHEONIC_DEBUG === "true";
40
+ this.debug = config.debug ?? envDebug;
41
+ // Warm default/model-specific tokenizer state before the first protected call.
42
+ prewarmTokenEstimator(null);
43
+ const prewarmModel = process.env.RHEONIC_MODEL?.trim();
44
+ if (prewarmModel) {
45
+ prewarmTokenEstimator(prewarmModel);
46
+ }
47
+ this.protectEngine = new ProtectEngine({
48
+ baseUrl: this.baseUrl,
49
+ ingestKey: this.ingestKey,
50
+ environment: this.environment,
51
+ fallbackRequestTimeoutMs: this.requestTimeoutMs,
52
+ initialFailMode,
53
+ debugLog: this.debugLog.bind(this),
54
+ });
55
+ this.warmupPromise = this.runWarmup();
56
+ this.timer = setInterval(() => {
57
+ void this.flush();
58
+ }, this.flushIntervalMs);
59
+ this.timer.unref?.();
60
+ CLIENT_REGISTRY.add(this);
61
+ registerExitHooks();
62
+ emitLog({
63
+ level: "info",
64
+ event: "sdk_client_initialized",
65
+ message: "SDK client initialized",
66
+ environment: this.environment,
67
+ traceId: generateTraceId(),
68
+ spanId: generateSpanId(),
69
+ });
70
+ }
71
+ async captureEvent(event) {
72
+ try {
73
+ if (this.queue.length >= this.maxQueueSize) {
74
+ if (this.overflowPolicy === "drop_oldest") {
75
+ this.queue.shift();
76
+ this.dropped += 1;
77
+ }
78
+ else {
79
+ this.dropped += 1;
80
+ return;
81
+ }
82
+ }
83
+ if (this.isClosed) {
84
+ return;
85
+ }
86
+ this.queue.push({
87
+ ...event,
88
+ environment: event.environment || this.environment,
89
+ });
90
+ }
91
+ catch {
92
+ return;
93
+ }
94
+ }
95
+ getStats() {
96
+ return {
97
+ queued: this.queue.length,
98
+ dropped: this.dropped,
99
+ sent: this.sent,
100
+ failed: this.failed,
101
+ };
102
+ }
103
+ async flush() {
104
+ if (this.isFlushing || this.isClosed) {
105
+ return;
106
+ }
107
+ this.isFlushing = true;
108
+ try {
109
+ while (this.queue.length > 0) {
110
+ const event = this.queue.shift();
111
+ if (!event) {
112
+ continue;
113
+ }
114
+ await this.sendEvent(event);
115
+ }
116
+ }
117
+ finally {
118
+ this.isFlushing = false;
119
+ }
120
+ }
121
+ async evaluateProtectDecision(context) {
122
+ await this.ensureWarmup();
123
+ return this.protectEngine.evaluate(context);
124
+ }
125
+ async warmConnections() {
126
+ if (this.warmupCompleted) {
127
+ return;
128
+ }
129
+ if (this.warmupPromise) {
130
+ await this.warmupPromise;
131
+ this.warmupPromise = null;
132
+ return;
133
+ }
134
+ this.warmupPromise = this.runWarmup();
135
+ await this.warmupPromise;
136
+ this.warmupPromise = null;
137
+ }
138
+ async ensureWarmup() {
139
+ if (this.warmupCompleted) {
140
+ return;
141
+ }
142
+ if (this.warmupPromise) {
143
+ await this.warmupPromise;
144
+ this.warmupPromise = null;
145
+ }
146
+ }
147
+ async runWarmup() {
148
+ try {
149
+ const response = await bindTraceContext(generateTraceId(), generateSpanId(), async () => await requestJson(`${this.baseUrl}/health`, {
150
+ method: "GET",
151
+ headers: {
152
+ "X-Trace-ID": getTraceId(),
153
+ "X-Span-ID": getSpanId(),
154
+ },
155
+ }));
156
+ this.debugLog("SDK connection warmup completed", { status_code: response.status });
157
+ }
158
+ catch {
159
+ this.debugLog("SDK connection warmup failed");
160
+ }
161
+ try {
162
+ await this.protectEngine.bootstrap();
163
+ this.debugLog("SDK protect config bootstrap completed");
164
+ }
165
+ catch {
166
+ this.debugLog("SDK protect config bootstrap failed");
167
+ }
168
+ this.warmupCompleted = true;
169
+ }
170
+ instrumentOpenAI(openaiClient, options) {
171
+ return instrumentOpenAIProvider(openaiClient, {
172
+ client: this,
173
+ environment: options?.environment,
174
+ endpoint: options?.endpoint,
175
+ feature: options?.feature,
176
+ });
177
+ }
178
+ instrumentAnthropic(anthropicClient, options) {
179
+ return instrumentAnthropicProvider(anthropicClient, {
180
+ client: this,
181
+ environment: options?.environment,
182
+ endpoint: options?.endpoint,
183
+ feature: options?.feature,
184
+ });
185
+ }
186
+ instrumentGoogle(googleModel, options) {
187
+ return instrumentGoogleProvider(googleModel, {
188
+ client: this,
189
+ environment: options?.environment,
190
+ endpoint: options?.endpoint,
191
+ feature: options?.feature,
192
+ });
193
+ }
194
+ async flushWithTimeout(timeoutMs = sdkNodeConfig.defaultFlushTimeoutMs) {
195
+ await Promise.race([
196
+ this.flush(),
197
+ new Promise((resolve) => {
198
+ setTimeout(resolve, timeoutMs);
199
+ }),
200
+ ]);
201
+ }
202
+ close() {
203
+ if (this.isClosed) {
204
+ return;
205
+ }
206
+ this.isClosed = true;
207
+ if (this.timer) {
208
+ clearInterval(this.timer);
209
+ this.timer = null;
210
+ }
211
+ CLIENT_REGISTRY.delete(this);
212
+ }
213
+ async sendEvent(event) {
214
+ const firstAttempt = await this.sendEventOnce(event);
215
+ if (firstAttempt.ok) {
216
+ this.sent += 1;
217
+ return;
218
+ }
219
+ if (!firstAttempt.shouldRetry) {
220
+ this.failed += 1;
221
+ return;
222
+ }
223
+ await waitMs(jitterMs(sdkNodeConfig.retryDelayMinMs, sdkNodeConfig.retryDelayMaxMs));
224
+ const secondAttempt = await this.sendEventOnce(event);
225
+ if (secondAttempt.ok) {
226
+ this.sent += 1;
227
+ return;
228
+ }
229
+ this.failed += 1;
230
+ }
231
+ async sendEventOnce(event) {
232
+ const controller = new AbortController();
233
+ const timeout = setTimeout(() => controller.abort(), this.requestTimeoutMs);
234
+ const traceId = generateTraceId();
235
+ const spanId = generateSpanId();
236
+ try {
237
+ const response = await bindTraceContext(traceId, spanId, async () => await requestJson(`${this.baseUrl}/api/v1/events`, {
238
+ method: "POST",
239
+ headers: {
240
+ "Content-Type": "application/json",
241
+ "X-Project-Ingest-Key": this.ingestKey,
242
+ "X-Trace-ID": getTraceId(),
243
+ "X-Span-ID": spanId,
244
+ },
245
+ body: JSON.stringify(event),
246
+ signal: controller.signal,
247
+ }));
248
+ clearTimeout(timeout);
249
+ if (response.ok) {
250
+ return { ok: true, shouldRetry: false };
251
+ }
252
+ if (response.status >= 500) {
253
+ this.debugLog(`Server error ${response.status}; scheduling retry`);
254
+ return { ok: false, shouldRetry: true };
255
+ }
256
+ return { ok: false, shouldRetry: false };
257
+ }
258
+ catch {
259
+ clearTimeout(timeout);
260
+ return { ok: false, shouldRetry: true };
261
+ }
262
+ }
263
+ debugLog(message, meta) {
264
+ if (!this.debug) {
265
+ return;
266
+ }
267
+ emitLog({
268
+ level: "debug",
269
+ event: "sdk_debug",
270
+ message,
271
+ metadata: meta,
272
+ environment: this.environment,
273
+ });
274
+ }
275
+ }
276
+ function registerExitHooks() {
277
+ if (EXIT_HOOKS_REGISTERED || typeof process === "undefined") {
278
+ return;
279
+ }
280
+ EXIT_HOOKS_REGISTERED = true;
281
+ process.on("beforeExit", () => {
282
+ void flushAllClients();
283
+ });
284
+ process.on("SIGINT", () => {
285
+ void flushAllClients().finally(() => {
286
+ process.exit(0);
287
+ });
288
+ });
289
+ process.on("SIGTERM", () => {
290
+ void flushAllClients().finally(() => {
291
+ process.exit(0);
292
+ });
293
+ });
294
+ }
295
+ async function flushAllClients() {
296
+ await Promise.all(Array.from(CLIENT_REGISTRY, (client) => client.flushWithTimeout()));
297
+ }
298
+ function jitterMs(minMs, maxMs) {
299
+ return Math.floor(Math.random() * (maxMs - minMs + 1)) + minMs;
300
+ }
301
+ function waitMs(delayMs) {
302
+ return new Promise((resolve) => {
303
+ setTimeout(resolve, delayMs);
304
+ });
305
+ }
@@ -0,0 +1,15 @@
1
+ export declare const sdkNodeConfig: {
2
+ readonly defaultBaseUrl: "http://localhost:8000";
3
+ readonly defaultEnvironment: "dev";
4
+ readonly defaultFlushIntervalMs: 1000;
5
+ readonly defaultMaxQueueSize: 1000;
6
+ readonly defaultFlushTimeoutMs: 500;
7
+ readonly defaultRequestTimeoutMs: 1000;
8
+ readonly defaultProtectFailMode: "open";
9
+ readonly internalProtectDecisionTimeoutMs: 150;
10
+ readonly retryDelayMinMs: 200;
11
+ readonly retryDelayMaxMs: 400;
12
+ readonly defaultTokenizerEncoding: "cl100k_base";
13
+ readonly maxInputTokenEstimate: 50000;
14
+ readonly supportedProviders: readonly ["openai", "anthropic", "google"];
15
+ };
package/dist/config.js ADDED
@@ -0,0 +1,15 @@
1
+ export const sdkNodeConfig = {
2
+ defaultBaseUrl: "http://localhost:8000",
3
+ defaultEnvironment: "dev",
4
+ defaultFlushIntervalMs: 1000,
5
+ defaultMaxQueueSize: 1000,
6
+ defaultFlushTimeoutMs: 500,
7
+ defaultRequestTimeoutMs: 1000,
8
+ defaultProtectFailMode: "open",
9
+ internalProtectDecisionTimeoutMs: 150,
10
+ retryDelayMinMs: 200,
11
+ retryDelayMaxMs: 400,
12
+ defaultTokenizerEncoding: "cl100k_base",
13
+ maxInputTokenEstimate: 50_000,
14
+ supportedProviders: ["openai", "anthropic", "google"],
15
+ };
@@ -0,0 +1,3 @@
1
+ export declare class CostCalculator {
2
+ calculate(_usage: Record<string, unknown>): number;
3
+ }
@@ -0,0 +1,6 @@
1
+ export class CostCalculator {
2
+ calculate(_usage) {
3
+ // TODO: Implement provider-aware deterministic cost calculation.
4
+ return 0;
5
+ }
6
+ }
@@ -0,0 +1,35 @@
1
+ export interface EventRequest {
2
+ endpoint?: string;
3
+ feature?: string;
4
+ input_tokens?: number;
5
+ input_tokens_estimate?: number;
6
+ max_output_tokens?: number;
7
+ protect_decision?: string;
8
+ protect_reason?: string;
9
+ }
10
+ export interface EventResponse {
11
+ http_status?: number;
12
+ latency_ms?: number;
13
+ total_tokens?: number;
14
+ error_type?: string;
15
+ }
16
+ export interface EventPayload {
17
+ ts: string;
18
+ provider: string;
19
+ model: string | null;
20
+ environment: string;
21
+ request: EventRequest;
22
+ response: EventResponse;
23
+ }
24
+ export interface BuildEventInput {
25
+ provider: string;
26
+ model?: string | null;
27
+ environment?: string;
28
+ ts?: string;
29
+ request?: EventRequest;
30
+ response?: EventResponse;
31
+ }
32
+ export declare function buildEvent(input: BuildEventInput): EventPayload;
33
+ export declare class EventBuilder {
34
+ build(payload: BuildEventInput): EventPayload;
35
+ }
@@ -0,0 +1,15 @@
1
+ export function buildEvent(input) {
2
+ return {
3
+ ts: input.ts ?? new Date().toISOString(),
4
+ provider: input.provider,
5
+ model: input.model ?? null,
6
+ environment: input.environment ?? "dev",
7
+ request: input.request ?? {},
8
+ response: input.response ?? {},
9
+ };
10
+ }
11
+ export class EventBuilder {
12
+ build(payload) {
13
+ return buildEvent(payload);
14
+ }
15
+ }
@@ -0,0 +1,12 @@
1
+ export interface JsonRequestOptions {
2
+ method: "GET" | "POST";
3
+ headers?: Record<string, string>;
4
+ body?: string;
5
+ signal?: AbortSignal;
6
+ }
7
+ export interface JsonHttpResponse {
8
+ ok: boolean;
9
+ status: number;
10
+ json(): Promise<unknown>;
11
+ }
12
+ export declare function requestJson(url: string, options: JsonRequestOptions): Promise<JsonHttpResponse>;