@trustgateai/sdk 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,59 @@
1
+ import { T as TrustGateConfig } from '../types-BxyYog9f.mjs';
2
+
3
+ /**
4
+ * Vercel AI SDK middleware: swap the provider to TrustGate with one line.
5
+ *
6
+ * Use createOpenAI from @ai-sdk/openai (the AI SDK's OpenAI provider), and point
7
+ * baseURL to your TrustGate gateway. This ensures all LLM calls go through
8
+ * TrustGate with workflow context for Shadow AI detection.
9
+ *
10
+ * Example:
11
+ *
12
+ * import { createOpenAI } from '@ai-sdk/openai';
13
+ * import { createTrustGateOpenAIOptions } from '@trustgateai/sdk/middleware/vercel-ai';
14
+ *
15
+ * const openai = createOpenAI(createTrustGateOpenAIOptions({
16
+ *   baseUrl: process.env.TRUSTGATE_BASE_URL!,
17
+ *   apiKey: process.env.TRUSTGATE_API_KEY,
18
+ * }));
19
+ *
20
+ * // Then use `openai` with streamText() as usual — requests go through TrustGate.
21
+ * const result = streamText({ model: openai('gpt-4'), messages, ... });
22
+ */
23
+
24
+ interface TrustGateOpenAIOptions extends TrustGateConfig {
25
+ /**
26
+ * Optional OpenAI-compatible API key (if your gateway expects it in addition to TrustGate key).
27
+ */
28
+ openaiApiKey?: string;
29
+ }
30
+ /**
31
+ * Returns extra headers (lowercase keys) to pass to the Vercel AI SDK OpenAI provider
32
+ * so that requests sent to TrustGate include workflow context and x-trustgate-source-tool
33
+ * JSON for Shadow AI detection. Use with createOpenAI({ baseURL, headers: getTrustGateHeaders() }).
34
+ */
35
+ declare function getTrustGateHeaders(workflowContext?: TrustGateConfig["workflowContext"]): Record<string, string>;
36
+ /**
37
+ * Creates options suitable for the Vercel AI SDK's OpenAI provider so that
38
+ * you can swap the provider to TrustGate in one line.
39
+ *
40
+ * Usage with AI SDK 3.x:
41
+ *
42
+ * import { createOpenAI } from '@ai-sdk/openai';
43
+ * import { createTrustGateOpenAIOptions } from '@trustgateai/sdk/middleware/vercel-ai';
44
+ *
45
+ * const openaiOptions = createTrustGateOpenAIOptions({
46
+ * baseUrl: process.env.TRUSTGATE_BASE_URL!,
47
+ * apiKey: process.env.TRUSTGATE_API_KEY,
48
+ * });
49
+ *
50
+ * const openai = createOpenAI(openaiOptions);
51
+ * // use openai('gpt-4') with streamText()
52
+ */
53
+ declare function createTrustGateOpenAIOptions(options: TrustGateOpenAIOptions): {
54
+ baseURL: string;
55
+ apiKey?: string;
56
+ headers?: Record<string, string>;
57
+ };
58
+
59
+ export { type TrustGateOpenAIOptions, createTrustGateOpenAIOptions, getTrustGateHeaders };
@@ -0,0 +1,59 @@
1
+ import { T as TrustGateConfig } from '../types-BxyYog9f.js';
2
+
3
+ /**
4
+ * Vercel AI SDK middleware: swap the provider to TrustGate with one line.
5
+ *
6
+ * Use createOpenAI from @ai-sdk/openai (the AI SDK's OpenAI provider), and point
7
+ * baseURL to your TrustGate gateway. This ensures all LLM calls go through
8
+ * TrustGate with workflow context for Shadow AI detection.
9
+ *
10
+ * Example:
11
+ *
12
+ * import { createOpenAI } from '@ai-sdk/openai';
13
+ * import { createTrustGateOpenAIOptions } from '@trustgateai/sdk/middleware/vercel-ai';
14
+ *
15
+ * const openai = createOpenAI(createTrustGateOpenAIOptions({
16
+ *   baseUrl: process.env.TRUSTGATE_BASE_URL!,
17
+ *   apiKey: process.env.TRUSTGATE_API_KEY,
18
+ * }));
19
+ *
20
+ * // Then use `openai` with streamText() as usual — requests go through TrustGate.
21
+ * const result = streamText({ model: openai('gpt-4'), messages, ... });
22
+ */
23
+
24
+ interface TrustGateOpenAIOptions extends TrustGateConfig {
25
+ /**
26
+ * Optional OpenAI-compatible API key (if your gateway expects it in addition to TrustGate key).
27
+ */
28
+ openaiApiKey?: string;
29
+ }
30
+ /**
31
+ * Returns extra headers (lowercase keys) to pass to the Vercel AI SDK OpenAI provider
32
+ * so that requests sent to TrustGate include workflow context and x-trustgate-source-tool
33
+ * JSON for Shadow AI detection. Use with createOpenAI({ baseURL, headers: getTrustGateHeaders() }).
34
+ */
35
+ declare function getTrustGateHeaders(workflowContext?: TrustGateConfig["workflowContext"]): Record<string, string>;
36
+ /**
37
+ * Creates options suitable for the Vercel AI SDK's OpenAI provider so that
38
+ * you can swap the provider to TrustGate in one line.
39
+ *
40
+ * Usage with AI SDK 3.x:
41
+ *
42
+ * import { createOpenAI } from '@ai-sdk/openai';
43
+ * import { createTrustGateOpenAIOptions } from '@trustgateai/sdk/middleware/vercel-ai';
44
+ *
45
+ * const openaiOptions = createTrustGateOpenAIOptions({
46
+ * baseUrl: process.env.TRUSTGATE_BASE_URL!,
47
+ * apiKey: process.env.TRUSTGATE_API_KEY,
48
+ * });
49
+ *
50
+ * const openai = createOpenAI(openaiOptions);
51
+ * // use openai('gpt-4') with streamText()
52
+ */
53
+ declare function createTrustGateOpenAIOptions(options: TrustGateOpenAIOptions): {
54
+ baseURL: string;
55
+ apiKey?: string;
56
+ headers?: Record<string, string>;
57
+ };
58
+
59
+ export { type TrustGateOpenAIOptions, createTrustGateOpenAIOptions, getTrustGateHeaders };
@@ -0,0 +1,158 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/middleware/vercel-ai.ts
21
+ var vercel_ai_exports = {};
22
+ __export(vercel_ai_exports, {
23
+ createTrustGateOpenAIOptions: () => createTrustGateOpenAIOptions,
24
+ getTrustGateHeaders: () => getTrustGateHeaders
25
+ });
26
+ module.exports = __toCommonJS(vercel_ai_exports);
27
+
28
+ // src/version.ts
29
+ var SDK_VERSION = "1.0.0";
30
+
31
+ // src/context.ts
32
+ var HEADER_PREFIX = "x-trustgate";
33
+ var SOURCE_TOOL_HEADER = "x-trustgate-source-tool";
34
+ var N8N_KEYS = [
35
+ "N8N_WORKFLOW_ID",
36
+ "N8N_EXECUTION_ID",
37
+ "N8N_WORKFLOW_NAME",
38
+ "N8N_EXECUTION_MODE"
39
+ ];
40
+ var VERCEL_KEYS = [
41
+ "VERCEL",
42
+ "VERCEL_ENV",
43
+ "VERCEL_URL",
44
+ "VERCEL_GIT_COMMIT_REF",
45
+ "VERCEL_GIT_REPO_ID",
46
+ "VERCEL_GIT_COMMIT_SHA"
47
+ ];
48
+ var GITHUB_KEYS = [
49
+ "GITHUB_ACTION",
50
+ "GITHUB_ACTIONS",
51
+ "GITHUB_WORKFLOW",
52
+ "GITHUB_RUN_ID",
53
+ "GITHUB_RUN_NUMBER",
54
+ "GITHUB_REPOSITORY",
55
+ "GITHUB_SHA"
56
+ ];
57
+ function pickEnv(keys) {
58
+ const out = {};
59
+ for (const key of keys) {
60
+ const v = process.env[key];
61
+ if (v !== void 0 && v !== "") out[key] = v;
62
+ }
63
+ return out;
64
+ }
65
+ function getBaseMetadata() {
66
+ return {
67
+ sdk_version: SDK_VERSION,
68
+ node_version: process.version,
69
+ os: process.platform,
70
+ arch: process.arch
71
+ };
72
+ }
73
+ function getWorkflowContextFromEnv() {
74
+ const baseMeta = getBaseMetadata();
75
+ const n8n = pickEnv(N8N_KEYS);
76
+ if (Object.keys(n8n).length > 0) {
77
+ return {
78
+ source: "n8n",
79
+ workflow_id: n8n.N8N_WORKFLOW_ID,
80
+ execution_id: n8n.N8N_EXECUTION_ID,
81
+ metadata: { ...baseMeta, ...n8n }
82
+ };
83
+ }
84
+ const vercel = pickEnv(VERCEL_KEYS);
85
+ if (Object.keys(vercel).length > 0) {
86
+ return {
87
+ source: "vercel",
88
+ workflow_id: vercel.VERCEL_GIT_REPO_ID || vercel.VERCEL_URL,
89
+ execution_id: vercel.VERCEL_GIT_COMMIT_SHA,
90
+ metadata: { ...baseMeta, ...vercel }
91
+ };
92
+ }
93
+ const github = pickEnv(GITHUB_KEYS);
94
+ if (Object.keys(github).length > 0) {
95
+ return {
96
+ source: "github",
97
+ workflow_id: github.GITHUB_WORKFLOW || github.GITHUB_REPOSITORY,
98
+ execution_id: github.GITHUB_RUN_ID || github.GITHUB_RUN_NUMBER,
99
+ metadata: { ...baseMeta, ...github }
100
+ };
101
+ }
102
+ return {
103
+ source: "local_script",
104
+ metadata: baseMeta
105
+ };
106
+ }
107
+ function buildSourceToolJson(ctx) {
108
+ const meta = ctx.metadata ?? getBaseMetadata();
109
+ const payload = {
110
+ source: ctx.source,
111
+ metadata: meta
112
+ };
113
+ if (ctx.workflow_id !== void 0) payload.workflow_id = ctx.workflow_id;
114
+ if (ctx.execution_id !== void 0) payload.execution_id = ctx.execution_id;
115
+ if (ctx.workflow_step !== void 0) payload.workflow_step = ctx.workflow_step;
116
+ return JSON.stringify(payload);
117
+ }
118
+ function buildContextHeaders(ctx) {
119
+ const meta = ctx.metadata ?? getBaseMetadata();
120
+ const h = {
121
+ [SOURCE_TOOL_HEADER]: buildSourceToolJson({ ...ctx, metadata: meta }),
122
+ [`${HEADER_PREFIX}-source`]: ctx.source
123
+ };
124
+ if (ctx.workflow_id) h[`${HEADER_PREFIX}-workflow-id`] = ctx.workflow_id;
125
+ if (ctx.execution_id) h[`${HEADER_PREFIX}-execution-id`] = ctx.execution_id;
126
+ if (ctx.workflow_step) h[`${HEADER_PREFIX}-workflow-step`] = ctx.workflow_step;
127
+ for (const [k, v] of Object.entries(meta)) {
128
+ if (v) h[`${HEADER_PREFIX}-meta-${k.toLowerCase()}`] = String(v);
129
+ }
130
+ return h;
131
+ }
132
+
133
+ // src/middleware/vercel-ai.ts
134
+ function getTrustGateHeaders(workflowContext) {
135
+ const ctx = workflowContext ?? getWorkflowContextFromEnv();
136
+ return buildContextHeaders(ctx);
137
+ }
138
+ function createTrustGateOpenAIOptions(options) {
139
+ const baseUrl = options.baseUrl.replace(/\/$/, "");
140
+ const headers = {
141
+ ...options.headers,
142
+ ...getTrustGateHeaders(options.workflowContext)
143
+ };
144
+ if (options.apiKey) {
145
+ headers["authorization"] = `Bearer ${options.apiKey}`;
146
+ headers["x-api-key"] = options.apiKey;
147
+ }
148
+ return {
149
+ baseURL: baseUrl,
150
+ apiKey: options.openaiApiKey ?? options.apiKey,
151
+ headers: Object.keys(headers).length ? headers : void 0
152
+ };
153
+ }
154
+ // Annotate the CommonJS export names for ESM import in node:
155
+ 0 && (module.exports = {
156
+ createTrustGateOpenAIOptions,
157
+ getTrustGateHeaders
158
+ });
@@ -0,0 +1,9 @@
1
+ import {
2
+ createTrustGateOpenAIOptions,
3
+ getTrustGateHeaders
4
+ } from "../chunk-UUGIIO3Q.mjs";
5
+ import "../chunk-SV5FTXAH.mjs";
6
+ export {
7
+ createTrustGateOpenAIOptions,
8
+ getTrustGateHeaders
9
+ };
@@ -0,0 +1,69 @@
1
+ /**
2
+ * Runtime metadata included in every context for parity with the Python SDK.
3
+ * Sent in x-trustgate-source-tool and in x-trustgate-meta-* headers.
4
+ */
5
+ interface RuntimeMetadata {
6
+ /** SDK version (e.g. "1.0.0"). */
7
+ sdk_version: string;
8
+ /** Node.js version (e.g. "v20.1.0"). */
9
+ node_version: string;
10
+ /** Operating system platform (process.platform, e.g. "win32", "darwin", "linux"). */
11
+ os: string;
12
+ /** CPU architecture (process.arch, e.g. "x64", "arm64"). */
13
+ arch: string;
14
+ [key: string]: string;
15
+ }
16
+ /**
17
+ * Workflow/source metadata for Shadow AI detection and observability.
18
+ * Populated from n8n, Vercel, GitHub Actions, or default "local_script".
19
+ * Metadata always includes sdk_version, node_version, os, arch for Python parity.
20
+ */
21
+ interface WorkflowContext {
22
+ /** Source: "n8n" | "vercel" | "github" | "local_script" | "custom". Never null. */
23
+ source: string;
24
+ /** Workflow or execution identifier */
25
+ workflow_id?: string;
26
+ /** Execution or run ID */
27
+ execution_id?: string;
28
+ /** Step identifier; key for generating Gantt chart bars in the Agent Intelligence dashboard. */
29
+ workflow_step?: string;
30
+ /** Runtime + environment metadata. When omitted, SDK fills sdk_version, node_version, os, arch. */
31
+ metadata?: RuntimeMetadata;
32
+ }
33
+ /**
34
+ * TrustGate client configuration.
35
+ */
36
+ interface TrustGateConfig {
37
+ /** TrustGate gateway base URL (e.g. https://gateway.example.com) */
38
+ baseUrl: string;
39
+ /** API key for gateway authentication (if required) */
40
+ apiKey?: string;
41
+ /** Override workflow context; if not set, context is derived from process.env (never null). */
42
+ workflowContext?: WorkflowContext;
43
+ /** Extra headers sent with every request */
44
+ headers?: Record<string, string>;
45
+ }
46
+ /**
47
+ * Options for a single request (merge with client config).
48
+ */
49
+ interface RequestOptions {
50
+ /** Override workflow context for this request only */
51
+ workflowContext?: WorkflowContext;
52
+ /** Additional headers for this request */
53
+ headers?: Record<string, string>;
54
+ /** Request timeout in ms */
55
+ timeout?: number;
56
+ }
57
+ /**
58
+ * Pre-formatted headers for n8n HTTP Request node (name/value pairs).
59
+ */
60
+ interface N8nHeaderSnippet {
61
+ headers: Array<{
62
+ name: string;
63
+ value: string;
64
+ }>;
65
+ /** JSON string suitable for pasting into n8n */
66
+ json: string;
67
+ }
68
+
69
+ export type { N8nHeaderSnippet as N, RequestOptions as R, TrustGateConfig as T, WorkflowContext as W, RuntimeMetadata as a };
@@ -0,0 +1,69 @@
1
+ /**
2
+ * Runtime metadata included in every context for parity with the Python SDK.
3
+ * Sent in x-trustgate-source-tool and in x-trustgate-meta-* headers.
4
+ */
5
+ interface RuntimeMetadata {
6
+ /** SDK version (e.g. "1.0.0"). */
7
+ sdk_version: string;
8
+ /** Node.js version (e.g. "v20.1.0"). */
9
+ node_version: string;
10
+ /** Operating system platform (process.platform, e.g. "win32", "darwin", "linux"). */
11
+ os: string;
12
+ /** CPU architecture (process.arch, e.g. "x64", "arm64"). */
13
+ arch: string;
14
+ [key: string]: string;
15
+ }
16
+ /**
17
+ * Workflow/source metadata for Shadow AI detection and observability.
18
+ * Populated from n8n, Vercel, GitHub Actions, or default "local_script".
19
+ * Metadata always includes sdk_version, node_version, os, arch for Python parity.
20
+ */
21
+ interface WorkflowContext {
22
+ /** Source: "n8n" | "vercel" | "github" | "local_script" | "custom". Never null. */
23
+ source: string;
24
+ /** Workflow or execution identifier */
25
+ workflow_id?: string;
26
+ /** Execution or run ID */
27
+ execution_id?: string;
28
+ /** Step identifier; key for generating Gantt chart bars in the Agent Intelligence dashboard. */
29
+ workflow_step?: string;
30
+ /** Runtime + environment metadata. When omitted, SDK fills sdk_version, node_version, os, arch. */
31
+ metadata?: RuntimeMetadata;
32
+ }
33
+ /**
34
+ * TrustGate client configuration.
35
+ */
36
+ interface TrustGateConfig {
37
+ /** TrustGate gateway base URL (e.g. https://gateway.example.com) */
38
+ baseUrl: string;
39
+ /** API key for gateway authentication (if required) */
40
+ apiKey?: string;
41
+ /** Override workflow context; if not set, context is derived from process.env (never null). */
42
+ workflowContext?: WorkflowContext;
43
+ /** Extra headers sent with every request */
44
+ headers?: Record<string, string>;
45
+ }
46
+ /**
47
+ * Options for a single request (merge with client config).
48
+ */
49
+ interface RequestOptions {
50
+ /** Override workflow context for this request only */
51
+ workflowContext?: WorkflowContext;
52
+ /** Additional headers for this request */
53
+ headers?: Record<string, string>;
54
+ /** Request timeout in ms */
55
+ timeout?: number;
56
+ }
57
+ /**
58
+ * Pre-formatted headers for n8n HTTP Request node (name/value pairs).
59
+ */
60
+ interface N8nHeaderSnippet {
61
+ headers: Array<{
62
+ name: string;
63
+ value: string;
64
+ }>;
65
+ /** JSON string suitable for pasting into n8n */
66
+ json: string;
67
+ }
68
+
69
+ export type { N8nHeaderSnippet as N, RequestOptions as R, TrustGateConfig as T, WorkflowContext as W, RuntimeMetadata as a };
package/package.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "name": "@trustgateai/sdk",
3
+ "version": "1.0.0",
4
+ "description": "TrustGate Node.js SDK with Shadow AI detection and workflow context",
5
+ "main": "dist/index.js",
6
+ "module": "dist/index.mjs",
7
+ "types": "dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.mjs",
12
+ "require": "./dist/index.js"
13
+ },
14
+ "./middleware/vercel-ai": {
15
+ "types": "./dist/middleware/vercel-ai.d.ts",
16
+ "import": "./dist/middleware/vercel-ai.mjs",
17
+ "require": "./dist/middleware/vercel-ai.js"
18
+ },
19
+ "./middleware/langchain": {
20
+ "types": "./dist/middleware/langchain.d.ts",
21
+ "import": "./dist/middleware/langchain.mjs",
22
+ "require": "./dist/middleware/langchain.js"
23
+ }
24
+ },
25
+ "scripts": {
26
+ "build": "tsup src/index.ts src/middleware/vercel-ai.ts src/middleware/langchain.ts --format cjs,esm --dts --clean",
27
+ "dev": "tsup src/index.ts src/middleware/vercel-ai.ts src/middleware/langchain.ts --format cjs,esm --dts --watch"
28
+ },
29
+ "keywords": [
30
+ "trustgate",
31
+ "neuraltrust",
32
+ "shadow-ai",
33
+ "ai-gateway",
34
+ "n8n",
35
+ "vercel",
36
+ "langchain"
37
+ ],
38
+ "author": "",
39
+ "license": "MIT",
40
+ "devDependencies": {
41
+ "@types/node": "^20.10.0",
42
+ "tsup": "^8.0.0",
43
+ "typescript": "^5.3.0"
44
+ },
45
+ "peerDependencies": {
46
+ "ai": "^3.0.0",
47
+ "@langchain/openai": "^0.2.0"
48
+ },
49
+ "peerDependenciesMeta": {
50
+ "ai": { "optional": true },
51
+ "@langchain/openai": { "optional": true }
52
+ },
53
+ "engines": {
54
+ "node": ">=18"
55
+ }
56
+ }
package/src/client.ts ADDED
@@ -0,0 +1,125 @@
1
+ import type { TrustGateConfig, RequestOptions, WorkflowContext } from "./types.js";
2
+ import { getWorkflowContextFromEnv, buildContextHeaders } from "./context.js";
3
+
4
+ function resolveContext(config: TrustGateConfig, options?: RequestOptions): WorkflowContext {
5
+ return (
6
+ options?.workflowContext ??
7
+ config.workflowContext ??
8
+ getWorkflowContextFromEnv()
9
+ );
10
+ }
11
+
12
+ function mergeHeaders(
13
+ config: TrustGateConfig,
14
+ options?: RequestOptions,
15
+ context?: WorkflowContext
16
+ ): Record<string, string> {
17
+ const out: Record<string, string> = {
18
+ "content-type": "application/json",
19
+ ...config.headers,
20
+ ...options?.headers,
21
+ };
22
+ if (config.apiKey) {
23
+ out["authorization"] = `Bearer ${config.apiKey}`;
24
+ out["x-api-key"] = config.apiKey;
25
+ }
26
+ if (context) {
27
+ Object.assign(out, buildContextHeaders(context));
28
+ }
29
+ return out;
30
+ }
31
+
32
+ /**
33
+ * TrustGate Node.js client. Forwards requests to the TrustGate gateway with
34
+ * workflow context for Shadow AI detection and observability.
35
+ *
36
+ * Context is sent in lowercase headers (e.g. x-trustgate-source) and as JSON
37
+ * in x-trustgate-source-tool for parity with the Python SDK. The `workflow_step`
38
+ * field in context is the key used to generate Gantt chart bars in the
39
+ * Agent Intelligence dashboard.
40
+ */
41
+ export class TrustGate {
42
+ private readonly config: TrustGateConfig;
43
+
44
+ constructor(config: TrustGateConfig) {
45
+ const baseUrl = config.baseUrl.replace(/\/$/, "");
46
+ this.config = { ...config, baseUrl };
47
+ }
48
+
49
+ /** Base URL of the TrustGate gateway. */
50
+ get baseUrl(): string {
51
+ return this.config.baseUrl;
52
+ }
53
+
54
+ /**
55
+ * Get the current effective workflow context (from config override or process.env).
56
+ * Never null; defaults to source "local_script" when no managed env is detected.
57
+ */
58
+ getWorkflowContext(options?: RequestOptions): WorkflowContext {
59
+ return resolveContext(this.config, options);
60
+ }
61
+
62
+ /**
63
+ * Perform a non-streaming JSON request through TrustGate.
64
+ */
65
+ async fetch(path: string, init: RequestInit = {}, options?: RequestOptions): Promise<Response> {
66
+ const context = resolveContext(this.config, options);
67
+ const url = path.startsWith("http") ? path : `${this.config.baseUrl}/${path.replace(/^\//, "")}`;
68
+ const headers = mergeHeaders(this.config, options, context);
69
+ const timeout = options?.timeout ?? 60_000;
70
+
71
+ const controller = new AbortController();
72
+ const id = setTimeout(() => controller.abort(), timeout);
73
+ try {
74
+ const res = await fetch(url, {
75
+ ...init,
76
+ headers: { ...headers, ...(init.headers as Record<string, string>) },
77
+ signal: init.signal ?? controller.signal,
78
+ });
79
+ return res;
80
+ } finally {
81
+ clearTimeout(id);
82
+ }
83
+ }
84
+
85
+ /**
86
+ * Perform a streaming request (SSE). Forwards the response stream and ensures
87
+ * workflow_id (and other context) is sent in the initial request so the
88
+ * background task is attributed correctly. Set Accept: text/event-stream
89
+ * when calling for SSE endpoints.
90
+ */
91
+ async fetchStream(
92
+ path: string,
93
+ init: RequestInit = {},
94
+ options?: RequestOptions
95
+ ): Promise<Response> {
96
+ const context = resolveContext(this.config, options);
97
+ const url = path.startsWith("http") ? path : `${this.config.baseUrl}/${path.replace(/^\//, "")}`;
98
+ const headers = mergeHeaders(this.config, options, context);
99
+ const mergedHeaders = {
100
+ accept: "text/event-stream",
101
+ ...headers,
102
+ ...(init.headers as Record<string, string>),
103
+ };
104
+
105
+ const res = await fetch(url, {
106
+ ...init,
107
+ headers: mergedHeaders,
108
+ signal: init.signal,
109
+ });
110
+
111
+ if (!res.ok || !res.body) {
112
+ return res;
113
+ }
114
+
115
+ if (!res.headers.get("content-type")?.includes("text/event-stream")) {
116
+ return res;
117
+ }
118
+
119
+ return new Response(res.body, {
120
+ status: res.status,
121
+ statusText: res.statusText,
122
+ headers: res.headers,
123
+ });
124
+ }
125
+ }