@trustgateai/sdk 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,124 @@
1
+ # @trustgateai/sdk
2
+
3
+ TrustGate Node.js SDK with **Shadow AI** detection: workflow context, middleware for Vercel AI SDK and LangChain, n8n header helper, and SSE streaming.
4
+
5
+ ## Install
6
+
7
+ ```bash
8
+ npm install @trustgateai/sdk
9
+ ```
10
+
11
+ Optional peer dependencies for middleware:
12
+
13
+ ```bash
14
+ npm install ai @ai-sdk/openai # Vercel AI SDK
15
+ npm install @langchain/openai # LangChain
16
+ ```
17
+
18
+ ## Core client
19
+
20
+ ```ts
21
+ import { TrustGate, getWorkflowContextFromEnv } from '@trustgateai/sdk';
22
+
23
+ const client = new TrustGate({
24
+ baseUrl: process.env.TRUSTGATE_BASE_URL!,
25
+ apiKey: process.env.TRUSTGATE_API_KEY,
26
+ // workflowContext optional; otherwise read from process.env (n8n, Vercel, GitHub)
27
+ });
28
+
29
+ // Non-streaming
30
+ const res = await client.fetch('/v1/chat/completions', {
31
+ method: 'POST',
32
+ body: JSON.stringify({ model: 'gpt-4', messages: [...] }),
33
+ });
34
+
35
+ // SSE streaming (workflow_id sent in request for background attribution)
36
+ const streamRes = await client.fetchStream('/v1/chat/completions', {
37
+ method: 'POST',
38
+ body: JSON.stringify({ model: 'gpt-4', messages: [...], stream: true }),
39
+ });
40
+ // Consume streamRes.body (ReadableStream) as usual for SSE
41
+ ```
42
+
43
+ ## Context provider
44
+
45
+ Workflow metadata is read from `process.env` so TrustGate can attribute traffic (Shadow AI detection):
46
+
47
+ | Source | Example env vars |
48
+ |---------|-------------------|
49
+ | **n8n** | `N8N_WORKFLOW_ID`, `N8N_EXECUTION_ID`, `N8N_WORKFLOW_NAME` |
50
+ | **Vercel** | `VERCEL`, `VERCEL_ENV`, `VERCEL_URL`, `VERCEL_GIT_*` |
51
+ | **GitHub** | `GITHUB_ACTION`, `GITHUB_WORKFLOW`, `GITHUB_RUN_ID`, `GITHUB_REPOSITORY` |
52
+
53
+ ```ts
54
+ import { getWorkflowContextFromEnv } from '@trustgateai/sdk';
55
+
56
+ const ctx = getWorkflowContextFromEnv();
57
+ // { source: 'n8n', workflow_id: '...', execution_id: '...', metadata: {...} }
58
+ ```
59
+
60
+ ## Middleware: Vercel AI SDK
61
+
62
+ Point the OpenAI provider at TrustGate so all calls go through the gateway with workflow context:
63
+
64
+ ```ts
65
+ import { createOpenAI } from '@ai-sdk/openai';
66
+ import { createTrustGateOpenAIOptions } from '@trustgateai/sdk/middleware/vercel-ai';
67
+
68
+ const openai = createOpenAI(
69
+ createTrustGateOpenAIOptions({
70
+ baseUrl: process.env.TRUSTGATE_BASE_URL!,
71
+ apiKey: process.env.TRUSTGATE_API_KEY,
72
+ })
73
+ );
74
+
75
+ // Use with streamText / generateText as usual
76
+ import { streamText } from 'ai';
77
+ const result = streamText({
78
+ model: openai('gpt-4'),
79
+ messages: [{ role: 'user', content: 'Hello' }],
80
+ });
81
+ ```
82
+
83
+ ## Middleware: LangChain
84
+
85
+ Use TrustGate as the OpenAI endpoint for LangChain with one config object:
86
+
87
+ ```ts
88
+ import { ChatOpenAI } from '@langchain/openai';
89
+ import { createTrustGateLangChainConfig } from '@trustgateai/sdk/middleware/langchain';
90
+
91
+ const config = createTrustGateLangChainConfig({
92
+ baseUrl: process.env.TRUSTGATE_BASE_URL!,
93
+ apiKey: process.env.TRUSTGATE_API_KEY,
94
+ });
95
+
96
+ const llm = new ChatOpenAI({
97
+ ...config,
98
+ modelName: 'gpt-4',
99
+ temperature: 0,
100
+ });
101
+ ```
102
+
103
+ ## n8n HTTP Request node: header snippet
104
+
105
+ Get a pre-formatted JSON object of headers for n8n “HTTP Request” nodes so each request includes TrustGate auth and workflow context:
106
+
107
+ ```ts
108
+ import { getN8nHeaderSnippet } from '@trustgateai/sdk';
109
+
110
+ const { headers, json } = getN8nHeaderSnippet();
111
+ // headers: [ { name: 'X-Trustgate-Source', value: 'n8n' }, ... ]
112
+ // json: '{"X-Trustgate-Source":"n8n","X-Trustgate-Workflow-Id":"...", ...}'
113
+ ```
114
+
115
+ In n8n, set **Send Headers** (or equivalent) to the `headers` array or paste the `json` into your node config. You can pass `apiKey` and `baseUrl` explicitly or rely on `TRUSTGATE_API_KEY` and `TRUSTGATE_BASE_URL` in the environment.
116
+
117
+ ## Streaming (SSE)
118
+
119
+ - Use `client.fetchStream(path, init, options)` for SSE endpoints. The client sets `Accept: text/event-stream` and sends workflow context (e.g. `workflow_id`) in the **initial request**, so the gateway can attribute the background stream.
120
+ - The returned `Response` body is the raw stream; consume it with `response.body.getReader()` or pass through to your framework’s SSE handling.
121
+
122
+ ## License
123
+
124
+ MIT
@@ -0,0 +1,30 @@
1
+ import {
2
+ buildContextHeaders,
3
+ getWorkflowContextFromEnv
4
+ } from "./chunk-SV5FTXAH.mjs";
5
+
6
+ // src/middleware/langchain.ts
7
+ function createTrustGateLangChainConfig(config) {
8
+ const baseUrl = config.baseUrl.replace(/\/$/, "");
9
+ const ctx = config.workflowContext ?? getWorkflowContextFromEnv();
10
+ const headers = {
11
+ ...config.headers,
12
+ ...buildContextHeaders(ctx)
13
+ };
14
+ if (config.apiKey) {
15
+ headers["authorization"] = `Bearer ${config.apiKey}`;
16
+ headers["x-api-key"] = config.apiKey;
17
+ }
18
+ return {
19
+ baseURL: baseUrl,
20
+ defaultHeaders: Object.keys(headers).length ? headers : void 0,
21
+ configuration: {
22
+ baseURL: baseUrl,
23
+ defaultHeaders: Object.keys(headers).length ? headers : void 0
24
+ }
25
+ };
26
+ }
27
+
28
+ export {
29
+ createTrustGateLangChainConfig
30
+ };
@@ -0,0 +1,112 @@
1
+ // src/version.ts
2
+ var SDK_VERSION = "1.0.0";
3
+
4
+ // src/context.ts
5
+ var HEADER_PREFIX = "x-trustgate";
6
+ var SOURCE_TOOL_HEADER = "x-trustgate-source-tool";
7
+ var N8N_KEYS = [
8
+ "N8N_WORKFLOW_ID",
9
+ "N8N_EXECUTION_ID",
10
+ "N8N_WORKFLOW_NAME",
11
+ "N8N_EXECUTION_MODE"
12
+ ];
13
+ var VERCEL_KEYS = [
14
+ "VERCEL",
15
+ "VERCEL_ENV",
16
+ "VERCEL_URL",
17
+ "VERCEL_GIT_COMMIT_REF",
18
+ "VERCEL_GIT_REPO_ID",
19
+ "VERCEL_GIT_COMMIT_SHA"
20
+ ];
21
+ var GITHUB_KEYS = [
22
+ "GITHUB_ACTION",
23
+ "GITHUB_ACTIONS",
24
+ "GITHUB_WORKFLOW",
25
+ "GITHUB_RUN_ID",
26
+ "GITHUB_RUN_NUMBER",
27
+ "GITHUB_REPOSITORY",
28
+ "GITHUB_SHA"
29
+ ];
30
+ function pickEnv(keys) {
31
+ const out = {};
32
+ for (const key of keys) {
33
+ const v = process.env[key];
34
+ if (v !== void 0 && v !== "") out[key] = v;
35
+ }
36
+ return out;
37
+ }
38
+ function getBaseMetadata() {
39
+ return {
40
+ sdk_version: SDK_VERSION,
41
+ node_version: process.version,
42
+ os: process.platform,
43
+ arch: process.arch
44
+ };
45
+ }
46
+ function getWorkflowContextFromEnv() {
47
+ const baseMeta = getBaseMetadata();
48
+ const n8n = pickEnv(N8N_KEYS);
49
+ if (Object.keys(n8n).length > 0) {
50
+ return {
51
+ source: "n8n",
52
+ workflow_id: n8n.N8N_WORKFLOW_ID,
53
+ execution_id: n8n.N8N_EXECUTION_ID,
54
+ metadata: { ...baseMeta, ...n8n }
55
+ };
56
+ }
57
+ const vercel = pickEnv(VERCEL_KEYS);
58
+ if (Object.keys(vercel).length > 0) {
59
+ return {
60
+ source: "vercel",
61
+ workflow_id: vercel.VERCEL_GIT_REPO_ID || vercel.VERCEL_URL,
62
+ execution_id: vercel.VERCEL_GIT_COMMIT_SHA,
63
+ metadata: { ...baseMeta, ...vercel }
64
+ };
65
+ }
66
+ const github = pickEnv(GITHUB_KEYS);
67
+ if (Object.keys(github).length > 0) {
68
+ return {
69
+ source: "github",
70
+ workflow_id: github.GITHUB_WORKFLOW || github.GITHUB_REPOSITORY,
71
+ execution_id: github.GITHUB_RUN_ID || github.GITHUB_RUN_NUMBER,
72
+ metadata: { ...baseMeta, ...github }
73
+ };
74
+ }
75
+ return {
76
+ source: "local_script",
77
+ metadata: baseMeta
78
+ };
79
+ }
80
+ function buildSourceToolJson(ctx) {
81
+ const meta = ctx.metadata ?? getBaseMetadata();
82
+ const payload = {
83
+ source: ctx.source,
84
+ metadata: meta
85
+ };
86
+ if (ctx.workflow_id !== void 0) payload.workflow_id = ctx.workflow_id;
87
+ if (ctx.execution_id !== void 0) payload.execution_id = ctx.execution_id;
88
+ if (ctx.workflow_step !== void 0) payload.workflow_step = ctx.workflow_step;
89
+ return JSON.stringify(payload);
90
+ }
91
+ function buildContextHeaders(ctx) {
92
+ const meta = ctx.metadata ?? getBaseMetadata();
93
+ const h = {
94
+ [SOURCE_TOOL_HEADER]: buildSourceToolJson({ ...ctx, metadata: meta }),
95
+ [`${HEADER_PREFIX}-source`]: ctx.source
96
+ };
97
+ if (ctx.workflow_id) h[`${HEADER_PREFIX}-workflow-id`] = ctx.workflow_id;
98
+ if (ctx.execution_id) h[`${HEADER_PREFIX}-execution-id`] = ctx.execution_id;
99
+ if (ctx.workflow_step) h[`${HEADER_PREFIX}-workflow-step`] = ctx.workflow_step;
100
+ for (const [k, v] of Object.entries(meta)) {
101
+ if (v) h[`${HEADER_PREFIX}-meta-${k.toLowerCase()}`] = String(v);
102
+ }
103
+ return h;
104
+ }
105
+
106
+ export {
107
+ HEADER_PREFIX,
108
+ SOURCE_TOOL_HEADER,
109
+ getWorkflowContextFromEnv,
110
+ buildSourceToolJson,
111
+ buildContextHeaders
112
+ };
@@ -0,0 +1,31 @@
1
+ import {
2
+ buildContextHeaders,
3
+ getWorkflowContextFromEnv
4
+ } from "./chunk-SV5FTXAH.mjs";
5
+
6
+ // src/middleware/vercel-ai.ts
7
+ function getTrustGateHeaders(workflowContext) {
8
+ const ctx = workflowContext ?? getWorkflowContextFromEnv();
9
+ return buildContextHeaders(ctx);
10
+ }
11
+ function createTrustGateOpenAIOptions(options) {
12
+ const baseUrl = options.baseUrl.replace(/\/$/, "");
13
+ const headers = {
14
+ ...options.headers,
15
+ ...getTrustGateHeaders(options.workflowContext)
16
+ };
17
+ if (options.apiKey) {
18
+ headers["authorization"] = `Bearer ${options.apiKey}`;
19
+ headers["x-api-key"] = options.apiKey;
20
+ }
21
+ return {
22
+ baseURL: baseUrl,
23
+ apiKey: options.openaiApiKey ?? options.apiKey,
24
+ headers: Object.keys(headers).length ? headers : void 0
25
+ };
26
+ }
27
+
28
+ export {
29
+ getTrustGateHeaders,
30
+ createTrustGateOpenAIOptions
31
+ };
@@ -0,0 +1,75 @@
1
+ import { T as TrustGateConfig, R as RequestOptions, W as WorkflowContext, N as N8nHeaderSnippet } from './types-BxyYog9f.mjs';
2
+ export { a as RuntimeMetadata } from './types-BxyYog9f.mjs';
3
+ export { TrustGateOpenAIOptions, createTrustGateOpenAIOptions, getTrustGateHeaders } from './middleware/vercel-ai.mjs';
4
+ export { createTrustGateLangChainConfig } from './middleware/langchain.mjs';
5
+
6
+ /**
7
+ * TrustGate Node.js client. Forwards requests to the TrustGate gateway with
8
+ * workflow context for Shadow AI detection and observability.
9
+ *
10
+ * Context is sent in lowercase headers (e.g. x-trustgate-source) and as JSON
11
+ * in x-trustgate-source-tool for parity with the Python SDK. The `workflow_step`
12
+ * field in context is the key used to generate Gantt chart bars in the
13
+ * Agent Intelligence dashboard.
14
+ */
15
+ declare class TrustGate {
16
+ private readonly config;
17
+ constructor(config: TrustGateConfig);
18
+ /** Base URL of the TrustGate gateway. */
19
+ get baseUrl(): string;
20
+ /**
21
+ * Get the current effective workflow context (from config override or process.env).
22
+ * Never null; defaults to source "local_script" when no managed env is detected.
23
+ */
24
+ getWorkflowContext(options?: RequestOptions): WorkflowContext;
25
+ /**
26
+ * Perform a non-streaming JSON request through TrustGate.
27
+ */
28
+ fetch(path: string, init?: RequestInit, options?: RequestOptions): Promise<Response>;
29
+ /**
30
+ * Perform a streaming request (SSE). Forwards the response stream and ensures
31
+ * workflow_id (and other context) is sent in the initial request so the
32
+ * background task is attributed correctly. Set Accept: text/event-stream
33
+ * when calling for SSE endpoints.
34
+ */
35
+ fetchStream(path: string, init?: RequestInit, options?: RequestOptions): Promise<Response>;
36
+ }
37
+
38
+ /** Lowercase header prefix for proxy/gateway parity with Python SDK. */
39
+ declare const HEADER_PREFIX = "x-trustgate";
40
+ /** Header carrying the full context JSON (structurally identical to Python SDK). */
41
+ declare const SOURCE_TOOL_HEADER = "x-trustgate-source-tool";
42
+ /**
43
+ * Searches process.env for workflow metadata from n8n, Vercel, or GitHub.
44
+ * If no managed environment is detected, returns context with source "local_script"
45
+ * so local developer usage is correctly tagged as Shadow AI in the dashboard.
46
+ * Never returns null.
47
+ */
48
+ declare function getWorkflowContextFromEnv(): WorkflowContext;
49
+ /**
50
+ * JSON payload for x-trustgate-source-tool header (structurally identical to Python SDK).
51
+ */
52
+ declare function buildSourceToolJson(ctx: WorkflowContext): string;
53
+ /**
54
+ * Builds lowercase TrustGate context headers plus x-trustgate-source-tool JSON.
55
+ * Use for proxy/gateway parity and Python SDK contract.
56
+ */
57
+ declare function buildContextHeaders(ctx: WorkflowContext): Record<string, string>;
58
+
59
+ /**
60
+ * Returns a pre-formatted JSON object of headers for use in n8n "HTTP Request" nodes.
61
+ * Header keys are lowercase for gateway parity. Includes TrustGate auth and workflow
62
+ * context (workflow_id, execution_id, source, x-trustgate-source-tool JSON).
63
+ *
64
+ * Use in n8n:
65
+ * 1. Add an "HTTP Request" node.
66
+ * 2. In "Send Headers" (or equivalent), use the returned `headers` array
67
+ * or paste the `json` string into a "JSON" header configuration.
68
+ *
69
+ * @param apiKey - Optional TrustGate API key (or use TRUSTGATE_API_KEY env).
70
+ * @param baseUrl - Optional gateway base URL (or use TRUSTGATE_BASE_URL env).
71
+ * @returns Headers as name/value list and as a JSON string for n8n.
72
+ */
73
+ declare function getN8nHeaderSnippet(apiKey?: string, baseUrl?: string): N8nHeaderSnippet;
74
+
75
+ export { HEADER_PREFIX, N8nHeaderSnippet, RequestOptions, SOURCE_TOOL_HEADER, TrustGate, TrustGateConfig, WorkflowContext, buildContextHeaders, buildSourceToolJson, getN8nHeaderSnippet, getWorkflowContextFromEnv };
@@ -0,0 +1,75 @@
1
+ import { T as TrustGateConfig, R as RequestOptions, W as WorkflowContext, N as N8nHeaderSnippet } from './types-BxyYog9f.js';
2
+ export { a as RuntimeMetadata } from './types-BxyYog9f.js';
3
+ export { TrustGateOpenAIOptions, createTrustGateOpenAIOptions, getTrustGateHeaders } from './middleware/vercel-ai.js';
4
+ export { createTrustGateLangChainConfig } from './middleware/langchain.js';
5
+
6
+ /**
7
+ * TrustGate Node.js client. Forwards requests to the TrustGate gateway with
8
+ * workflow context for Shadow AI detection and observability.
9
+ *
10
+ * Context is sent in lowercase headers (e.g. x-trustgate-source) and as JSON
11
+ * in x-trustgate-source-tool for parity with the Python SDK. The `workflow_step`
12
+ * field in context is the key used to generate Gantt chart bars in the
13
+ * Agent Intelligence dashboard.
14
+ */
15
+ declare class TrustGate {
16
+ private readonly config;
17
+ constructor(config: TrustGateConfig);
18
+ /** Base URL of the TrustGate gateway. */
19
+ get baseUrl(): string;
20
+ /**
21
+ * Get the current effective workflow context (from config override or process.env).
22
+ * Never null; defaults to source "local_script" when no managed env is detected.
23
+ */
24
+ getWorkflowContext(options?: RequestOptions): WorkflowContext;
25
+ /**
26
+ * Perform a non-streaming JSON request through TrustGate.
27
+ */
28
+ fetch(path: string, init?: RequestInit, options?: RequestOptions): Promise<Response>;
29
+ /**
30
+ * Perform a streaming request (SSE). Forwards the response stream and ensures
31
+ * workflow_id (and other context) is sent in the initial request so the
32
+ * background task is attributed correctly. Set Accept: text/event-stream
33
+ * when calling for SSE endpoints.
34
+ */
35
+ fetchStream(path: string, init?: RequestInit, options?: RequestOptions): Promise<Response>;
36
+ }
37
+
38
+ /** Lowercase header prefix for proxy/gateway parity with Python SDK. */
39
+ declare const HEADER_PREFIX = "x-trustgate";
40
+ /** Header carrying the full context JSON (structurally identical to Python SDK). */
41
+ declare const SOURCE_TOOL_HEADER = "x-trustgate-source-tool";
42
+ /**
43
+ * Searches process.env for workflow metadata from n8n, Vercel, or GitHub.
44
+ * If no managed environment is detected, returns context with source "local_script"
45
+ * so local developer usage is correctly tagged as Shadow AI in the dashboard.
46
+ * Never returns null.
47
+ */
48
+ declare function getWorkflowContextFromEnv(): WorkflowContext;
49
+ /**
50
+ * JSON payload for x-trustgate-source-tool header (structurally identical to Python SDK).
51
+ */
52
+ declare function buildSourceToolJson(ctx: WorkflowContext): string;
53
+ /**
54
+ * Builds lowercase TrustGate context headers plus x-trustgate-source-tool JSON.
55
+ * Use for proxy/gateway parity and Python SDK contract.
56
+ */
57
+ declare function buildContextHeaders(ctx: WorkflowContext): Record<string, string>;
58
+
59
+ /**
60
+ * Returns a pre-formatted JSON object of headers for use in n8n "HTTP Request" nodes.
61
+ * Header keys are lowercase for gateway parity. Includes TrustGate auth and workflow
62
+ * context (workflow_id, execution_id, source, x-trustgate-source-tool JSON).
63
+ *
64
+ * Use in n8n:
65
+ * 1. Add an "HTTP Request" node.
66
+ * 2. In "Send Headers" (or equivalent), use the returned `headers` array
67
+ * or paste the `json` string into a "JSON" header configuration.
68
+ *
69
+ * @param apiKey - Optional TrustGate API key (or use TRUSTGATE_API_KEY env).
70
+ * @param baseUrl - Optional gateway base URL (or use TRUSTGATE_BASE_URL env).
71
+ * @returns Headers as name/value list and as a JSON string for n8n.
72
+ */
73
+ declare function getN8nHeaderSnippet(apiKey?: string, baseUrl?: string): N8nHeaderSnippet;
74
+
75
+ export { HEADER_PREFIX, N8nHeaderSnippet, RequestOptions, SOURCE_TOOL_HEADER, TrustGate, TrustGateConfig, WorkflowContext, buildContextHeaders, buildSourceToolJson, getN8nHeaderSnippet, getWorkflowContextFromEnv };