@axonflow/sdk 1.2.0 → 1.3.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between those published versions.
- package/LICENSE +21 -0
- package/README.md +313 -9
- package/dist/cjs/client.d.ts +106 -41
- package/dist/cjs/client.d.ts.map +1 -1
- package/dist/cjs/client.js +446 -198
- package/dist/cjs/client.js.map +1 -1
- package/dist/cjs/errors.d.ts +51 -0
- package/dist/cjs/errors.d.ts.map +1 -0
- package/dist/cjs/errors.js +84 -0
- package/dist/cjs/errors.js.map +1 -0
- package/dist/cjs/index.d.ts +6 -2
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +16 -2
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/interceptors/anthropic.d.ts +1 -1
- package/dist/cjs/interceptors/anthropic.d.ts.map +1 -1
- package/dist/cjs/interceptors/anthropic.js +6 -6
- package/dist/cjs/interceptors/anthropic.js.map +1 -1
- package/dist/cjs/interceptors/bedrock.d.ts +141 -0
- package/dist/cjs/interceptors/bedrock.d.ts.map +1 -0
- package/dist/cjs/interceptors/bedrock.js +259 -0
- package/dist/cjs/interceptors/bedrock.js.map +1 -0
- package/dist/cjs/interceptors/gemini.d.ts +81 -0
- package/dist/cjs/interceptors/gemini.d.ts.map +1 -0
- package/dist/cjs/interceptors/gemini.js +110 -0
- package/dist/cjs/interceptors/gemini.js.map +1 -0
- package/dist/cjs/interceptors/ollama.d.ts +143 -0
- package/dist/cjs/interceptors/ollama.d.ts.map +1 -0
- package/dist/cjs/interceptors/ollama.js +150 -0
- package/dist/cjs/interceptors/ollama.js.map +1 -0
- package/dist/cjs/interceptors/openai.d.ts +1 -1
- package/dist/cjs/interceptors/openai.d.ts.map +1 -1
- package/dist/cjs/interceptors/openai.js +5 -5
- package/dist/cjs/interceptors/openai.js.map +1 -1
- package/dist/cjs/types/config.d.ts +7 -1
- package/dist/cjs/types/config.d.ts.map +1 -1
- package/dist/cjs/types/gateway.d.ts +51 -114
- package/dist/cjs/types/gateway.d.ts.map +1 -1
- package/dist/cjs/types/gateway.js +2 -7
- package/dist/cjs/types/gateway.js.map +1 -1
- package/dist/cjs/types/index.d.ts +1 -0
- package/dist/cjs/types/index.d.ts.map +1 -1
- package/dist/cjs/types/index.js +1 -0
- package/dist/cjs/types/index.js.map +1 -1
- package/dist/cjs/types/proxy.d.ts +78 -0
- package/dist/cjs/types/proxy.d.ts.map +1 -0
- package/dist/cjs/types/proxy.js +9 -0
- package/dist/cjs/types/proxy.js.map +1 -0
- package/dist/cjs/utils/helpers.d.ts.map +1 -1
- package/dist/cjs/utils/helpers.js +3 -1
- package/dist/cjs/utils/helpers.js.map +1 -1
- package/dist/esm/client.d.ts +106 -41
- package/dist/esm/client.d.ts.map +1 -1
- package/dist/esm/client.js +446 -198
- package/dist/esm/client.js.map +1 -1
- package/dist/esm/errors.d.ts +51 -0
- package/dist/esm/errors.d.ts.map +1 -0
- package/dist/esm/errors.js +75 -0
- package/dist/esm/errors.js.map +1 -0
- package/dist/esm/index.d.ts +6 -2
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +6 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/interceptors/anthropic.d.ts +1 -1
- package/dist/esm/interceptors/anthropic.d.ts.map +1 -1
- package/dist/esm/interceptors/anthropic.js +6 -6
- package/dist/esm/interceptors/anthropic.js.map +1 -1
- package/dist/esm/interceptors/bedrock.d.ts +141 -0
- package/dist/esm/interceptors/bedrock.d.ts.map +1 -0
- package/dist/esm/interceptors/bedrock.js +220 -0
- package/dist/esm/interceptors/bedrock.js.map +1 -0
- package/dist/esm/interceptors/gemini.d.ts +81 -0
- package/dist/esm/interceptors/gemini.d.ts.map +1 -0
- package/dist/esm/interceptors/gemini.js +105 -0
- package/dist/esm/interceptors/gemini.js.map +1 -0
- package/dist/esm/interceptors/ollama.d.ts +143 -0
- package/dist/esm/interceptors/ollama.d.ts.map +1 -0
- package/dist/esm/interceptors/ollama.js +144 -0
- package/dist/esm/interceptors/ollama.js.map +1 -0
- package/dist/esm/interceptors/openai.d.ts +1 -1
- package/dist/esm/interceptors/openai.d.ts.map +1 -1
- package/dist/esm/interceptors/openai.js +5 -5
- package/dist/esm/interceptors/openai.js.map +1 -1
- package/dist/esm/types/config.d.ts +7 -1
- package/dist/esm/types/config.d.ts.map +1 -1
- package/dist/esm/types/gateway.d.ts +51 -114
- package/dist/esm/types/gateway.d.ts.map +1 -1
- package/dist/esm/types/gateway.js +2 -7
- package/dist/esm/types/gateway.js.map +1 -1
- package/dist/esm/types/index.d.ts +1 -0
- package/dist/esm/types/index.d.ts.map +1 -1
- package/dist/esm/types/index.js +1 -0
- package/dist/esm/types/index.js.map +1 -1
- package/dist/esm/types/proxy.d.ts +78 -0
- package/dist/esm/types/proxy.d.ts.map +1 -0
- package/dist/esm/types/proxy.js +8 -0
- package/dist/esm/types/proxy.js.map +1 -0
- package/dist/esm/utils/helpers.d.ts.map +1 -1
- package/dist/esm/utils/helpers.js +3 -1
- package/dist/esm/utils/helpers.js.map +1 -1
- package/package.json +22 -7
package/dist/cjs/client.js
CHANGED
@@ -1,6 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.AxonFlow = void 0;
+const errors_1 = require("./errors");
 const openai_1 = require("./interceptors/openai");
 const anthropic_1 = require("./interceptors/anthropic");
 const helpers_1 = require("./utils/helpers");
@@ -10,24 +11,49 @@ const helpers_1 = require("./utils/helpers");
 class AxonFlow {
     constructor(config) {
         this.interceptors = [];
-        // Set defaults
+        // Set defaults first to determine endpoint
+        const endpoint = config.endpoint || 'https://staging-eu.getaxonflow.com';
+        // Check if running in self-hosted mode (localhost)
+        const isLocalhost = endpoint.includes('localhost') || endpoint.includes('127.0.0.1');
+        // License key is optional for self-hosted deployments
+        // When not provided, agent must have SELF_HOSTED_MODE=true
+        if (!isLocalhost && !config.licenseKey && !config.apiKey) {
+            throw new Error('Either licenseKey or apiKey must be provided for non-localhost endpoints');
+        }
+        if (isLocalhost && !config.licenseKey && !config.apiKey && config.debug) {
+            console.warn('[AxonFlow] No license key provided - ensure agent has SELF_HOSTED_MODE=true');
+        }
+        // Set configuration
         this.config = {
             apiKey: config.apiKey,
-
-
+            licenseKey: config.licenseKey,
+            endpoint,
+            mode: config.mode || (isLocalhost ? 'sandbox' : 'production'),
             tenant: config.tenant || 'default',
             debug: config.debug || false,
             timeout: config.timeout || 30000,
-            retry:
-
+            retry: {
+                enabled: config.retry?.enabled !== false,
+                maxAttempts: config.retry?.maxAttempts || 3,
+                delay: config.retry?.delay || 1000,
+            },
+            cache: {
+                enabled: config.cache?.enabled !== false,
+                ttl: config.cache?.ttl || 60000,
+            },
         };
         // Initialize interceptors
-        this.interceptors = [
-            new openai_1.OpenAIInterceptor(),
-            new anthropic_1.AnthropicInterceptor()
-        ];
+        this.interceptors = [new openai_1.OpenAIInterceptor(), new anthropic_1.AnthropicInterceptor()];
         if (this.config.debug) {
-            (0, helpers_1.debugLog)('AxonFlow initialized', {
+            (0, helpers_1.debugLog)('AxonFlow initialized', {
+                mode: this.config.mode,
+                endpoint: this.config.endpoint,
+                authMethod: isLocalhost
+                    ? 'self-hosted (no auth)'
+                    : this.config.licenseKey
+                        ? 'license-key'
+                        : 'api-key',
+            });
         }
     }
     /**
@@ -48,7 +74,7 @@ class AxonFlow {
             timestamp: Date.now(),
             aiRequest,
             mode: this.config.mode,
-            tenant: this.config.tenant
+            tenant: this.config.tenant,
         };
         // Check policies with AxonFlow Agent
         const governanceResponse = await this.checkPolicies(governanceRequest);
@@ -93,7 +119,7 @@ class AxonFlow {
             provider: 'unknown',
             model: 'unknown',
             prompt: aiCall.toString(),
-            parameters: {}
+            parameters: {},
         };
     }
     /**
@@ -104,7 +130,7 @@ class AxonFlow {
         // Transform SDK request to Agent API format
         const agentRequest = {
             query: request.aiRequest.prompt,
-            user_token: this.config.apiKey,
+            user_token: this.config.apiKey || '',
             client_id: this.config.tenant,
             request_type: 'llm_chat',
             context: {
@@ -112,16 +138,23 @@ class AxonFlow {
                 model: request.aiRequest.model,
                 parameters: request.aiRequest.parameters,
                 requestId: request.requestId,
-                mode: this.config.mode
-            }
+                mode: this.config.mode,
+            },
+        };
+        const headers = {
+            'Content-Type': 'application/json',
         };
+        // Add license key header if available (preferred auth method)
+        // Skip auth headers for localhost (self-hosted mode)
+        const isLocalhost = this.config.endpoint.includes('localhost') || this.config.endpoint.includes('127.0.0.1');
+        if (!isLocalhost && this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
         const response = await fetch(url, {
             method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
             body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -129,23 +162,29 @@ class AxonFlow {
         }
         const agentResponse = await response.json();
         // Transform Agent API response to SDK format
+        // Extract policy name from policy_info if available
+        const policyName = agentResponse.policy_info?.policies_evaluated?.[0] || 'agent-policy';
         return {
             requestId: request.requestId,
             allowed: !agentResponse.blocked,
-            violations: agentResponse.blocked
-
-
-
-
-
-
+            violations: agentResponse.blocked
+                ? [
+                    {
+                        type: 'security',
+                        severity: 'high',
+                        description: agentResponse.block_reason || 'Request blocked by policy',
+                        policy: policyName,
+                        action: 'blocked',
+                    },
+                ]
+                : [],
             modifiedRequest: agentResponse.data,
-            policies: [],
+            policies: agentResponse.policy_info?.policies_evaluated || [],
             audit: {
                 timestamp: Date.now(),
                 duration: parseInt(agentResponse.policy_info?.processing_time?.replace('ms', '') || '0'),
-                tenant: this.config.tenant
-            }
+                tenant: this.config.tenant,
+            },
         };
     }
     /**
@@ -158,7 +197,7 @@ class AxonFlow {
             (0, helpers_1.debugLog)('Request processed', {
                 allowed: response.allowed,
                 violations: response.violations?.length || 0,
-                duration: response.audit.duration
+                duration: response.audit.duration,
             });
         }
     }
@@ -166,9 +205,9 @@ class AxonFlow {
      * Check if an error is from AxonFlow (vs the AI provider)
      */
     isAxonFlowError(error) {
-        return error?.message?.includes('AxonFlow') ||
+        return (error?.message?.includes('AxonFlow') ||
             error?.message?.includes('governance') ||
-            error?.message?.includes('fetch');
+            error?.message?.includes('fetch'));
     }
     /**
      * Create a sandbox client for testing
@@ -178,9 +217,176 @@ class AxonFlow {
             apiKey,
             mode: 'sandbox',
             endpoint: 'https://staging-eu.getaxonflow.com',
-            debug: true
+            debug: true,
         });
     }
+    // ============================================================================
+    // Proxy Mode Methods
+    // ============================================================================
+    /**
+     * Check if AxonFlow Agent is healthy and available.
+     *
+     * @returns HealthStatus object with agent health information
+     *
+     * @example
+     * ```typescript
+     * const health = await axonflow.healthCheck();
+     * if (health.status === 'healthy') {
+     *   console.log('Agent is healthy');
+     * }
+     * ```
+     */
+    async healthCheck() {
+        const url = `${this.config.endpoint}/health`;
+        try {
+            const response = await fetch(url, {
+                method: 'GET',
+                signal: AbortSignal.timeout(this.config.timeout),
+            });
+            if (!response.ok) {
+                return {
+                    status: 'unhealthy',
+                    components: {
+                        agent: { status: 'error', message: `HTTP ${response.status}` },
+                    },
+                };
+            }
+            const data = await response.json();
+            return {
+                status: data.status === 'healthy' ? 'healthy' : 'degraded',
+                version: data.version,
+                uptime: data.uptime,
+                components: data.components,
+            };
+        }
+        catch (error) {
+            if (this.config.debug) {
+                (0, helpers_1.debugLog)('Health check failed', error);
+            }
+            return {
+                status: 'unhealthy',
+                components: {
+                    agent: {
+                        status: 'error',
+                        message: error instanceof Error ? error.message : 'Unknown error',
+                    },
+                },
+            };
+        }
+    }
+    /**
+     * Execute a query through AxonFlow with policy enforcement (Proxy Mode).
+     *
+     * This is the primary method for Proxy Mode, where AxonFlow handles policy
+     * checking and optionally routes requests to LLM providers.
+     *
+     * @param options - Query execution options
+     * @returns ExecuteQueryResponse with results or error information
+     * @throws PolicyViolationError if request is blocked by policy
+     * @throws AuthenticationError if credentials are invalid
+     * @throws APIError for other API errors
+     *
+     * @example
+     * ```typescript
+     * const response = await axonflow.executeQuery({
+     *   userToken: 'user-123',
+     *   query: 'Explain quantum computing',
+     *   requestType: 'chat',
+     *   context: { provider: 'openai', model: 'gpt-4' }
+     * });
+     *
+     * if (response.success) {
+     *   console.log('Response:', response.data);
+     * }
+     * ```
+     */
+    async executeQuery(options) {
+        const agentRequest = {
+            query: options.query,
+            user_token: options.userToken,
+            client_id: this.config.tenant,
+            request_type: options.requestType,
+            context: options.context || {},
+        };
+        const url = `${this.config.endpoint}/api/request`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        // Add authentication headers
+        const isLocalhost = this.config.endpoint.includes('localhost') || this.config.endpoint.includes('127.0.0.1');
+        if (!isLocalhost) {
+            if (this.config.licenseKey) {
+                headers['X-License-Key'] = this.config.licenseKey;
+            }
+            else if (this.config.apiKey) {
+                headers['X-Client-Secret'] = this.config.apiKey;
+            }
+        }
+        if (this.config.debug) {
+            (0, helpers_1.debugLog)('Proxy Mode: executeQuery', {
+                requestType: options.requestType,
+                query: options.query.substring(0, 50),
+            });
+        }
+        const response = await fetch(url, {
+            method: 'POST',
+            headers,
+            body: JSON.stringify(agentRequest),
+            signal: AbortSignal.timeout(this.config.timeout),
+        });
+        if (!response.ok) {
+            const errorText = await response.text();
+            if (response.status === 401 || response.status === 403) {
+                // Try to parse as JSON for policy violation info
+                try {
+                    const errorJson = JSON.parse(errorText);
+                    if (errorJson.blocked || errorJson.block_reason) {
+                        throw new errors_1.PolicyViolationError(errorJson.block_reason || 'Request blocked by policy', errorJson.policy_info?.policies_evaluated);
+                    }
+                }
+                catch (e) {
+                    if (e instanceof errors_1.PolicyViolationError)
+                        throw e;
+                }
+                throw new errors_1.AuthenticationError(`Request failed: ${errorText}`);
+            }
+            throw new errors_1.APIError(response.status, response.statusText, errorText);
+        }
+        const data = await response.json();
+        // Check for policy violation in successful response (some blocked responses return 200)
+        if (data.blocked) {
+            throw new errors_1.PolicyViolationError(data.block_reason || 'Request blocked by policy', data.policy_info?.policies_evaluated);
+        }
+        // Transform snake_case response to camelCase
+        const result = {
+            success: data.success,
+            data: data.data,
+            result: data.result,
+            planId: data.plan_id,
+            requestId: data.request_id,
+            metadata: data.metadata || {},
+            error: data.error,
+            blocked: data.blocked || false,
+            blockReason: data.block_reason,
+        };
+        // Parse policy info if present
+        if (data.policy_info) {
+            result.policyInfo = {
+                policiesEvaluated: data.policy_info.policies_evaluated || [],
+                staticChecks: data.policy_info.static_checks || [],
+                processingTime: data.policy_info.processing_time || '',
+                tenantId: data.policy_info.tenant_id || '',
+            };
+        }
+        if (this.config.debug) {
+            (0, helpers_1.debugLog)('Proxy Mode: executeQuery result', {
+                success: result.success,
+                blocked: result.blocked,
+                hasData: !!result.data,
+            });
+        }
+        return result;
+    }
     /**
      * List all available MCP connectors from the marketplace
      */
@@ -188,7 +394,7 @@ class AxonFlow {
         const url = `${this.config.endpoint}/api/connectors`;
         const response = await fetch(url, {
             method: 'GET',
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             throw new Error(`Failed to list connectors: ${response.status} ${response.statusText}`);
@@ -204,14 +410,21 @@ class AxonFlow {
      */
     async installConnector(request) {
         const url = `${this.config.endpoint}/api/connectors/install`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        // Add authentication headers
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
+        else if (this.config.apiKey) {
+            headers['X-Client-Secret'] = this.config.apiKey;
+        }
         const response = await fetch(url, {
             method: 'POST',
-            headers
-                'Content-Type': 'application/json',
-                'X-Client-Secret': this.config.apiKey
-            },
+            headers,
             body: JSON.stringify(request),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -227,22 +440,26 @@ class AxonFlow {
     async queryConnector(connectorName, query, params) {
         const agentRequest = {
             query,
-            user_token: this.config.apiKey,
+            user_token: this.config.apiKey || '',
            client_id: this.config.tenant,
             request_type: 'mcp-query',
             context: {
                 connector: connectorName,
-                params: params || {}
-            }
+                params: params || {},
+            },
         };
         const url = `${this.config.endpoint}/api/request`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
         const response = await fetch(url, {
             method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
             body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -256,28 +473,35 @@ class AxonFlow {
             success: agentResponse.success,
             data: agentResponse.data,
             error: agentResponse.error,
-            meta: agentResponse.metadata
+            meta: agentResponse.metadata,
         };
     }
     /**
      * Generate a multi-agent execution plan from a natural language query
+     * @param query - Natural language query describing the task
+     * @param domain - Optional domain hint (travel, healthcare, etc.)
+     * @param userToken - Optional user token for authentication (defaults to tenant/client_id)
      */
-    async generatePlan(query, domain) {
+    async generatePlan(query, domain, userToken) {
         const agentRequest = {
             query,
-            user_token: this.config.
+            user_token: userToken || this.config.tenant,
             client_id: this.config.tenant,
             request_type: 'multi-agent-plan',
-            context: domain ? { domain } : {}
+            context: domain ? { domain } : {},
         };
         const url = `${this.config.endpoint}/api/request`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
         const response = await fetch(url, {
             method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
             body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -296,28 +520,34 @@ class AxonFlow {
             domain: agentResponse.data?.domain || domain || 'generic',
             complexity: agentResponse.data?.complexity || 0,
             parallel: agentResponse.data?.parallel || false,
-            metadata: agentResponse.metadata || {}
+            metadata: agentResponse.metadata || {},
         };
     }
     /**
      * Execute a previously generated multi-agent plan
+     * @param planId - ID of the plan to execute
+     * @param userToken - Optional user token for authentication (defaults to tenant/client_id)
      */
-    async executePlan(planId) {
+    async executePlan(planId, userToken) {
         const agentRequest = {
             query: '',
-            user_token: this.config.
+            user_token: userToken || this.config.tenant,
             client_id: this.config.tenant,
             request_type: 'execute-plan',
-            context: { plan_id: planId }
+            context: { plan_id: planId },
         };
         const url = `${this.config.endpoint}/api/request`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
         const response = await fetch(url, {
             method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
             body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -333,7 +563,7 @@ class AxonFlow {
             result: agentResponse.result,
             stepResults: agentResponse.metadata?.step_results,
             error: agentResponse.error,
-            duration: agentResponse.metadata?.duration
+            duration: agentResponse.metadata?.duration,
         };
     }
     /**
@@ -343,7 +573,7 @@ class AxonFlow {
         const url = `${this.config.endpoint}/api/plans/${planId}`;
         const response = await fetch(url, {
             method: 'GET',
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -356,191 +586,209 @@ class AxonFlow {
             result: status.result,
             stepResults: status.step_results,
             error: status.error,
-            duration: status.duration
+            duration: status.duration,
         };
     }
-    //
-    // Gateway Mode
-    //
-    // Gateway Mode allows clients to make LLM calls directly while still using
-    // AxonFlow for policy enforcement and audit logging.
-    //
-    // Usage:
-    // 1. Call getPolicyApprovedContext() before making LLM call
-    // 2. Make LLM call directly to your provider (using returned approved data)
-    // 3. Call auditLLMCall() after to record the call for compliance
-    //
-    // Example:
-    // const ctx = await axonflow.getPolicyApprovedContext({
-    //   userToken: 'user-jwt',
-    //   dataSources: ['postgres'],
-    //   query: 'Find patients with diabetes'
-    // });
-    // if (!ctx.approved) throw new Error(ctx.blockReason);
-    //
-    // const llmResp = await openai.chat.completions.create({...}); // Your LLM call
-    //
-    // await axonflow.auditLLMCall({
-    //   contextId: ctx.contextId,
-    //   responseSummary: 'Found 5 patients',
-    //   provider: 'openai',
-    //   model: 'gpt-4',
-    //   tokenUsage: { promptTokens: 100, completionTokens: 50, totalTokens: 150 },
-    //   latencyMs: 250
-    // });
+    // ============================================================================
+    // Gateway Mode Methods
+    // ============================================================================
     /**
-     *
-     *
-
-
+     * Gateway Mode: Pre-check policy approval before making a direct LLM call.
+     * Alias for getPolicyApprovedContext() for simpler API.
+     */
+    async preCheck(options) {
+        return this.getPolicyApprovedContext(options);
+    }
+    /**
+     * Gateway Mode: Get policy-approved context before making a direct LLM call.
      *
-     *
-     *
+     * Use this when you want to:
+     * - Make direct LLM calls (not through AxonFlow proxy)
+     * - Have full control over your LLM provider/model selection
+     * - Minimize latency by calling LLM directly
     *
     * @example
-     *
-     *
-     *
-     * query: '
+     * ```typescript
+     * const ctx = await axonflow.getPolicyApprovedContext({
+     *   userToken: 'user-jwt',
+     *   query: 'Analyze this customer data',
+     *   dataSources: ['postgres']
      * });
     *
-     * if (!
-     * throw new Error(`
+     * if (!ctx.approved) {
+     *   throw new Error(`Blocked: ${ctx.blockReason}`);
     * }
     *
-     * //
-     * const
+     * // Make direct LLM call with approved data
+     * const response = await openai.chat.completions.create({
+     *   model: 'gpt-4',
+     *   messages: [{ role: 'user', content: JSON.stringify(ctx.approvedData) }]
+     * });
+     *
+     * // Audit the call
+     * await axonflow.auditLLMCall({
+     *   contextId: ctx.contextId,
+     *   responseSummary: response.choices[0].message.content.substring(0, 100),
+     *   provider: 'openai',
+     *   model: 'gpt-4',
+     *   tokenUsage: {
+     *     promptTokens: response.usage.prompt_tokens,
+     *     completionTokens: response.usage.completion_tokens,
+     *     totalTokens: response.usage.total_tokens
+     *   },
+     *   latencyMs: 250
+     * });
+     * ```
     */
-    async getPolicyApprovedContext(
+    async getPolicyApprovedContext(options) {
         const url = `${this.config.endpoint}/api/policy/pre-check`;
-        const
-            user_token:
+        const requestBody = {
+            user_token: options.userToken,
             client_id: this.config.tenant,
-            query:
-            data_sources:
-            context:
+            query: options.query,
+            data_sources: options.dataSources || [],
+            context: options.context || {},
+        };
+        const headers = {
+            'Content-Type': 'application/json',
         };
+        // Add authentication headers
+        const isLocalhost = this.config.endpoint.includes('localhost') || this.config.endpoint.includes('127.0.0.1');
+        if (!isLocalhost) {
+            if (this.config.licenseKey) {
+                headers['X-License-Key'] = this.config.licenseKey;
+            }
+            else if (this.config.apiKey) {
+                headers['X-Client-Secret'] = this.config.apiKey;
+            }
+        }
         if (this.config.debug) {
-            (0, helpers_1.debugLog)('Gateway
-                query: request.query.substring(0, 50),
-                dataSources: request.dataSources
-            });
+            (0, helpers_1.debugLog)('Gateway Mode: Pre-check', { query: options.query.substring(0, 50) });
         }
-        const startTime = Date.now();
         const response = await fetch(url, {
             method: 'POST',
-            headers
-
-
-                'X-License-Key': this.config.apiKey
-            },
-            body: JSON.stringify(body),
-            signal: AbortSignal.timeout(this.config.timeout)
+            headers,
+            body: JSON.stringify(requestBody),
+            signal: AbortSignal.timeout(this.config.timeout),
         });
-        const duration = Date.now() - startTime;
         if (!response.ok) {
             const errorText = await response.text();
-
+            if (response.status === 401 || response.status === 403) {
+                throw new errors_1.AuthenticationError(`Policy pre-check authentication failed: ${errorText}`);
+            }
+            throw new errors_1.APIError(response.status, response.statusText, errorText);
         }
         const data = await response.json();
-
-
-
-
-
-
-        }
-        return {
+        // Transform snake_case response to camelCase
+        // Default expiration to 5 minutes from now if not provided
+        const expiresAt = data.expires_at
+            ? new Date(data.expires_at)
+            : new Date(Date.now() + 5 * 60 * 1000);
+        const result = {
             contextId: data.context_id,
             approved: data.approved,
-            approvedData: data.approved_data,
+            approvedData: data.approved_data || {},
             policies: data.policies || [],
-
+            expiresAt,
+            blockReason: data.block_reason,
+        };
+        // Parse rate limit info if present
+        if (data.rate_limit) {
+            result.rateLimitInfo = {
                 limit: data.rate_limit.limit,
                 remaining: data.rate_limit.remaining,
-                resetAt: new Date(data.rate_limit.reset_at)
-            }
-
-
-
+                resetAt: new Date(data.rate_limit.reset_at),
+            };
+        }
+        if (this.config.debug) {
+            (0, helpers_1.debugLog)('Gateway Mode: Pre-check result', {
+                approved: result.approved,
+                contextId: result.contextId,
+                policies: result.policies.length,
+            });
+        }
+        return result;
     }
     /**
-     *
-     *
-     * This is the second step in Gateway Mode. Call this after making your
-     * LLM call to record it in the audit trail.
+     * Gateway Mode: Audit an LLM call after completion.
     *
-     *
-     *
-     * @param provider LLM provider name
-     * @param model Model name
-     * @param tokenUsage Token counts from LLM response
-     * @param latencyMs Time taken for LLM call in milliseconds
-     * @param metadata Optional additional metadata
-     * @returns AuditResult confirming the audit was recorded
+     * Call this after making a direct LLM call to log the audit trail.
+     * This is required for compliance and monitoring.
     *
     * @example
-     *
-     *
-     *
-     * '
-     * '
-     *
-     *
-     *
-     *
+     * ```typescript
+     * await axonflow.auditLLMCall({
+     *   contextId: ctx.contextId,
+     *   responseSummary: 'Generated report with 5 items',
+     *   provider: 'openai',
+     *   model: 'gpt-4',
+     *   tokenUsage: {
+     *     promptTokens: 100,
+     *     completionTokens: 50,
+     *     totalTokens: 150
+     *   },
+     *   latencyMs: 250
+     * });
+     * ```
     */
-    async auditLLMCall(
+    async auditLLMCall(options) {
         const url = `${this.config.endpoint}/api/audit/llm-call`;
-        const
-            context_id: contextId,
+        const requestBody = {
+            context_id: options.contextId,
            client_id: this.config.tenant,
-            response_summary: responseSummary,
-            provider,
-            model,
+            response_summary: options.responseSummary,
+            provider: options.provider,
+            model: options.model,
             token_usage: {
-                prompt_tokens: tokenUsage.promptTokens,
-                completion_tokens: tokenUsage.completionTokens,
-                total_tokens: tokenUsage.totalTokens
+                prompt_tokens: options.tokenUsage.promptTokens,
+                completion_tokens: options.tokenUsage.completionTokens,
+                total_tokens: options.tokenUsage.totalTokens,
             },
-            latency_ms: latencyMs,
-            metadata
+            latency_ms: options.latencyMs,
+            metadata: options.metadata || {},
+        };
+        const headers = {
+            'Content-Type': 'application/json',
         };
+        // Add authentication headers
+        const isLocalhost = this.config.endpoint.includes('localhost') || this.config.endpoint.includes('127.0.0.1');
+        if (!isLocalhost) {
+            if (this.config.licenseKey) {
+                headers['X-License-Key'] = this.config.licenseKey;
+            }
+            else if (this.config.apiKey) {
+                headers['X-Client-Secret'] = this.config.apiKey;
+            }
+        }
         if (this.config.debug) {
-            (0, helpers_1.debugLog)('Gateway
-                contextId,
-                provider,
-                model,
-                tokens: tokenUsage.totalTokens
+            (0, helpers_1.debugLog)('Gateway Mode: Audit', {
+                contextId: options.contextId,
+                provider: options.provider,
+                model: options.model,
             });
         }
-        const startTime = Date.now();
         const response = await fetch(url, {
             method: 'POST',
-            headers
-
-
-                'X-License-Key': this.config.apiKey
-            },
-            body: JSON.stringify(body),
-            signal: AbortSignal.timeout(this.config.timeout)
+            headers,
+            body: JSON.stringify(requestBody),
+            signal: AbortSignal.timeout(this.config.timeout),
         });
-        const duration = Date.now() - startTime;
         if (!response.ok) {
             const errorText = await response.text();
-
+            if (response.status === 401 || response.status === 403) {
+                throw new errors_1.AuthenticationError(`Audit logging authentication failed: ${errorText}`);
+            }
+            throw new errors_1.APIError(response.status, response.statusText, errorText);
         }
         const data = await response.json();
-
-            (0, helpers_1.debugLog)('Gateway audit complete', {
-                auditId: data.audit_id,
-                duration
-            });
-        }
-        return {
+        const result = {
             success: data.success,
-            auditId: data.audit_id
+            auditId: data.audit_id,
         };
+        if (this.config.debug) {
+            (0, helpers_1.debugLog)('Gateway Mode: Audit logged', { auditId: result.auditId });
+        }
+        return result;
     }
 }
 exports.AxonFlow = AxonFlow;