@axonflow/sdk 1.2.0 → 1.2.1
This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/LICENSE +21 -0
- package/README.md +194 -9
- package/dist/cjs/client.d.ts +64 -40
- package/dist/cjs/client.d.ts.map +1 -1
- package/dist/cjs/client.js +279 -198
- package/dist/cjs/client.js.map +1 -1
- package/dist/cjs/errors.d.ts +51 -0
- package/dist/cjs/errors.d.ts.map +1 -0
- package/dist/cjs/errors.js +84 -0
- package/dist/cjs/errors.js.map +1 -0
- package/dist/cjs/index.d.ts +3 -2
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +10 -2
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/interceptors/anthropic.d.ts +1 -1
- package/dist/cjs/interceptors/anthropic.d.ts.map +1 -1
- package/dist/cjs/interceptors/anthropic.js +6 -6
- package/dist/cjs/interceptors/anthropic.js.map +1 -1
- package/dist/cjs/interceptors/openai.d.ts +1 -1
- package/dist/cjs/interceptors/openai.d.ts.map +1 -1
- package/dist/cjs/interceptors/openai.js +5 -5
- package/dist/cjs/interceptors/openai.js.map +1 -1
- package/dist/cjs/types/config.d.ts +7 -1
- package/dist/cjs/types/config.d.ts.map +1 -1
- package/dist/cjs/types/gateway.d.ts +51 -114
- package/dist/cjs/types/gateway.d.ts.map +1 -1
- package/dist/cjs/types/gateway.js +2 -7
- package/dist/cjs/types/gateway.js.map +1 -1
- package/dist/cjs/utils/helpers.d.ts.map +1 -1
- package/dist/cjs/utils/helpers.js +3 -1
- package/dist/cjs/utils/helpers.js.map +1 -1
- package/dist/esm/client.d.ts +64 -40
- package/dist/esm/client.d.ts.map +1 -1
- package/dist/esm/client.js +279 -198
- package/dist/esm/client.js.map +1 -1
- package/dist/esm/errors.d.ts +51 -0
- package/dist/esm/errors.d.ts.map +1 -0
- package/dist/esm/errors.js +75 -0
- package/dist/esm/errors.js.map +1 -0
- package/dist/esm/index.d.ts +3 -2
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +3 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/interceptors/anthropic.d.ts +1 -1
- package/dist/esm/interceptors/anthropic.d.ts.map +1 -1
- package/dist/esm/interceptors/anthropic.js +6 -6
- package/dist/esm/interceptors/anthropic.js.map +1 -1
- package/dist/esm/interceptors/openai.d.ts +1 -1
- package/dist/esm/interceptors/openai.d.ts.map +1 -1
- package/dist/esm/interceptors/openai.js +5 -5
- package/dist/esm/interceptors/openai.js.map +1 -1
- package/dist/esm/types/config.d.ts +7 -1
- package/dist/esm/types/config.d.ts.map +1 -1
- package/dist/esm/types/gateway.d.ts +51 -114
- package/dist/esm/types/gateway.d.ts.map +1 -1
- package/dist/esm/types/gateway.js +2 -7
- package/dist/esm/types/gateway.js.map +1 -1
- package/dist/esm/utils/helpers.d.ts.map +1 -1
- package/dist/esm/utils/helpers.js +3 -1
- package/dist/esm/utils/helpers.js.map +1 -1
- package/package.json +21 -7
package/dist/esm/client.js
CHANGED
@@ -1,3 +1,4 @@
+import { AuthenticationError, APIError } from './errors.js';
 import { OpenAIInterceptor } from './interceptors/openai.js';
 import { AnthropicInterceptor } from './interceptors/anthropic.js';
 import { generateRequestId, debugLog } from './utils/helpers.js';
@@ -7,24 +8,49 @@ import { generateRequestId, debugLog } from './utils/helpers.js';
 export class AxonFlow {
     constructor(config) {
         this.interceptors = [];
-        // Set defaults
+        // Set defaults first to determine endpoint
+        const endpoint = config.endpoint || 'https://staging-eu.getaxonflow.com';
+        // Check if running in self-hosted mode (localhost)
+        const isLocalhost = endpoint.includes('localhost') || endpoint.includes('127.0.0.1');
+        // License key is optional for self-hosted deployments
+        // When not provided, agent must have SELF_HOSTED_MODE=true
+        if (!isLocalhost && !config.licenseKey && !config.apiKey) {
+            throw new Error('Either licenseKey or apiKey must be provided for non-localhost endpoints');
+        }
+        if (isLocalhost && !config.licenseKey && !config.apiKey && config.debug) {
+            console.warn('[AxonFlow] No license key provided - ensure agent has SELF_HOSTED_MODE=true');
+        }
+        // Set configuration
         this.config = {
             apiKey: config.apiKey,
-
-
+            licenseKey: config.licenseKey,
+            endpoint,
+            mode: config.mode || (isLocalhost ? 'sandbox' : 'production'),
             tenant: config.tenant || 'default',
             debug: config.debug || false,
             timeout: config.timeout || 30000,
-            retry:
-
+            retry: {
+                enabled: config.retry?.enabled !== false,
+                maxAttempts: config.retry?.maxAttempts || 3,
+                delay: config.retry?.delay || 1000,
+            },
+            cache: {
+                enabled: config.cache?.enabled !== false,
+                ttl: config.cache?.ttl || 60000,
+            },
         };
         // Initialize interceptors
-        this.interceptors = [
-            new OpenAIInterceptor(),
-            new AnthropicInterceptor()
-        ];
+        this.interceptors = [new OpenAIInterceptor(), new AnthropicInterceptor()];
         if (this.config.debug) {
-            debugLog('AxonFlow initialized', {
+            debugLog('AxonFlow initialized', {
+                mode: this.config.mode,
+                endpoint: this.config.endpoint,
+                authMethod: isLocalhost
+                    ? 'self-hosted (no auth)'
+                    : this.config.licenseKey
+                        ? 'license-key'
+                        : 'api-key',
+            });
         }
     }
     /**
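In practice, the reworked constructor resolves the endpoint first, treats localhost endpoints as self-hosted, and requires either a licenseKey or an apiKey for everything else. A minimal sketch of initializing the client under these rules; the environment variable name, localhost port, and tenant value are placeholders, while the config fields and defaults come from the hunk above:

```typescript
import { AxonFlow } from '@axonflow/sdk';

// Hosted agent: a licenseKey (or legacy apiKey) is now required;
// omitting both makes the constructor throw.
const hosted = new AxonFlow({
  licenseKey: process.env.AXONFLOW_LICENSE_KEY, // placeholder env var
  endpoint: 'https://staging-eu.getaxonflow.com', // also the default
  tenant: 'acme',
  retry: { maxAttempts: 5 }, // merged with the new retry defaults (enabled, delay)
  cache: { ttl: 30_000 },    // merged with the new cache defaults
});

// Self-hosted agent on localhost: credentials may be omitted, mode defaults
// to 'sandbox', and the agent itself must run with SELF_HOSTED_MODE=true.
const selfHosted = new AxonFlow({
  endpoint: 'http://localhost:8080', // placeholder port
  debug: true, // logs a warning that no license key was provided
});
```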
@@ -45,7 +71,7 @@ export class AxonFlow {
             timestamp: Date.now(),
             aiRequest,
             mode: this.config.mode,
-            tenant: this.config.tenant
+            tenant: this.config.tenant,
         };
         // Check policies with AxonFlow Agent
         const governanceResponse = await this.checkPolicies(governanceRequest);
@@ -90,7 +116,7 @@ export class AxonFlow {
             provider: 'unknown',
             model: 'unknown',
             prompt: aiCall.toString(),
-            parameters: {}
+            parameters: {},
         };
     }
     /**
@@ -101,7 +127,7 @@ export class AxonFlow {
         // Transform SDK request to Agent API format
         const agentRequest = {
             query: request.aiRequest.prompt,
-            user_token: this.config.apiKey,
+            user_token: this.config.apiKey || '',
             client_id: this.config.tenant,
             request_type: 'llm_chat',
             context: {
@@ -109,16 +135,23 @@ export class AxonFlow {
                 model: request.aiRequest.model,
                 parameters: request.aiRequest.parameters,
                 requestId: request.requestId,
-                mode: this.config.mode
-            }
+                mode: this.config.mode,
+            },
         };
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        // Add license key header if available (preferred auth method)
+        // Skip auth headers for localhost (self-hosted mode)
+        const isLocalhost = this.config.endpoint.includes('localhost') || this.config.endpoint.includes('127.0.0.1');
+        if (!isLocalhost && this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
         const response = await fetch(url, {
             method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
             body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -126,23 +159,29 @@ export class AxonFlow {
         }
         const agentResponse = await response.json();
         // Transform Agent API response to SDK format
+        // Extract policy name from policy_info if available
+        const policyName = agentResponse.policy_info?.policies_evaluated?.[0] || 'agent-policy';
         return {
             requestId: request.requestId,
             allowed: !agentResponse.blocked,
-            violations: agentResponse.blocked
-
-
-
-
-
-
+            violations: agentResponse.blocked
+                ? [
+                    {
+                        type: 'security',
+                        severity: 'high',
+                        description: agentResponse.block_reason || 'Request blocked by policy',
+                        policy: policyName,
+                        action: 'blocked',
+                    },
+                ]
+                : [],
             modifiedRequest: agentResponse.data,
-            policies: [],
+            policies: agentResponse.policy_info?.policies_evaluated || [],
             audit: {
                 timestamp: Date.now(),
                 duration: parseInt(agentResponse.policy_info?.processing_time?.replace('ms', '') || '0'),
-                tenant: this.config.tenant
-            }
+                tenant: this.config.tenant,
+            },
         };
     }
     /**
@@ -155,7 +194,7 @@ export class AxonFlow {
             debugLog('Request processed', {
                 allowed: response.allowed,
                 violations: response.violations?.length || 0,
-                duration: response.audit.duration
+                duration: response.audit.duration,
             });
         }
     }
@@ -163,9 +202,9 @@ export class AxonFlow {
     * Check if an error is from AxonFlow (vs the AI provider)
     */
    isAxonFlowError(error) {
-        return error?.message?.includes('AxonFlow') ||
+        return (error?.message?.includes('AxonFlow') ||
            error?.message?.includes('governance') ||
-            error?.message?.includes('fetch');
+            error?.message?.includes('fetch'));
    }
    /**
     * Create a sandbox client for testing
@@ -175,7 +214,7 @@ export class AxonFlow {
            apiKey,
            mode: 'sandbox',
            endpoint: 'https://staging-eu.getaxonflow.com',
-            debug: true
+            debug: true,
        });
    }
    /**
@@ -185,7 +224,7 @@ export class AxonFlow {
        const url = `${this.config.endpoint}/api/connectors`;
        const response = await fetch(url, {
            method: 'GET',
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            throw new Error(`Failed to list connectors: ${response.status} ${response.statusText}`);
@@ -201,14 +240,21 @@ export class AxonFlow {
     */
    async installConnector(request) {
        const url = `${this.config.endpoint}/api/connectors/install`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        // Add authentication headers
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
+        else if (this.config.apiKey) {
+            headers['X-Client-Secret'] = this.config.apiKey;
+        }
        const response = await fetch(url, {
            method: 'POST',
-            headers
-                'Content-Type': 'application/json',
-                'X-Client-Secret': this.config.apiKey
-            },
+            headers,
            body: JSON.stringify(request),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            const errorText = await response.text();
@@ -224,22 +270,26 @@ export class AxonFlow {
    async queryConnector(connectorName, query, params) {
        const agentRequest = {
            query,
-            user_token: this.config.apiKey,
+            user_token: this.config.apiKey || '',
            client_id: this.config.tenant,
            request_type: 'mcp-query',
            context: {
                connector: connectorName,
-                params: params || {}
-            }
+                params: params || {},
+            },
        };
        const url = `${this.config.endpoint}/api/request`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
        const response = await fetch(url, {
            method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
            body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            const errorText = await response.text();
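As a usage sketch, queryConnector(connectorName, query, params) still returns the `{ success, data, error, meta }` shape built above; the license key, connector name, SQL text, and params here are placeholders:

```typescript
import { AxonFlow } from '@axonflow/sdk';

const axonflow = new AxonFlow({ licenseKey: 'lk-placeholder', tenant: 'acme' });

// Hypothetical connector query; 'postgres' and the SQL are illustrative only.
const result = await axonflow.queryConnector(
  'postgres',
  'SELECT id, name FROM customers LIMIT 10',
  { region: 'eu' },
);

if (result.success) {
  console.log('rows:', result.data, 'meta:', result.meta);
} else {
  console.error('connector query failed:', result.error);
}
```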
@@ -253,28 +303,35 @@ export class AxonFlow {
            success: agentResponse.success,
            data: agentResponse.data,
            error: agentResponse.error,
-            meta: agentResponse.metadata
+            meta: agentResponse.metadata,
        };
    }
    /**
     * Generate a multi-agent execution plan from a natural language query
+     * @param query - Natural language query describing the task
+     * @param domain - Optional domain hint (travel, healthcare, etc.)
+     * @param userToken - Optional user token for authentication (defaults to tenant/client_id)
     */
-    async generatePlan(query, domain) {
+    async generatePlan(query, domain, userToken) {
        const agentRequest = {
            query,
-            user_token: this.config.
+            user_token: userToken || this.config.tenant,
            client_id: this.config.tenant,
            request_type: 'multi-agent-plan',
-            context: domain ? { domain } : {}
+            context: domain ? { domain } : {},
        };
        const url = `${this.config.endpoint}/api/request`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
        const response = await fetch(url, {
            method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
            body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            const errorText = await response.text();
@@ -293,28 +350,34 @@ export class AxonFlow {
            domain: agentResponse.data?.domain || domain || 'generic',
            complexity: agentResponse.data?.complexity || 0,
            parallel: agentResponse.data?.parallel || false,
-            metadata: agentResponse.metadata || {}
+            metadata: agentResponse.metadata || {},
        };
    }
    /**
     * Execute a previously generated multi-agent plan
+     * @param planId - ID of the plan to execute
+     * @param userToken - Optional user token for authentication (defaults to tenant/client_id)
     */
-    async executePlan(planId) {
+    async executePlan(planId, userToken) {
        const agentRequest = {
            query: '',
-            user_token: this.config.
+            user_token: userToken || this.config.tenant,
            client_id: this.config.tenant,
            request_type: 'execute-plan',
-            context: { plan_id: planId }
+            context: { plan_id: planId },
        };
        const url = `${this.config.endpoint}/api/request`;
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        if (this.config.licenseKey) {
+            headers['X-License-Key'] = this.config.licenseKey;
+        }
        const response = await fetch(url, {
            method: 'POST',
-            headers
-                'Content-Type': 'application/json'
-            },
+            headers,
            body: JSON.stringify(agentRequest),
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            const errorText = await response.text();
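Both planning methods gain an optional userToken argument that is sent as user_token instead of silently falling back to the tenant id. A short sketch of the new signatures; the license key, task text, domain, token, and plan id are placeholders, and the logged fields follow the response transforms in the surrounding hunks:

```typescript
import { AxonFlow } from '@axonflow/sdk';

const axonflow = new AxonFlow({ licenseKey: 'lk-placeholder', tenant: 'acme' });

// Generate a multi-agent plan, attributing the request to an end user.
const plan = await axonflow.generatePlan(
  'Book a flight and a hotel for next week', // natural language task
  'travel',                                  // optional domain hint
  'user-jwt-or-id',                          // optional userToken (defaults to the tenant)
);
console.log(plan.domain, plan.complexity, plan.parallel);

// Execute a previously generated plan by id (obtaining the id is elided here).
const planId = 'plan-123'; // placeholder
const execution = await axonflow.executePlan(planId, 'user-jwt-or-id');
console.log(execution.result, execution.stepResults, execution.duration);
```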
@@ -330,7 +393,7 @@ export class AxonFlow {
            result: agentResponse.result,
            stepResults: agentResponse.metadata?.step_results,
            error: agentResponse.error,
-            duration: agentResponse.metadata?.duration
+            duration: agentResponse.metadata?.duration,
        };
    }
    /**
@@ -340,7 +403,7 @@ export class AxonFlow {
        const url = `${this.config.endpoint}/api/plans/${planId}`;
        const response = await fetch(url, {
            method: 'GET',
-            signal: AbortSignal.timeout(this.config.timeout)
+            signal: AbortSignal.timeout(this.config.timeout),
        });
        if (!response.ok) {
            const errorText = await response.text();
@@ -353,191 +416,209 @@ export class AxonFlow {
            result: status.result,
            stepResults: status.step_results,
            error: status.error,
-            duration: status.duration
+            duration: status.duration,
        };
    }
-    //
-    // Gateway Mode
-    //
-    // Gateway Mode allows clients to make LLM calls directly while still using
-    // AxonFlow for policy enforcement and audit logging.
-    //
-    // Usage:
-    // 1. Call getPolicyApprovedContext() before making LLM call
-    // 2. Make LLM call directly to your provider (using returned approved data)
-    // 3. Call auditLLMCall() after to record the call for compliance
-    //
-    // Example:
-    // const ctx = await axonflow.getPolicyApprovedContext({
-    //   userToken: 'user-jwt',
-    //   dataSources: ['postgres'],
-    //   query: 'Find patients with diabetes'
-    // });
-    // if (!ctx.approved) throw new Error(ctx.blockReason);
-    //
-    // const llmResp = await openai.chat.completions.create({...}); // Your LLM call
-    //
-    // await axonflow.auditLLMCall({
-    //   contextId: ctx.contextId,
-    //   responseSummary: 'Found 5 patients',
-    //   provider: 'openai',
-    //   model: 'gpt-4',
-    //   tokenUsage: { promptTokens: 100, completionTokens: 50, totalTokens: 150 },
-    //   latencyMs: 250
-    // });
+    // ============================================================================
+    // Gateway Mode Methods
+    // ============================================================================
    /**
-     *
-     *
-
-
+     * Gateway Mode: Pre-check policy approval before making a direct LLM call.
+     * Alias for getPolicyApprovedContext() for simpler API.
+     */
+    async preCheck(options) {
+        return this.getPolicyApprovedContext(options);
+    }
+    /**
+     * Gateway Mode: Get policy-approved context before making a direct LLM call.
     *
-     *
-     *
+     * Use this when you want to:
+     * - Make direct LLM calls (not through AxonFlow proxy)
+     * - Have full control over your LLM provider/model selection
+     * - Minimize latency by calling LLM directly
     *
     * @example
-     *
-     *
-     *
-     * query: '
+     * ```typescript
+     * const ctx = await axonflow.getPolicyApprovedContext({
+     *   userToken: 'user-jwt',
+     *   query: 'Analyze this customer data',
+     *   dataSources: ['postgres']
     * });
     *
-     * if (!
-     * throw new Error(`
+     * if (!ctx.approved) {
+     *   throw new Error(`Blocked: ${ctx.blockReason}`);
     * }
     *
-     * //
-     * const
+     * // Make direct LLM call with approved data
+     * const response = await openai.chat.completions.create({
+     *   model: 'gpt-4',
+     *   messages: [{ role: 'user', content: JSON.stringify(ctx.approvedData) }]
+     * });
+     *
+     * // Audit the call
+     * await axonflow.auditLLMCall({
+     *   contextId: ctx.contextId,
+     *   responseSummary: response.choices[0].message.content.substring(0, 100),
+     *   provider: 'openai',
+     *   model: 'gpt-4',
+     *   tokenUsage: {
+     *     promptTokens: response.usage.prompt_tokens,
+     *     completionTokens: response.usage.completion_tokens,
+     *     totalTokens: response.usage.total_tokens
+     *   },
+     *   latencyMs: 250
+     * });
+     * ```
     */
-    async getPolicyApprovedContext(
+    async getPolicyApprovedContext(options) {
        const url = `${this.config.endpoint}/api/policy/pre-check`;
-        const
-            user_token:
+        const requestBody = {
+            user_token: options.userToken,
            client_id: this.config.tenant,
-            query:
-            data_sources:
-            context:
+            query: options.query,
+            data_sources: options.dataSources || [],
+            context: options.context || {},
+        };
+        const headers = {
+            'Content-Type': 'application/json',
        };
+        // Add authentication headers
+        const isLocalhost = this.config.endpoint.includes('localhost') || this.config.endpoint.includes('127.0.0.1');
+        if (!isLocalhost) {
+            if (this.config.licenseKey) {
+                headers['X-License-Key'] = this.config.licenseKey;
+            }
+            else if (this.config.apiKey) {
+                headers['X-Client-Secret'] = this.config.apiKey;
+            }
+        }
        if (this.config.debug) {
-            debugLog('Gateway
-                query: request.query.substring(0, 50),
-                dataSources: request.dataSources
-            });
+            debugLog('Gateway Mode: Pre-check', { query: options.query.substring(0, 50) });
        }
-        const startTime = Date.now();
        const response = await fetch(url, {
            method: 'POST',
-            headers
-
-
-                'X-License-Key': this.config.apiKey
-            },
-            body: JSON.stringify(body),
-            signal: AbortSignal.timeout(this.config.timeout)
+            headers,
+            body: JSON.stringify(requestBody),
+            signal: AbortSignal.timeout(this.config.timeout),
        });
-        const duration = Date.now() - startTime;
        if (!response.ok) {
            const errorText = await response.text();
-
+            if (response.status === 401 || response.status === 403) {
+                throw new AuthenticationError(`Policy pre-check authentication failed: ${errorText}`);
+            }
+            throw new APIError(response.status, response.statusText, errorText);
        }
        const data = await response.json();
-
-
-
-
-
-
-        }
-        return {
+        // Transform snake_case response to camelCase
+        // Default expiration to 5 minutes from now if not provided
+        const expiresAt = data.expires_at
+            ? new Date(data.expires_at)
+            : new Date(Date.now() + 5 * 60 * 1000);
+        const result = {
            contextId: data.context_id,
            approved: data.approved,
-            approvedData: data.approved_data,
+            approvedData: data.approved_data || {},
            policies: data.policies || [],
-
+            expiresAt,
+            blockReason: data.block_reason,
+        };
+        // Parse rate limit info if present
+        if (data.rate_limit) {
+            result.rateLimitInfo = {
                limit: data.rate_limit.limit,
                remaining: data.rate_limit.remaining,
-                resetAt: new Date(data.rate_limit.reset_at)
-            }
-
-
-
+                resetAt: new Date(data.rate_limit.reset_at),
+            };
+        }
+        if (this.config.debug) {
+            debugLog('Gateway Mode: Pre-check result', {
+                approved: result.approved,
+                contextId: result.contextId,
+                policies: result.policies.length,
+            });
+        }
+        return result;
    }
    /**
-     *
+     * Gateway Mode: Audit an LLM call after completion.
     *
-     *
-     *
-     *
-     * @param contextId Context ID from getPolicyApprovedContext()
-     * @param responseSummary Brief summary of the LLM response (not full response)
-     * @param provider LLM provider name
-     * @param model Model name
-     * @param tokenUsage Token counts from LLM response
-     * @param latencyMs Time taken for LLM call in milliseconds
-     * @param metadata Optional additional metadata
-     * @returns AuditResult confirming the audit was recorded
+     * Call this after making a direct LLM call to log the audit trail.
+     * This is required for compliance and monitoring.
     *
     * @example
-     *
-     *
-     *
-     * '
-     * '
-     *
-     *
-     *
-     *
+     * ```typescript
+     * await axonflow.auditLLMCall({
+     *   contextId: ctx.contextId,
+     *   responseSummary: 'Generated report with 5 items',
+     *   provider: 'openai',
+     *   model: 'gpt-4',
+     *   tokenUsage: {
+     *     promptTokens: 100,
+     *     completionTokens: 50,
+     *     totalTokens: 150
+     *   },
+     *   latencyMs: 250
+     * });
+     * ```
     */
-    async auditLLMCall(
+    async auditLLMCall(options) {
        const url = `${this.config.endpoint}/api/audit/llm-call`;
-        const
-            context_id: contextId,
+        const requestBody = {
+            context_id: options.contextId,
            client_id: this.config.tenant,
-            response_summary: responseSummary,
-            provider,
-            model,
+            response_summary: options.responseSummary,
+            provider: options.provider,
+            model: options.model,
            token_usage: {
-                prompt_tokens: tokenUsage.promptTokens,
-                completion_tokens: tokenUsage.completionTokens,
-                total_tokens: tokenUsage.totalTokens
+                prompt_tokens: options.tokenUsage.promptTokens,
+                completion_tokens: options.tokenUsage.completionTokens,
+                total_tokens: options.tokenUsage.totalTokens,
            },
-            latency_ms: latencyMs,
-            metadata
+            latency_ms: options.latencyMs,
+            metadata: options.metadata || {},
        };
+        const headers = {
+            'Content-Type': 'application/json',
+        };
+        // Add authentication headers
+        const isLocalhost = this.config.endpoint.includes('localhost') || this.config.endpoint.includes('127.0.0.1');
+        if (!isLocalhost) {
+            if (this.config.licenseKey) {
+                headers['X-License-Key'] = this.config.licenseKey;
+            }
+            else if (this.config.apiKey) {
+                headers['X-Client-Secret'] = this.config.apiKey;
+            }
+        }
        if (this.config.debug) {
-            debugLog('Gateway
-                contextId,
-                provider,
-                model,
-                tokens: tokenUsage.totalTokens
+            debugLog('Gateway Mode: Audit', {
+                contextId: options.contextId,
+                provider: options.provider,
+                model: options.model,
            });
        }
-        const startTime = Date.now();
        const response = await fetch(url, {
            method: 'POST',
-            headers
-
-
-                'X-License-Key': this.config.apiKey
-            },
-            body: JSON.stringify(body),
-            signal: AbortSignal.timeout(this.config.timeout)
+            headers,
+            body: JSON.stringify(requestBody),
+            signal: AbortSignal.timeout(this.config.timeout),
        });
-        const duration = Date.now() - startTime;
        if (!response.ok) {
            const errorText = await response.text();
-
+            if (response.status === 401 || response.status === 403) {
+                throw new AuthenticationError(`Audit logging authentication failed: ${errorText}`);
+            }
+            throw new APIError(response.status, response.statusText, errorText);
        }
        const data = await response.json();
-
-            debugLog('Gateway audit complete', {
-                auditId: data.audit_id,
-                duration
-            });
-        }
-        return {
+        const result = {
            success: data.success,
-            auditId: data.audit_id
+            auditId: data.audit_id,
        };
+        if (this.config.debug) {
+            debugLog('Gateway Mode: Audit logged', { auditId: result.auditId });
+        }
+        return result;
    }
}
//# sourceMappingURL=client.js.map
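The Gateway Mode methods now throw typed errors instead of generic ones: AuthenticationError for 401/403 responses and APIError for other HTTP failures, both from the new errors module. A minimal handling sketch, assuming the package re-exports these classes from its root (the updated index.js suggests it does); the license key, user token, and query are placeholders:

```typescript
import { AxonFlow, AuthenticationError, APIError } from '@axonflow/sdk';

const axonflow = new AxonFlow({ licenseKey: 'lk-placeholder', tenant: 'acme' });

try {
  // preCheck() is the new alias for getPolicyApprovedContext().
  const ctx = await axonflow.preCheck({
    userToken: 'user-jwt',
    query: 'Analyze this customer data',
    dataSources: ['postgres'],
  });
  if (!ctx.approved) {
    console.warn('Blocked by policy:', ctx.blockReason);
  }
} catch (err) {
  if (err instanceof AuthenticationError) {
    // 401/403 from the agent: bad or missing license key / client secret
    console.error('Authentication failed:', err.message);
  } else if (err instanceof APIError) {
    // Any other non-2xx response from the agent
    console.error('Agent API error:', err.message);
  } else {
    throw err;
  }
}
```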