promptlayer 1.1.0 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +22 -0
- package/dist/claude-agents.d.mts +20 -0
- package/dist/claude-agents.d.ts +20 -0
- package/dist/claude-agents.js +2 -0
- package/dist/claude-agents.js.map +1 -0
- package/dist/esm/{chunk-SWBNW72U.js → chunk-DFBRFJOL.js} +2 -2
- package/dist/esm/{chunk-SWBNW72U.js.map → chunk-DFBRFJOL.js.map} +1 -1
- package/dist/esm/claude-agents.js +2 -0
- package/dist/esm/claude-agents.js.map +1 -0
- package/dist/esm/index.js +1 -1
- package/dist/esm/openai-agents.js +2 -2
- package/dist/esm/openai-agents.js.map +1 -1
- package/dist/index.js +2 -2
- package/dist/index.js.map +1 -1
- package/dist/openai-agents.js +2 -2
- package/dist/openai-agents.js.map +1 -1
- package/package.json +18 -1
- package/vendor/claude-agents/trace/.claude-plugin/plugin.json +8 -0
- package/vendor/claude-agents/trace/hooks/hook_utils.py +38 -0
- package/vendor/claude-agents/trace/hooks/hooks.json +60 -0
- package/vendor/claude-agents/trace/hooks/lib.sh +577 -0
- package/vendor/claude-agents/trace/hooks/parse_stop_transcript.py +375 -0
- package/vendor/claude-agents/trace/hooks/post_tool_use.sh +41 -0
- package/vendor/claude-agents/trace/hooks/session_end.sh +37 -0
- package/vendor/claude-agents/trace/hooks/session_start.sh +57 -0
- package/vendor/claude-agents/trace/hooks/stop_hook.sh +123 -0
- package/vendor/claude-agents/trace/hooks/user_prompt_submit.sh +25 -0
- package/vendor/claude-agents/vendor_metadata.json +5 -0
- package/.github/CODEOWNERS +0 -1
- package/.github/workflows/node.js.yml +0 -30
- package/.github/workflows/npm-publish.yml +0 -35
- package/src/groups.ts +0 -16
- package/src/index.ts +0 -383
- package/src/integrations/openai-agents/helpers.test.ts +0 -254
- package/src/integrations/openai-agents/ids.ts +0 -27
- package/src/integrations/openai-agents/index.ts +0 -8
- package/src/integrations/openai-agents/instrumentation.test.ts +0 -46
- package/src/integrations/openai-agents/instrumentation.ts +0 -47
- package/src/integrations/openai-agents/mapping.ts +0 -714
- package/src/integrations/openai-agents/otlp-json.ts +0 -120
- package/src/integrations/openai-agents/processor.test.ts +0 -509
- package/src/integrations/openai-agents/processor.ts +0 -388
- package/src/integrations/openai-agents/time.ts +0 -56
- package/src/integrations/openai-agents/types.ts +0 -49
- package/src/integrations/openai-agents/url.ts +0 -9
- package/src/openai-agents.ts +0 -1
- package/src/promptlayer.ts +0 -125
- package/src/run-error-tracking.test.ts +0 -146
- package/src/span-exporter.ts +0 -120
- package/src/span-wrapper.ts +0 -51
- package/src/templates.ts +0 -37
- package/src/tracing.ts +0 -20
- package/src/track.ts +0 -84
- package/src/types.ts +0 -689
- package/src/utils/blueprint-builder.test.ts +0 -727
- package/src/utils/blueprint-builder.ts +0 -1453
- package/src/utils/errors.test.ts +0 -68
- package/src/utils/errors.ts +0 -62
- package/src/utils/streaming.test.ts +0 -498
- package/src/utils/streaming.ts +0 -1402
- package/src/utils/utils.ts +0 -1228
- package/tsconfig.json +0 -115
- package/tsup.config.ts +0 -20
- package/vitest.config.ts +0 -9
package/src/utils/utils.ts
DELETED
|
@@ -1,1228 +0,0 @@
|
|
|
1
|
-
import {
|
|
2
|
-
GetPromptTemplateParams,
|
|
3
|
-
GetPromptTemplateResponse,
|
|
4
|
-
ListPromptTemplatesResponse,
|
|
5
|
-
LogRequest,
|
|
6
|
-
Pagination,
|
|
7
|
-
PublishPromptTemplate,
|
|
8
|
-
PublishPromptTemplateResponse,
|
|
9
|
-
RequestLog,
|
|
10
|
-
RunWorkflowRequestParams,
|
|
11
|
-
TrackGroup,
|
|
12
|
-
TrackMetadata,
|
|
13
|
-
TrackPrompt,
|
|
14
|
-
TrackRequest,
|
|
15
|
-
TrackScore,
|
|
16
|
-
WorkflowResponse,
|
|
17
|
-
} from "@/types";
|
|
18
|
-
import type { AnthropicBedrock } from "@anthropic-ai/bedrock-sdk";
|
|
19
|
-
import type TypeAnthropic from "@anthropic-ai/sdk";
|
|
20
|
-
import type { AnthropicVertex } from "@anthropic-ai/vertex-sdk";
|
|
21
|
-
import Ably from "ably";
|
|
22
|
-
import { Centrifuge } from "centrifuge";
|
|
23
|
-
import type TypeOpenAI from "openai";
|
|
24
|
-
import pRetry from "p-retry";
|
|
25
|
-
import {
|
|
26
|
-
MAP_PROVIDER_TO_FUNCTION_NAME,
|
|
27
|
-
cleaned_result,
|
|
28
|
-
STREAMING_PROVIDERS_WITH_USAGE,
|
|
29
|
-
} from "./streaming";
|
|
30
|
-
|
|
31
|
-
// SDK version - injected at build time from package.json
|
|
32
|
-
declare const __SDK_VERSION__: string;
|
|
33
|
-
export const SDK_VERSION = __SDK_VERSION__;
|
|
34
|
-
|
|
35
|
-
// Get Node.js version (major.minor format)
|
|
36
|
-
const getNodeVersion = (): string => {
|
|
37
|
-
if (typeof process !== "undefined" && process.versions?.node) {
|
|
38
|
-
const parts = process.versions.node.split(".");
|
|
39
|
-
return `${parts[0]}.${parts[1]}`;
|
|
40
|
-
}
|
|
41
|
-
return "unknown";
|
|
42
|
-
};
|
|
43
|
-
|
|
44
|
-
const _NODE_VERSION = getNodeVersion();
|
|
45
|
-
const _PROMPTLAYER_USER_AGENT = `promptlayer-js/${SDK_VERSION} (node ${_NODE_VERSION})`;
|
|
46
|
-
|
|
47
|
-
/**
|
|
48
|
-
* Returns common headers to be included in all PromptLayer API requests.
|
|
49
|
-
* Includes the SDK version and user agent for tracking and debugging purposes.
|
|
50
|
-
*/
|
|
51
|
-
export const getCommonHeaders = (): Record<string, string> => ({
|
|
52
|
-
"User-Agent": _PROMPTLAYER_USER_AGENT,
|
|
53
|
-
"X-SDK-Version": SDK_VERSION,
|
|
54
|
-
});
|
|
55
|
-
|
|
56
|
-
export const SET_WORKFLOW_COMPLETE_MESSAGE = "SET_WORKFLOW_COMPLETE";
|
|
57
|
-
|
|
58
|
-
export enum FinalOutputCode {
|
|
59
|
-
OK = "OK",
|
|
60
|
-
EXCEEDS_SIZE_LIMIT = "EXCEEDS_SIZE_LIMIT",
|
|
61
|
-
}
|
|
62
|
-
|
|
63
|
-
async function getFinalOutput(
|
|
64
|
-
baseURL: string,
|
|
65
|
-
executionId: number,
|
|
66
|
-
returnAllOutputs: boolean,
|
|
67
|
-
headers: Record<string, string>
|
|
68
|
-
): Promise<any> {
|
|
69
|
-
const response = await fetchWithRetry(
|
|
70
|
-
`${baseURL}/workflow-version-execution-results?workflow_version_execution_id=${executionId}&return_all_outputs=${returnAllOutputs}`,
|
|
71
|
-
{ headers, ...getCommonHeaders() }
|
|
72
|
-
);
|
|
73
|
-
if (!response.ok) {
|
|
74
|
-
throw new Error("Failed to fetch final output");
|
|
75
|
-
}
|
|
76
|
-
return response.json();
|
|
77
|
-
}
|
|
78
|
-
|
|
79
|
-
function makeMessageListener(
|
|
80
|
-
baseURL: string,
|
|
81
|
-
resultsPromise: { resolve: (data: any) => void; reject: (err: any) => void },
|
|
82
|
-
executionId: number,
|
|
83
|
-
returnAllOutputs: boolean,
|
|
84
|
-
headers: Record<string, string>
|
|
85
|
-
) {
|
|
86
|
-
return async function (message: any) {
|
|
87
|
-
if (message.name !== SET_WORKFLOW_COMPLETE_MESSAGE) return;
|
|
88
|
-
|
|
89
|
-
try {
|
|
90
|
-
const data = JSON.parse(message.data);
|
|
91
|
-
const resultCode = data.result_code;
|
|
92
|
-
let results;
|
|
93
|
-
|
|
94
|
-
if (resultCode === FinalOutputCode.OK || resultCode == null) {
|
|
95
|
-
results = data.final_output;
|
|
96
|
-
} else if (resultCode === FinalOutputCode.EXCEEDS_SIZE_LIMIT) {
|
|
97
|
-
results = await getFinalOutput(
|
|
98
|
-
baseURL,
|
|
99
|
-
executionId,
|
|
100
|
-
returnAllOutputs,
|
|
101
|
-
headers
|
|
102
|
-
);
|
|
103
|
-
resultsPromise.resolve(results);
|
|
104
|
-
} else {
|
|
105
|
-
throw new Error(`Unsupported final output code: ${resultCode}`);
|
|
106
|
-
}
|
|
107
|
-
|
|
108
|
-
resultsPromise.resolve(results);
|
|
109
|
-
} catch (err) {
|
|
110
|
-
resultsPromise.reject(err);
|
|
111
|
-
}
|
|
112
|
-
};
|
|
113
|
-
}
|
|
114
|
-
|
|
115
|
-
interface WaitForWorkflowCompletionParams {
|
|
116
|
-
token: string;
|
|
117
|
-
channelName: string;
|
|
118
|
-
executionId: number;
|
|
119
|
-
returnAllOutputs: boolean;
|
|
120
|
-
headers: Record<string, string>;
|
|
121
|
-
timeout: number;
|
|
122
|
-
baseURL: string;
|
|
123
|
-
}
|
|
124
|
-
|
|
125
|
-
async function waitForWorkflowCompletion({
|
|
126
|
-
token,
|
|
127
|
-
channelName,
|
|
128
|
-
executionId,
|
|
129
|
-
returnAllOutputs,
|
|
130
|
-
headers,
|
|
131
|
-
timeout,
|
|
132
|
-
baseURL,
|
|
133
|
-
}: WaitForWorkflowCompletionParams): Promise<any> {
|
|
134
|
-
const client = new Ably.Realtime(token);
|
|
135
|
-
const channel = client.channels.get(channelName);
|
|
136
|
-
|
|
137
|
-
const resultsPromise = {} as {
|
|
138
|
-
resolve: (value: any) => void;
|
|
139
|
-
reject: (reason?: any) => void;
|
|
140
|
-
};
|
|
141
|
-
|
|
142
|
-
const promise = new Promise<any>((resolve, reject) => {
|
|
143
|
-
resultsPromise.resolve = resolve;
|
|
144
|
-
resultsPromise.reject = reject;
|
|
145
|
-
});
|
|
146
|
-
|
|
147
|
-
const listener = makeMessageListener(
|
|
148
|
-
baseURL,
|
|
149
|
-
resultsPromise,
|
|
150
|
-
executionId,
|
|
151
|
-
returnAllOutputs,
|
|
152
|
-
headers
|
|
153
|
-
);
|
|
154
|
-
await channel.subscribe(SET_WORKFLOW_COMPLETE_MESSAGE, listener);
|
|
155
|
-
|
|
156
|
-
try {
|
|
157
|
-
return await new Promise((resolve, reject) => {
|
|
158
|
-
const timer = setTimeout(() => {
|
|
159
|
-
reject(
|
|
160
|
-
new Error("Workflow execution did not complete properly (timeout)")
|
|
161
|
-
);
|
|
162
|
-
}, timeout);
|
|
163
|
-
|
|
164
|
-
promise
|
|
165
|
-
.then((result) => {
|
|
166
|
-
clearTimeout(timer);
|
|
167
|
-
resolve(result);
|
|
168
|
-
})
|
|
169
|
-
.catch((err) => {
|
|
170
|
-
clearTimeout(timer);
|
|
171
|
-
reject(err);
|
|
172
|
-
});
|
|
173
|
-
});
|
|
174
|
-
} finally {
|
|
175
|
-
console.log("Closing client");
|
|
176
|
-
channel.unsubscribe(SET_WORKFLOW_COMPLETE_MESSAGE, listener);
|
|
177
|
-
client.close();
|
|
178
|
-
console.log("Closed client");
|
|
179
|
-
}
|
|
180
|
-
}
|
|
181
|
-
|
|
182
|
-
/**
|
|
183
|
-
* Wrapper around fetch that retries on 5xx server errors with exponential backoff.
|
|
184
|
-
* Uses p-retry for industry-standard retry logic with exponential backoff.
|
|
185
|
-
*
|
|
186
|
-
* @param input - The URL or Request object to fetch
|
|
187
|
-
* @param init - The request initialization options
|
|
188
|
-
* @returns Promise<Response> - The fetch response
|
|
189
|
-
*/
|
|
190
|
-
export const fetchWithRetry = async (
|
|
191
|
-
input: RequestInfo | URL,
|
|
192
|
-
init?: RequestInit
|
|
193
|
-
): Promise<Response> => {
|
|
194
|
-
return pRetry(
|
|
195
|
-
async () => {
|
|
196
|
-
const response = await fetch(input, init);
|
|
197
|
-
|
|
198
|
-
if ((response.status >= 500 && response.status < 600) || (response.status === 429)) {
|
|
199
|
-
throw new Error(
|
|
200
|
-
`Server error: ${response.status} ${response.statusText}`
|
|
201
|
-
);
|
|
202
|
-
}
|
|
203
|
-
|
|
204
|
-
return response;
|
|
205
|
-
},
|
|
206
|
-
{
|
|
207
|
-
retries: 3, // Retry up to 3 times (4 total attempts)
|
|
208
|
-
factor: 2, // Exponential backoff factor
|
|
209
|
-
minTimeout: 2000, // First retry after 2 seconds
|
|
210
|
-
maxTimeout: 15000, // Cap at 15 seconds (gives us ~2s, ~4s, ~8s progression with randomization)
|
|
211
|
-
randomize: true, // Add jitter to avoid thundering herd
|
|
212
|
-
onFailedAttempt: (error) => {
|
|
213
|
-
console.info(
|
|
214
|
-
`PromptLayer API request attempt ${error.attemptNumber} failed. ${error.retriesLeft} retries left.`
|
|
215
|
-
);
|
|
216
|
-
},
|
|
217
|
-
}
|
|
218
|
-
);
|
|
219
|
-
};
|
|
220
|
-
|
|
221
|
-
const promptlayerApiHandler = async <Item>(
|
|
222
|
-
apiKey: string,
|
|
223
|
-
baseURL: string,
|
|
224
|
-
body: TrackRequest & {
|
|
225
|
-
request_response: AsyncIterable<Item> | any;
|
|
226
|
-
},
|
|
227
|
-
throwOnError: boolean = true
|
|
228
|
-
) => {
|
|
229
|
-
const isGenerator = body.request_response[Symbol.asyncIterator] !== undefined;
|
|
230
|
-
if (isGenerator) {
|
|
231
|
-
return proxyGenerator(
|
|
232
|
-
apiKey,
|
|
233
|
-
baseURL,
|
|
234
|
-
body.request_response,
|
|
235
|
-
body,
|
|
236
|
-
throwOnError
|
|
237
|
-
);
|
|
238
|
-
}
|
|
239
|
-
return await promptLayerApiRequest(apiKey, baseURL, body, throwOnError);
|
|
240
|
-
};
|
|
241
|
-
|
|
242
|
-
const promptLayerApiRequest = async (
|
|
243
|
-
apiKey: string,
|
|
244
|
-
baseURL: string,
|
|
245
|
-
body: TrackRequest,
|
|
246
|
-
throwOnError: boolean = true
|
|
247
|
-
) => {
|
|
248
|
-
try {
|
|
249
|
-
const response = await fetchWithRetry(`${baseURL}/track-request`, {
|
|
250
|
-
method: "POST",
|
|
251
|
-
headers: { "Content-Type": "application/json", ...getCommonHeaders() },
|
|
252
|
-
body: JSON.stringify(body),
|
|
253
|
-
});
|
|
254
|
-
const data = await response.json();
|
|
255
|
-
if (response.status !== 200) {
|
|
256
|
-
const errorMessage =
|
|
257
|
-
data.message || data.error || "Failed to log request";
|
|
258
|
-
if (throwOnError) {
|
|
259
|
-
throw new Error(errorMessage);
|
|
260
|
-
} else {
|
|
261
|
-
warnOnBadResponse(
|
|
262
|
-
data,
|
|
263
|
-
"WARNING: While logging your request, PromptLayer experienced the following error:"
|
|
264
|
-
);
|
|
265
|
-
}
|
|
266
|
-
}
|
|
267
|
-
if (data && body.return_pl_id) {
|
|
268
|
-
return [body.request_response, data.request_id];
|
|
269
|
-
}
|
|
270
|
-
} catch (e) {
|
|
271
|
-
if (throwOnError) {
|
|
272
|
-
throw e;
|
|
273
|
-
}
|
|
274
|
-
console.warn(
|
|
275
|
-
`WARNING: While logging your request PromptLayer had the following error: ${e}`
|
|
276
|
-
);
|
|
277
|
-
}
|
|
278
|
-
return body.request_response;
|
|
279
|
-
};
|
|
280
|
-
|
|
281
|
-
const promptLayerTrackMetadata = async (
|
|
282
|
-
apiKey: string,
|
|
283
|
-
baseURL: string,
|
|
284
|
-
body: TrackMetadata,
|
|
285
|
-
throwOnError: boolean = true
|
|
286
|
-
): Promise<boolean> => {
|
|
287
|
-
try {
|
|
288
|
-
const response = await fetchWithRetry(`${baseURL}/library-track-metadata`, {
|
|
289
|
-
method: "POST",
|
|
290
|
-
headers: { "Content-Type": "application/json", ...getCommonHeaders() },
|
|
291
|
-
body: JSON.stringify({
|
|
292
|
-
...body,
|
|
293
|
-
api_key: apiKey,
|
|
294
|
-
}),
|
|
295
|
-
});
|
|
296
|
-
const data = await response.json();
|
|
297
|
-
if (response.status !== 200) {
|
|
298
|
-
const errorMessage =
|
|
299
|
-
data.message || data.error || "Failed to track metadata";
|
|
300
|
-
if (throwOnError) {
|
|
301
|
-
throw new Error(errorMessage);
|
|
302
|
-
} else {
|
|
303
|
-
warnOnBadResponse(
|
|
304
|
-
data,
|
|
305
|
-
"WARNING: While logging metadata to your request, PromptLayer experienced the following error"
|
|
306
|
-
);
|
|
307
|
-
return false;
|
|
308
|
-
}
|
|
309
|
-
}
|
|
310
|
-
} catch (e) {
|
|
311
|
-
if (throwOnError) {
|
|
312
|
-
throw e;
|
|
313
|
-
}
|
|
314
|
-
console.warn(
|
|
315
|
-
`WARNING: While logging metadata to your request, PromptLayer experienced the following error: ${e}`
|
|
316
|
-
);
|
|
317
|
-
return false;
|
|
318
|
-
}
|
|
319
|
-
return true;
|
|
320
|
-
};
|
|
321
|
-
|
|
322
|
-
const promptLayerTrackScore = async (
|
|
323
|
-
apiKey: string,
|
|
324
|
-
baseURL: string,
|
|
325
|
-
body: TrackScore,
|
|
326
|
-
throwOnError: boolean = true
|
|
327
|
-
): Promise<boolean> => {
|
|
328
|
-
try {
|
|
329
|
-
const response = await fetchWithRetry(`${baseURL}/library-track-score`, {
|
|
330
|
-
method: "POST",
|
|
331
|
-
headers: { "Content-Type": "application/json", ...getCommonHeaders() },
|
|
332
|
-
body: JSON.stringify({
|
|
333
|
-
...body,
|
|
334
|
-
api_key: apiKey,
|
|
335
|
-
}),
|
|
336
|
-
});
|
|
337
|
-
const data = await response.json();
|
|
338
|
-
if (response.status !== 200) {
|
|
339
|
-
const errorMessage =
|
|
340
|
-
data.message || data.error || "Failed to track score";
|
|
341
|
-
if (throwOnError) {
|
|
342
|
-
throw new Error(errorMessage);
|
|
343
|
-
} else {
|
|
344
|
-
warnOnBadResponse(
|
|
345
|
-
data,
|
|
346
|
-
"WARNING: While scoring your request, PromptLayer experienced the following error"
|
|
347
|
-
);
|
|
348
|
-
return false;
|
|
349
|
-
}
|
|
350
|
-
}
|
|
351
|
-
} catch (e) {
|
|
352
|
-
if (throwOnError) {
|
|
353
|
-
throw e;
|
|
354
|
-
}
|
|
355
|
-
console.warn(
|
|
356
|
-
`WARNING: While scoring your request, PromptLayer experienced the following error: ${e}`
|
|
357
|
-
);
|
|
358
|
-
return false;
|
|
359
|
-
}
|
|
360
|
-
return true;
|
|
361
|
-
};
|
|
362
|
-
|
|
363
|
-
const promptLayerTrackPrompt = async (
|
|
364
|
-
apiKey: string,
|
|
365
|
-
baseURL: string,
|
|
366
|
-
body: TrackPrompt,
|
|
367
|
-
throwOnError: boolean = true
|
|
368
|
-
): Promise<boolean> => {
|
|
369
|
-
try {
|
|
370
|
-
const response = await fetchWithRetry(`${baseURL}/library-track-prompt`, {
|
|
371
|
-
method: "POST",
|
|
372
|
-
headers: { "Content-Type": "application/json", ...getCommonHeaders() },
|
|
373
|
-
body: JSON.stringify({
|
|
374
|
-
...body,
|
|
375
|
-
api_key: apiKey,
|
|
376
|
-
}),
|
|
377
|
-
});
|
|
378
|
-
const data = await response.json();
|
|
379
|
-
if (response.status !== 200) {
|
|
380
|
-
const errorMessage =
|
|
381
|
-
data.message || data.error || "Failed to track prompt";
|
|
382
|
-
if (throwOnError) {
|
|
383
|
-
throw new Error(errorMessage);
|
|
384
|
-
} else {
|
|
385
|
-
warnOnBadResponse(
|
|
386
|
-
data,
|
|
387
|
-
"WARNING: While associating your request with a prompt template, PromptLayer experienced the following error"
|
|
388
|
-
);
|
|
389
|
-
return false;
|
|
390
|
-
}
|
|
391
|
-
}
|
|
392
|
-
} catch (e) {
|
|
393
|
-
if (throwOnError) {
|
|
394
|
-
throw e;
|
|
395
|
-
}
|
|
396
|
-
console.warn(
|
|
397
|
-
`WARNING: While associating your request with a prompt template, PromptLayer experienced the following error: ${e}`
|
|
398
|
-
);
|
|
399
|
-
return false;
|
|
400
|
-
}
|
|
401
|
-
return true;
|
|
402
|
-
};
|
|
403
|
-
|
|
404
|
-
const promptLayerTrackGroup = async (
|
|
405
|
-
apiKey: string,
|
|
406
|
-
baseURL: string,
|
|
407
|
-
body: TrackGroup,
|
|
408
|
-
throwOnError: boolean = true
|
|
409
|
-
): Promise<boolean> => {
|
|
410
|
-
try {
|
|
411
|
-
const response = await fetchWithRetry(`${baseURL}/track-group`, {
|
|
412
|
-
method: "POST",
|
|
413
|
-
headers: { "Content-Type": "application/json", ...getCommonHeaders() },
|
|
414
|
-
body: JSON.stringify({
|
|
415
|
-
...body,
|
|
416
|
-
api_key: apiKey,
|
|
417
|
-
}),
|
|
418
|
-
});
|
|
419
|
-
const data = await response.json();
|
|
420
|
-
if (response.status !== 200) {
|
|
421
|
-
const errorMessage =
|
|
422
|
-
data.message || data.error || "Failed to track group";
|
|
423
|
-
if (throwOnError) {
|
|
424
|
-
throw new Error(errorMessage);
|
|
425
|
-
} else {
|
|
426
|
-
warnOnBadResponse(
|
|
427
|
-
data,
|
|
428
|
-
"WARNING: While associating your request with a group, PromptLayer experienced the following error"
|
|
429
|
-
);
|
|
430
|
-
return false;
|
|
431
|
-
}
|
|
432
|
-
}
|
|
433
|
-
} catch (e) {
|
|
434
|
-
if (throwOnError) {
|
|
435
|
-
throw e;
|
|
436
|
-
}
|
|
437
|
-
console.warn(
|
|
438
|
-
`WARNING: While associating your request with a group, PromptLayer experienced the following error: ${e}`
|
|
439
|
-
);
|
|
440
|
-
return false;
|
|
441
|
-
}
|
|
442
|
-
return true;
|
|
443
|
-
};
|
|
444
|
-
|
|
445
|
-
const promptLayerCreateGroup = async (
|
|
446
|
-
apiKey: string,
|
|
447
|
-
baseURL: string,
|
|
448
|
-
throwOnError: boolean = true
|
|
449
|
-
): Promise<number | boolean> => {
|
|
450
|
-
try {
|
|
451
|
-
const response = await fetchWithRetry(`${baseURL}/create-group`, {
|
|
452
|
-
method: "POST",
|
|
453
|
-
headers: { "Content-Type": "application/json", ...getCommonHeaders() },
|
|
454
|
-
body: JSON.stringify({
|
|
455
|
-
api_key: apiKey,
|
|
456
|
-
}),
|
|
457
|
-
});
|
|
458
|
-
const data = await response.json();
|
|
459
|
-
if (response.status !== 200) {
|
|
460
|
-
const errorMessage =
|
|
461
|
-
data.message || data.error || "Failed to create group";
|
|
462
|
-
if (throwOnError) {
|
|
463
|
-
throw new Error(errorMessage);
|
|
464
|
-
} else {
|
|
465
|
-
warnOnBadResponse(
|
|
466
|
-
data,
|
|
467
|
-
"WARNING: While creating a group PromptLayer had the following error"
|
|
468
|
-
);
|
|
469
|
-
return false;
|
|
470
|
-
}
|
|
471
|
-
}
|
|
472
|
-
return data.id;
|
|
473
|
-
} catch (e) {
|
|
474
|
-
if (throwOnError) {
|
|
475
|
-
throw e;
|
|
476
|
-
}
|
|
477
|
-
console.warn(
|
|
478
|
-
`WARNING: While creating a group PromptLayer had the following error: ${e}`
|
|
479
|
-
);
|
|
480
|
-
return false;
|
|
481
|
-
}
|
|
482
|
-
};
|
|
483
|
-
|
|
484
|
-
const getPromptTemplate = async (
|
|
485
|
-
apiKey: string,
|
|
486
|
-
baseURL: string,
|
|
487
|
-
promptName: string,
|
|
488
|
-
params?: Partial<GetPromptTemplateParams>,
|
|
489
|
-
throwOnError: boolean = true
|
|
490
|
-
): Promise<GetPromptTemplateResponse | null> => {
|
|
491
|
-
try {
|
|
492
|
-
const url = new URL(`${baseURL}/prompt-templates/${promptName}`);
|
|
493
|
-
const response = await fetchWithRetry(url, {
|
|
494
|
-
method: "POST",
|
|
495
|
-
headers: {
|
|
496
|
-
"Content-Type": "application/json",
|
|
497
|
-
"X-API-KEY": apiKey,
|
|
498
|
-
...getCommonHeaders(),
|
|
499
|
-
},
|
|
500
|
-
body: JSON.stringify(params),
|
|
501
|
-
});
|
|
502
|
-
const data = await response.json();
|
|
503
|
-
if (response.status !== 200) {
|
|
504
|
-
const errorMessage =
|
|
505
|
-
data.message || data.error || "Failed to fetch prompt template";
|
|
506
|
-
if (throwOnError) {
|
|
507
|
-
throw new Error(errorMessage);
|
|
508
|
-
} else {
|
|
509
|
-
console.warn(
|
|
510
|
-
`WARNING: While fetching a prompt template PromptLayer had the following error: ${errorMessage}`
|
|
511
|
-
);
|
|
512
|
-
return null;
|
|
513
|
-
}
|
|
514
|
-
}
|
|
515
|
-
if (data.warning) {
|
|
516
|
-
console.warn(
|
|
517
|
-
`WARNING: While fetching your prompt PromptLayer had the following error: ${data.warning}`
|
|
518
|
-
);
|
|
519
|
-
}
|
|
520
|
-
return data as GetPromptTemplateResponse;
|
|
521
|
-
} catch (e) {
|
|
522
|
-
if (throwOnError) {
|
|
523
|
-
throw e;
|
|
524
|
-
}
|
|
525
|
-
console.warn(
|
|
526
|
-
`WARNING: While fetching a prompt template PromptLayer had the following error: ${e}`
|
|
527
|
-
);
|
|
528
|
-
return null;
|
|
529
|
-
}
|
|
530
|
-
};
|
|
531
|
-
|
|
532
|
-
const publishPromptTemplate = async (
|
|
533
|
-
apiKey: string,
|
|
534
|
-
baseURL: string,
|
|
535
|
-
body: PublishPromptTemplate,
|
|
536
|
-
throwOnError: boolean = true
|
|
537
|
-
): Promise<PublishPromptTemplateResponse> => {
|
|
538
|
-
const response = await fetchWithRetry(`${baseURL}/rest/prompt-templates`, {
|
|
539
|
-
method: "POST",
|
|
540
|
-
headers: { "Content-Type": "application/json", "X-API-KEY": apiKey, ...getCommonHeaders() },
|
|
541
|
-
body: JSON.stringify({
|
|
542
|
-
prompt_template: { ...body },
|
|
543
|
-
prompt_version: { ...body },
|
|
544
|
-
release_labels: body.release_labels ? body.release_labels : undefined,
|
|
545
|
-
}),
|
|
546
|
-
});
|
|
547
|
-
const data = await response.json();
|
|
548
|
-
if (response.status !== 200 && response.status !== 201) {
|
|
549
|
-
const errorMessage =
|
|
550
|
-
data.message || data.error || "Failed to publish prompt template";
|
|
551
|
-
if (throwOnError) {
|
|
552
|
-
throw new Error(errorMessage);
|
|
553
|
-
} else {
|
|
554
|
-
warnOnBadResponse(
|
|
555
|
-
data,
|
|
556
|
-
"WARNING: While publishing a prompt template PromptLayer had the following error"
|
|
557
|
-
);
|
|
558
|
-
}
|
|
559
|
-
}
|
|
560
|
-
return data as PublishPromptTemplateResponse;
|
|
561
|
-
};
|
|
562
|
-
|
|
563
|
-
const getAllPromptTemplates = async (
|
|
564
|
-
apiKey: string,
|
|
565
|
-
baseURL: string,
|
|
566
|
-
params?: Partial<Pagination>,
|
|
567
|
-
throwOnError: boolean = true
|
|
568
|
-
): Promise<Array<ListPromptTemplatesResponse>> => {
|
|
569
|
-
const url = new URL(`${baseURL}/prompt-templates`);
|
|
570
|
-
Object.entries(params || {}).forEach(([key, value]) =>
|
|
571
|
-
url.searchParams.append(key, value.toString())
|
|
572
|
-
);
|
|
573
|
-
const response = await fetchWithRetry(url, {
|
|
574
|
-
headers: { "Content-Type": "application/json", "X-API-KEY": apiKey, ...getCommonHeaders() },
|
|
575
|
-
});
|
|
576
|
-
const data = await response.json();
|
|
577
|
-
if (response.status !== 200) {
|
|
578
|
-
const errorMessage =
|
|
579
|
-
data.message || data.error || "Failed to fetch prompt templates";
|
|
580
|
-
if (throwOnError) {
|
|
581
|
-
throw new Error(errorMessage);
|
|
582
|
-
} else {
|
|
583
|
-
warnOnBadResponse(
|
|
584
|
-
data,
|
|
585
|
-
"WARNING: While fetching all prompt templates PromptLayer had the following error"
|
|
586
|
-
);
|
|
587
|
-
return [];
|
|
588
|
-
}
|
|
589
|
-
}
|
|
590
|
-
return (data.items ?? []) as Array<ListPromptTemplatesResponse>;
|
|
591
|
-
};
|
|
592
|
-
|
|
593
|
-
const waitForWorkflowCompletionCentrifugo = async (
|
|
594
|
-
params: WaitForWorkflowCompletionParams
|
|
595
|
-
): Promise<any> => {
|
|
596
|
-
const url = new URL(`${params.baseURL}/connection/websocket`);
|
|
597
|
-
url.protocol = url.protocol === "https:" ? "wss:" : "ws:";
|
|
598
|
-
|
|
599
|
-
const client = new Centrifuge(url.toString(), { token: params.token });
|
|
600
|
-
const sub = client.newSubscription(params.channelName);
|
|
601
|
-
|
|
602
|
-
return new Promise((resolve, reject) => {
|
|
603
|
-
const cleanupWithResolve = (data: any) => {
|
|
604
|
-
cleanup();
|
|
605
|
-
resolve(data);
|
|
606
|
-
};
|
|
607
|
-
|
|
608
|
-
const listener = makeMessageListener(
|
|
609
|
-
params.baseURL,
|
|
610
|
-
{ resolve: cleanupWithResolve, reject },
|
|
611
|
-
params.executionId,
|
|
612
|
-
params.returnAllOutputs,
|
|
613
|
-
params.headers
|
|
614
|
-
);
|
|
615
|
-
|
|
616
|
-
sub.on("publication", (message) => {
|
|
617
|
-
listener({
|
|
618
|
-
name: message.data.message_name,
|
|
619
|
-
data: message.data.data,
|
|
620
|
-
});
|
|
621
|
-
});
|
|
622
|
-
|
|
623
|
-
const timeout = setTimeout(() => {
|
|
624
|
-
reject(
|
|
625
|
-
new Error("Workflow execution did not complete properly (timeout)")
|
|
626
|
-
);
|
|
627
|
-
}, params.timeout);
|
|
628
|
-
|
|
629
|
-
const cleanup = () => {
|
|
630
|
-
clearTimeout(timeout);
|
|
631
|
-
sub.unsubscribe();
|
|
632
|
-
client.disconnect();
|
|
633
|
-
};
|
|
634
|
-
|
|
635
|
-
sub.on("error", (err) => {
|
|
636
|
-
cleanup();
|
|
637
|
-
reject(`Centrifugo subscription error: ${err}`);
|
|
638
|
-
});
|
|
639
|
-
|
|
640
|
-
client.on("error", (err) => {
|
|
641
|
-
cleanup();
|
|
642
|
-
reject(`Centrifugo client error: ${err}`);
|
|
643
|
-
});
|
|
644
|
-
|
|
645
|
-
sub.subscribe();
|
|
646
|
-
client.connect();
|
|
647
|
-
});
|
|
648
|
-
};
|
|
649
|
-
|
|
650
|
-
export const runWorkflowRequest = async ({
|
|
651
|
-
workflow_name,
|
|
652
|
-
input_variables,
|
|
653
|
-
metadata = {},
|
|
654
|
-
workflow_label_name = null,
|
|
655
|
-
workflow_version_number = null,
|
|
656
|
-
return_all_outputs = false,
|
|
657
|
-
api_key,
|
|
658
|
-
timeout = 3600000, // Default timeout is 1 hour in milliseconds
|
|
659
|
-
baseURL,
|
|
660
|
-
}: RunWorkflowRequestParams): Promise<WorkflowResponse> => {
|
|
661
|
-
const payload = {
|
|
662
|
-
input_variables,
|
|
663
|
-
metadata,
|
|
664
|
-
workflow_label_name,
|
|
665
|
-
workflow_version_number,
|
|
666
|
-
return_all_outputs,
|
|
667
|
-
};
|
|
668
|
-
|
|
669
|
-
const headers = { "Content-Type": "application/json", "X-API-KEY": api_key, ...getCommonHeaders() };
|
|
670
|
-
|
|
671
|
-
try {
|
|
672
|
-
const response = await fetchWithRetry(
|
|
673
|
-
`${baseURL}/workflows/${encodeURIComponent(workflow_name)}/run`,
|
|
674
|
-
{
|
|
675
|
-
method: "POST",
|
|
676
|
-
headers: headers,
|
|
677
|
-
body: JSON.stringify(payload),
|
|
678
|
-
}
|
|
679
|
-
);
|
|
680
|
-
|
|
681
|
-
if (response.status !== 201) {
|
|
682
|
-
const errorData = await response.json().catch(() => ({}));
|
|
683
|
-
return {
|
|
684
|
-
success: false,
|
|
685
|
-
message: `Failed to run workflow: ${
|
|
686
|
-
errorData.error || response.statusText
|
|
687
|
-
}`,
|
|
688
|
-
};
|
|
689
|
-
}
|
|
690
|
-
|
|
691
|
-
const result = await response.json();
|
|
692
|
-
if (result.warning) {
|
|
693
|
-
console.warn(`WARNING: ${result.warning}`);
|
|
694
|
-
}
|
|
695
|
-
const execution_id = result.workflow_version_execution_id;
|
|
696
|
-
if (!execution_id) {
|
|
697
|
-
console.log("No execution ID returned from workflow run");
|
|
698
|
-
return { success: false, message: "Failed to run workflow" };
|
|
699
|
-
}
|
|
700
|
-
|
|
701
|
-
const channel_name = `workflow_updates:${execution_id}`;
|
|
702
|
-
const ws_response = await fetchWithRetry(
|
|
703
|
-
`${baseURL}/ws-token-request-library?capability=${channel_name}`,
|
|
704
|
-
{
|
|
705
|
-
method: "POST",
|
|
706
|
-
headers: headers,
|
|
707
|
-
}
|
|
708
|
-
);
|
|
709
|
-
|
|
710
|
-
const ws_token_response = await ws_response.json();
|
|
711
|
-
const token = ws_token_response.token_details.token;
|
|
712
|
-
|
|
713
|
-
const params: WaitForWorkflowCompletionParams = {
|
|
714
|
-
token,
|
|
715
|
-
channelName: channel_name,
|
|
716
|
-
executionId: execution_id,
|
|
717
|
-
returnAllOutputs: return_all_outputs,
|
|
718
|
-
headers: headers,
|
|
719
|
-
timeout: timeout,
|
|
720
|
-
baseURL: baseURL,
|
|
721
|
-
};
|
|
722
|
-
if (ws_token_response.messaging_backend === "centrifugo")
|
|
723
|
-
return waitForWorkflowCompletionCentrifugo(params);
|
|
724
|
-
return await waitForWorkflowCompletion(params);
|
|
725
|
-
} catch (error) {
|
|
726
|
-
console.error(
|
|
727
|
-
`Failed to run workflow: ${
|
|
728
|
-
error instanceof Error ? error.message : error
|
|
729
|
-
}`
|
|
730
|
-
);
|
|
731
|
-
throw error;
|
|
732
|
-
}
|
|
733
|
-
};
|
|
734
|
-
|
|
735
|
-
async function* proxyGenerator<Item>(
|
|
736
|
-
apiKey: string,
|
|
737
|
-
baseURL: string,
|
|
738
|
-
generator: AsyncIterable<Item>,
|
|
739
|
-
body: TrackRequest,
|
|
740
|
-
throwOnError: boolean = true
|
|
741
|
-
) {
|
|
742
|
-
const results = [];
|
|
743
|
-
for await (const value of generator) {
|
|
744
|
-
yield body.return_pl_id ? [value, null] : value;
|
|
745
|
-
results.push(value);
|
|
746
|
-
}
|
|
747
|
-
const request_response = cleaned_result(results, body.function_name);
|
|
748
|
-
const response = await promptLayerApiRequest(
|
|
749
|
-
apiKey,
|
|
750
|
-
baseURL,
|
|
751
|
-
{
|
|
752
|
-
...body,
|
|
753
|
-
request_response,
|
|
754
|
-
request_end_time: new Date().toISOString(),
|
|
755
|
-
},
|
|
756
|
-
throwOnError
|
|
757
|
-
);
|
|
758
|
-
if (response) {
|
|
759
|
-
if (body.return_pl_id) {
|
|
760
|
-
const request_id = (response as any)[1];
|
|
761
|
-
const lastResult = results.at(-1);
|
|
762
|
-
yield [lastResult, request_id];
|
|
763
|
-
}
|
|
764
|
-
}
|
|
765
|
-
}
|
|
766
|
-
|
|
767
|
-
const warnOnBadResponse = (request_response: any, main_message: string) => {
|
|
768
|
-
try {
|
|
769
|
-
console.warn(`${main_message}: ${request_response.message}`);
|
|
770
|
-
} catch (e) {
|
|
771
|
-
console.warn(`${main_message}: ${request_response}`);
|
|
772
|
-
}
|
|
773
|
-
};
|
|
774
|
-
|
|
775
|
-
const trackRequest = async (
|
|
776
|
-
baseURL: string,
|
|
777
|
-
body: TrackRequest,
|
|
778
|
-
throwOnError: boolean = true
|
|
779
|
-
) => {
|
|
780
|
-
try {
|
|
781
|
-
const response = await fetchWithRetry(`${baseURL}/track-request`, {
|
|
782
|
-
method: "POST",
|
|
783
|
-
headers: { "Content-Type": "application/json", ...getCommonHeaders() },
|
|
784
|
-
body: JSON.stringify(body),
|
|
785
|
-
});
|
|
786
|
-
const data = await response.json();
|
|
787
|
-
if (response.status !== 200) {
|
|
788
|
-
const errorMessage =
|
|
789
|
-
data.message || data.error || "Failed to track request";
|
|
790
|
-
if (throwOnError) {
|
|
791
|
-
throw new Error(errorMessage);
|
|
792
|
-
} else {
|
|
793
|
-
warnOnBadResponse(
|
|
794
|
-
data,
|
|
795
|
-
"WARNING: While logging your request, PromptLayer experienced the following error:"
|
|
796
|
-
);
|
|
797
|
-
}
|
|
798
|
-
}
|
|
799
|
-
return data;
|
|
800
|
-
} catch (e) {
|
|
801
|
-
if (throwOnError) {
|
|
802
|
-
throw e;
|
|
803
|
-
}
|
|
804
|
-
console.warn(
|
|
805
|
-
`WARNING: While logging your request PromptLayer had the following error: ${e}`
|
|
806
|
-
);
|
|
807
|
-
}
|
|
808
|
-
return {};
|
|
809
|
-
};
|
|
810
|
-
|
|
811
|
-
const openaiChatRequest = async (client: TypeOpenAI, kwargs: any) => {
|
|
812
|
-
return await client.chat.completions.create(kwargs);
|
|
813
|
-
};
|
|
814
|
-
|
|
815
|
-
const openaiCompletionsRequest = async (client: TypeOpenAI, kwargs: any) => {
|
|
816
|
-
return await client.completions.create(kwargs);
|
|
817
|
-
};
|
|
818
|
-
|
|
819
|
-
// Dispatch table: prompt-template type -> OpenAI SDK request helper.
const MAP_TYPE_TO_OPENAI_FUNCTION = {
  chat: openaiChatRequest,
  completion: openaiCompletionsRequest,
};
|
|
823
|
-
|
|
824
|
-
const openaiRequest = async (
|
|
825
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
826
|
-
kwargs: any
|
|
827
|
-
) => {
|
|
828
|
-
const OpenAI = require("openai").default;
|
|
829
|
-
const client = new OpenAI({
|
|
830
|
-
baseURL: kwargs.baseURL,
|
|
831
|
-
apiKey: kwargs.apiKey,
|
|
832
|
-
});
|
|
833
|
-
|
|
834
|
-
delete kwargs?.apiKey;
|
|
835
|
-
delete kwargs?.baseURL;
|
|
836
|
-
|
|
837
|
-
const api_type = promptBlueprint.metadata?.model?.api_type;
|
|
838
|
-
if (api_type === "chat-completions") {
|
|
839
|
-
const requestToMake =
|
|
840
|
-
MAP_TYPE_TO_OPENAI_FUNCTION[promptBlueprint.prompt_template.type];
|
|
841
|
-
return await requestToMake(client, kwargs);
|
|
842
|
-
} else if (api_type === "images") {
|
|
843
|
-
return await client.images.generate(kwargs);
|
|
844
|
-
} else {
|
|
845
|
-
return await client.responses.create(kwargs);
|
|
846
|
-
}
|
|
847
|
-
};
|
|
848
|
-
|
|
849
|
-
const azureOpenAIRequest = async (
|
|
850
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
851
|
-
kwargs: any
|
|
852
|
-
) => {
|
|
853
|
-
const { AzureOpenAI } = require("openai");
|
|
854
|
-
const client = new AzureOpenAI({
|
|
855
|
-
endpoint: process.env.AZURE_OPENAI_ENDPOINT || kwargs.baseURL,
|
|
856
|
-
apiVersion: process.env.OPENAI_API_VERSION || kwargs.apiVersion,
|
|
857
|
-
apiKey: process.env.AZURE_OPENAI_API_KEY || kwargs.apiKey,
|
|
858
|
-
});
|
|
859
|
-
delete kwargs?.baseURL;
|
|
860
|
-
delete kwargs?.apiVersion;
|
|
861
|
-
delete kwargs?.apiKey;
|
|
862
|
-
|
|
863
|
-
const api_type = promptBlueprint.metadata?.model?.api_type;
|
|
864
|
-
|
|
865
|
-
if (api_type === "chat-completions") {
|
|
866
|
-
const requestToMake = MAP_TYPE_TO_OPENAI_FUNCTION[promptBlueprint.prompt_template.type];
|
|
867
|
-
return await requestToMake(client, kwargs);
|
|
868
|
-
} else if (api_type === "images") {
|
|
869
|
-
return await client.images.generate(kwargs);
|
|
870
|
-
} else {
|
|
871
|
-
return await client.responses.create(kwargs);
|
|
872
|
-
}
|
|
873
|
-
};
|
|
874
|
-
|
|
875
|
-
const anthropicChatRequest = async (
|
|
876
|
-
client: TypeAnthropic | AnthropicVertex | AnthropicBedrock,
|
|
877
|
-
kwargs: any
|
|
878
|
-
) => {
|
|
879
|
-
return client.messages.create(kwargs);
|
|
880
|
-
};
|
|
881
|
-
|
|
882
|
-
const anthropicCompletionsRequest = async (
|
|
883
|
-
client: TypeAnthropic | AnthropicBedrock,
|
|
884
|
-
kwargs: any
|
|
885
|
-
) => {
|
|
886
|
-
return client.completions.create(kwargs);
|
|
887
|
-
};
|
|
888
|
-
|
|
889
|
-
// Dispatch table: prompt-template type -> Anthropic SDK request helper.
const MAP_TYPE_TO_ANTHROPIC_FUNCTION = {
  chat: anthropicChatRequest,
  completion: anthropicCompletionsRequest,
};
|
|
893
|
-
|
|
894
|
-
const anthropicRequest = async (
|
|
895
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
896
|
-
kwargs: any
|
|
897
|
-
) => {
|
|
898
|
-
const Anthropic = require("@anthropic-ai/sdk").default;
|
|
899
|
-
const client = new Anthropic({
|
|
900
|
-
baseURL: kwargs.baseURL,
|
|
901
|
-
apiKey: kwargs.apiKey,
|
|
902
|
-
});
|
|
903
|
-
const requestToMake =
|
|
904
|
-
MAP_TYPE_TO_ANTHROPIC_FUNCTION[promptBlueprint.prompt_template.type];
|
|
905
|
-
return requestToMake(client, kwargs);
|
|
906
|
-
};
|
|
907
|
-
|
|
908
|
-
const utilLogRequest = async (
|
|
909
|
-
apiKey: string,
|
|
910
|
-
baseURL: string,
|
|
911
|
-
body: LogRequest,
|
|
912
|
-
throwOnError: boolean = true
|
|
913
|
-
): Promise<RequestLog | null> => {
|
|
914
|
-
try {
|
|
915
|
-
const response = await fetchWithRetry(`${baseURL}/log-request`, {
|
|
916
|
-
method: "POST",
|
|
917
|
-
headers: { "Content-Type": "application/json", "X-API-KEY": apiKey, ...getCommonHeaders() },
|
|
918
|
-
body: JSON.stringify(body),
|
|
919
|
-
});
|
|
920
|
-
const data = await response.json();
|
|
921
|
-
if (response.status !== 201) {
|
|
922
|
-
const errorMessage =
|
|
923
|
-
data.message || data.error || "Failed to log request";
|
|
924
|
-
if (throwOnError) {
|
|
925
|
-
throw new Error(errorMessage);
|
|
926
|
-
} else {
|
|
927
|
-
warnOnBadResponse(
|
|
928
|
-
data,
|
|
929
|
-
"WARNING: While logging your request PromptLayer had the following error"
|
|
930
|
-
);
|
|
931
|
-
return null;
|
|
932
|
-
}
|
|
933
|
-
}
|
|
934
|
-
return data;
|
|
935
|
-
} catch (e) {
|
|
936
|
-
if (throwOnError) {
|
|
937
|
-
throw e;
|
|
938
|
-
}
|
|
939
|
-
console.warn(
|
|
940
|
-
`WARNING: While tracking your prompt PromptLayer had the following error: ${e}`
|
|
941
|
-
);
|
|
942
|
-
return null;
|
|
943
|
-
}
|
|
944
|
-
};
|
|
945
|
-
|
|
946
|
-
const googleChatRequest = async (model_client: any, kwargs: any) => {
|
|
947
|
-
const history = kwargs?.history;
|
|
948
|
-
const generationConfig = kwargs?.generationConfig;
|
|
949
|
-
const lastMessage =
|
|
950
|
-
history.length > 0 ? history[history.length - 1]?.parts : "";
|
|
951
|
-
const chat = model_client.chats.create({
|
|
952
|
-
model: kwargs?.model,
|
|
953
|
-
history: history.slice(0, -1) ?? [],
|
|
954
|
-
config: generationConfig,
|
|
955
|
-
});
|
|
956
|
-
|
|
957
|
-
if (kwargs?.stream)
|
|
958
|
-
return await chat.sendMessageStream({ message: lastMessage });
|
|
959
|
-
return await chat.sendMessage({ message: lastMessage });
|
|
960
|
-
};
|
|
961
|
-
|
|
962
|
-
const googleCompletionsRequest = async (
|
|
963
|
-
model_client: any,
|
|
964
|
-
{ stream, ...kwargs }: any
|
|
965
|
-
) => {
|
|
966
|
-
if (stream) return await model_client.generateContentStream({ ...kwargs });
|
|
967
|
-
return await model_client.generateContent({ ...kwargs });
|
|
968
|
-
};
|
|
969
|
-
|
|
970
|
-
// Dispatch table: prompt-template type -> Google GenAI request helper.
const MAP_TYPE_TO_GOOGLE_FUNCTION = {
  chat: googleChatRequest,
  completion: googleCompletionsRequest,
};
|
|
974
|
-
|
|
975
|
-
const googleRequest = async (
|
|
976
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
977
|
-
kwargs: any
|
|
978
|
-
) => {
|
|
979
|
-
const { GoogleGenAI } = await import("@google/genai");
|
|
980
|
-
|
|
981
|
-
const geminiAPI = process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY;
|
|
982
|
-
const project =
|
|
983
|
-
process.env.VERTEX_AI_PROJECT_ID ||
|
|
984
|
-
process.env.GOOGLE_PROJECT_ID ||
|
|
985
|
-
process.env.GOOGLE_CLOUD_PROJECT;
|
|
986
|
-
const location =
|
|
987
|
-
process.env.VERTEX_AI_PROJECT_LOCATION ||
|
|
988
|
-
process.env.GOOGLE_PROJECT_LOCATION ||
|
|
989
|
-
process.env.GOOGLE_CLOUD_PROJECT_LOCATION;
|
|
990
|
-
const googleAuthOptions = {
|
|
991
|
-
keyFilename: process.env.GOOGLE_APPLICATION_CREDENTIALS,
|
|
992
|
-
projectId: project,
|
|
993
|
-
scopes: ["https://www.googleapis.com/auth/cloud-platform"],
|
|
994
|
-
};
|
|
995
|
-
|
|
996
|
-
const genAI = geminiAPI
|
|
997
|
-
? new GoogleGenAI({ apiKey: geminiAPI })
|
|
998
|
-
: new GoogleGenAI({
|
|
999
|
-
vertexai: true,
|
|
1000
|
-
project: project,
|
|
1001
|
-
location: location,
|
|
1002
|
-
googleAuthOptions,
|
|
1003
|
-
});
|
|
1004
|
-
const requestToMake =
|
|
1005
|
-
MAP_TYPE_TO_GOOGLE_FUNCTION[promptBlueprint.prompt_template.type];
|
|
1006
|
-
|
|
1007
|
-
return await requestToMake(genAI, kwargs);
|
|
1008
|
-
};
|
|
1009
|
-
|
|
1010
|
-
const snakeToCamel = (str: string): string =>
|
|
1011
|
-
str.replace(/_([a-z])/g, (_, letter) => letter.toUpperCase());
|
|
1012
|
-
|
|
1013
|
-
const convertKeysToCamelCase = <T>(
|
|
1014
|
-
obj: T,
|
|
1015
|
-
ignoreValuesWithKeys: Set<string> = new Set()
|
|
1016
|
-
): T => {
|
|
1017
|
-
if (!obj || typeof obj !== "object") return obj;
|
|
1018
|
-
if (Array.isArray(obj))
|
|
1019
|
-
return obj.map((item) =>
|
|
1020
|
-
convertKeysToCamelCase(item, ignoreValuesWithKeys)
|
|
1021
|
-
) as T;
|
|
1022
|
-
|
|
1023
|
-
return Object.fromEntries(
|
|
1024
|
-
Object.entries(obj).map(([key, value]) => {
|
|
1025
|
-
if (ignoreValuesWithKeys.has(key)) {
|
|
1026
|
-
return [snakeToCamel(key), value];
|
|
1027
|
-
}
|
|
1028
|
-
return [
|
|
1029
|
-
snakeToCamel(key),
|
|
1030
|
-
convertKeysToCamelCase(value, ignoreValuesWithKeys),
|
|
1031
|
-
];
|
|
1032
|
-
})
|
|
1033
|
-
) as T;
|
|
1034
|
-
};
|
|
1035
|
-
|
|
1036
|
-
const configureProviderSettings = (
|
|
1037
|
-
promptBlueprint: any,
|
|
1038
|
-
customProvider: any,
|
|
1039
|
-
modelParameterOverrides: any = {},
|
|
1040
|
-
stream: boolean = false
|
|
1041
|
-
) => {
|
|
1042
|
-
const provider_type =
|
|
1043
|
-
customProvider?.client ?? promptBlueprint.metadata?.model?.provider;
|
|
1044
|
-
const api_type = promptBlueprint.metadata?.model?.api_type;
|
|
1045
|
-
|
|
1046
|
-
if (!provider_type) {
|
|
1047
|
-
throw new Error(
|
|
1048
|
-
"Provider type not found in prompt blueprint or custom provider"
|
|
1049
|
-
);
|
|
1050
|
-
}
|
|
1051
|
-
|
|
1052
|
-
let kwargs = {
|
|
1053
|
-
...(promptBlueprint.llm_kwargs || {}),
|
|
1054
|
-
stream,
|
|
1055
|
-
};
|
|
1056
|
-
|
|
1057
|
-
if (
|
|
1058
|
-
["google", "vertexai"].includes(provider_type) &&
|
|
1059
|
-
promptBlueprint.metadata?.model?.name.startsWith("gemini")
|
|
1060
|
-
)
|
|
1061
|
-
kwargs = convertKeysToCamelCase(
|
|
1062
|
-
kwargs,
|
|
1063
|
-
new Set(["function_declarations", "properties"])
|
|
1064
|
-
);
|
|
1065
|
-
|
|
1066
|
-
const providerConfig = {
|
|
1067
|
-
baseURL: customProvider?.base_url ?? promptBlueprint.provider_base_url?.url,
|
|
1068
|
-
apiKey: customProvider?.api_key,
|
|
1069
|
-
};
|
|
1070
|
-
|
|
1071
|
-
Object.entries(providerConfig).forEach(([key, value]) => {
|
|
1072
|
-
if (value !== undefined) {
|
|
1073
|
-
kwargs[key] = value;
|
|
1074
|
-
}
|
|
1075
|
-
});
|
|
1076
|
-
|
|
1077
|
-
if (stream && STREAMING_PROVIDERS_WITH_USAGE.includes(provider_type as any) && api_type === "chat-completions") {
|
|
1078
|
-
kwargs.stream_options = { include_usage: true };
|
|
1079
|
-
}
|
|
1080
|
-
|
|
1081
|
-
return { provider_type, kwargs };
|
|
1082
|
-
};
|
|
1083
|
-
|
|
1084
|
-
const getProviderConfig = (provider_type: string, promptTemplate: any) => {
|
|
1085
|
-
const providerMap =
|
|
1086
|
-
MAP_PROVIDER_TO_FUNCTION_NAME[
|
|
1087
|
-
provider_type as keyof typeof MAP_PROVIDER_TO_FUNCTION_NAME
|
|
1088
|
-
];
|
|
1089
|
-
|
|
1090
|
-
if (!providerMap) {
|
|
1091
|
-
throw new Error(`Unsupported provider type: ${provider_type}`);
|
|
1092
|
-
}
|
|
1093
|
-
|
|
1094
|
-
const templateType = promptTemplate.type as keyof typeof providerMap;
|
|
1095
|
-
const config = providerMap[templateType];
|
|
1096
|
-
|
|
1097
|
-
if (!config) {
|
|
1098
|
-
throw new Error(
|
|
1099
|
-
`Unsupported template type '${promptTemplate.type}' for provider '${provider_type}'`
|
|
1100
|
-
);
|
|
1101
|
-
}
|
|
1102
|
-
|
|
1103
|
-
return config;
|
|
1104
|
-
};
|
|
1105
|
-
|
|
1106
|
-
const vertexaiRequest = async (
|
|
1107
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
1108
|
-
kwargs: any
|
|
1109
|
-
) => {
|
|
1110
|
-
const model = promptBlueprint.metadata?.model;
|
|
1111
|
-
if (!model) throw new Error("Model metadata not found in prompt blueprint");
|
|
1112
|
-
if (model.name.startsWith("gemini"))
|
|
1113
|
-
return googleRequest(promptBlueprint, kwargs);
|
|
1114
|
-
if (model.name.startsWith("claude")) {
|
|
1115
|
-
const { AnthropicVertex } = await import("@anthropic-ai/vertex-sdk");
|
|
1116
|
-
const client = new AnthropicVertex({ baseURL: kwargs.baseURL });
|
|
1117
|
-
if (promptBlueprint.prompt_template.type === "chat")
|
|
1118
|
-
return anthropicChatRequest(client, kwargs);
|
|
1119
|
-
throw new Error(
|
|
1120
|
-
`Unsupported prompt template type '${promptBlueprint.prompt_template.type}' for Anthropic Vertex AI`
|
|
1121
|
-
);
|
|
1122
|
-
}
|
|
1123
|
-
throw new Error(
|
|
1124
|
-
`Unsupported model name '${model.name}' for Vertex AI request`
|
|
1125
|
-
);
|
|
1126
|
-
};
|
|
1127
|
-
|
|
1128
|
-
const amazonBedrockRequest = async (
|
|
1129
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
1130
|
-
kwargs: any
|
|
1131
|
-
) => {
|
|
1132
|
-
const { BedrockRuntimeClient, ConverseCommand, ConverseStreamCommand } =
|
|
1133
|
-
await import("@aws-sdk/client-bedrock-runtime");
|
|
1134
|
-
const client = new BedrockRuntimeClient({
|
|
1135
|
-
credentials: {
|
|
1136
|
-
accessKeyId: kwargs?.aws_access_key || process.env.AWS_ACCESS_KEY_ID,
|
|
1137
|
-
secretAccessKey:
|
|
1138
|
-
kwargs?.aws_secret_key || process.env.AWS_SECRET_ACCESS_KEY,
|
|
1139
|
-
sessionToken: kwargs?.aws_session_token || process.env.AWS_SESSION_TOKEN,
|
|
1140
|
-
},
|
|
1141
|
-
region:
|
|
1142
|
-
kwargs?.aws_region ||
|
|
1143
|
-
process.env.AWS_REGION ||
|
|
1144
|
-
process.env.AWS_DEFAULT_REGION ||
|
|
1145
|
-
"us-east-1",
|
|
1146
|
-
});
|
|
1147
|
-
|
|
1148
|
-
if (kwargs?.stream) {
|
|
1149
|
-
delete kwargs.stream;
|
|
1150
|
-
const command = new ConverseStreamCommand({
|
|
1151
|
-
...kwargs,
|
|
1152
|
-
});
|
|
1153
|
-
return await client.send(command);
|
|
1154
|
-
} else {
|
|
1155
|
-
delete kwargs?.stream;
|
|
1156
|
-
const command = new ConverseCommand({
|
|
1157
|
-
...kwargs,
|
|
1158
|
-
});
|
|
1159
|
-
return await client.send(command);
|
|
1160
|
-
}
|
|
1161
|
-
};
|
|
1162
|
-
|
|
1163
|
-
const anthropicBedrockRequest = async (
|
|
1164
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
1165
|
-
kwargs: any
|
|
1166
|
-
) => {
|
|
1167
|
-
const { AnthropicBedrock } = await import("@anthropic-ai/bedrock-sdk");
|
|
1168
|
-
const client = new AnthropicBedrock({
|
|
1169
|
-
awsAccessKey: kwargs.aws_access_key,
|
|
1170
|
-
awsSecretKey: kwargs.aws_secret_key,
|
|
1171
|
-
awsRegion: kwargs.aws_region,
|
|
1172
|
-
awsSessionToken: kwargs.aws_session_token,
|
|
1173
|
-
baseURL: kwargs.base_url,
|
|
1174
|
-
});
|
|
1175
|
-
|
|
1176
|
-
const requestToMake =
|
|
1177
|
-
MAP_TYPE_TO_ANTHROPIC_FUNCTION[promptBlueprint.prompt_template.type];
|
|
1178
|
-
return requestToMake(client, kwargs);
|
|
1179
|
-
};
|
|
1180
|
-
|
|
1181
|
-
const mistralRequest = async (
|
|
1182
|
-
promptBlueprint: GetPromptTemplateResponse,
|
|
1183
|
-
kwargs: any
|
|
1184
|
-
) => {
|
|
1185
|
-
const { Mistral } = await import("@mistralai/mistralai");
|
|
1186
|
-
const client = new Mistral({ apiKey: process.env.MISTRAL_API_KEY });
|
|
1187
|
-
kwargs = convertKeysToCamelCase(kwargs, new Set());
|
|
1188
|
-
if (kwargs?.stream) {
|
|
1189
|
-
delete kwargs.stream;
|
|
1190
|
-
return await client.chat.stream(kwargs);
|
|
1191
|
-
}
|
|
1192
|
-
delete kwargs.stream;
|
|
1193
|
-
return await client.chat.complete(kwargs);
|
|
1194
|
-
};
|
|
1195
|
-
|
|
1196
|
-
export const readEnv = (env: string): string | undefined => {
|
|
1197
|
-
if (typeof (globalThis as any).process !== "undefined")
|
|
1198
|
-
return (globalThis as any).process.env?.[env]?.trim() ?? undefined;
|
|
1199
|
-
|
|
1200
|
-
if (typeof (globalThis as any).Deno !== "undefined")
|
|
1201
|
-
return (globalThis as any).Deno.env?.get?.(env)?.trim();
|
|
1202
|
-
return undefined;
|
|
1203
|
-
};
|
|
1204
|
-
|
|
1205
|
-
// Public surface of this module: per-provider request executors, PromptLayer
// tracking/logging utilities, and prompt-template accessors.
export {
  amazonBedrockRequest,
  anthropicBedrockRequest,
  anthropicRequest,
  azureOpenAIRequest,
  configureProviderSettings,
  getAllPromptTemplates,
  getPromptTemplate,
  getProviderConfig,
  googleRequest,
  mistralRequest,
  openaiRequest,
  promptlayerApiHandler,
  promptLayerApiRequest,
  promptLayerCreateGroup,
  promptLayerTrackGroup,
  promptLayerTrackMetadata,
  promptLayerTrackPrompt,
  promptLayerTrackScore,
  publishPromptTemplate,
  trackRequest,
  utilLogRequest,
  vertexaiRequest,
};
|