@promptev/client 0.0.3 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/index.cjs +277 -42
- package/dist/esm/index.d.ts +72 -15
- package/dist/esm/index.js +269 -38
- package/package.json +6 -9
- package/readme.md +389 -55
- package/dist/cjs/index.d.ts +0 -30
package/dist/cjs/index.cjs
CHANGED
|
@@ -1,58 +1,293 @@
|
|
|
1
1
|
"use strict";
|
|
2
|
-
|
|
3
|
-
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
-
};
|
|
2
|
+
// ── Types ───────────────────────────────────────────────
|
|
5
3
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
-
exports.PromptevClient = void 0;
|
|
7
|
-
|
|
4
|
+
exports.PromptevClient = exports.NetworkError = exports.ServerError = exports.RateLimitError = exports.NotFoundError = exports.AuthenticationError = exports.ValidationError = exports.PromptevError = void 0;
|
|
5
|
+
// ── Errors ──────────────────────────────────────────────
|
|
6
|
+
class PromptevError extends Error {
|
|
7
|
+
constructor(message, statusCode, responseText) {
|
|
8
|
+
super(message);
|
|
9
|
+
this.name = "PromptevError";
|
|
10
|
+
this.statusCode = statusCode;
|
|
11
|
+
this.responseText = responseText;
|
|
12
|
+
}
|
|
13
|
+
}
|
|
14
|
+
exports.PromptevError = PromptevError;
|
|
15
|
+
class ValidationError extends PromptevError {
|
|
16
|
+
constructor(message, statusCode, responseText) {
|
|
17
|
+
super(message, statusCode, responseText);
|
|
18
|
+
this.name = "ValidationError";
|
|
19
|
+
}
|
|
20
|
+
}
|
|
21
|
+
exports.ValidationError = ValidationError;
|
|
22
|
+
class AuthenticationError extends PromptevError {
|
|
23
|
+
constructor(message, statusCode, responseText) {
|
|
24
|
+
super(message, statusCode, responseText);
|
|
25
|
+
this.name = "AuthenticationError";
|
|
26
|
+
}
|
|
27
|
+
}
|
|
28
|
+
exports.AuthenticationError = AuthenticationError;
|
|
29
|
+
class NotFoundError extends PromptevError {
|
|
30
|
+
constructor(message, statusCode, responseText) {
|
|
31
|
+
super(message, statusCode, responseText);
|
|
32
|
+
this.name = "NotFoundError";
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
exports.NotFoundError = NotFoundError;
|
|
36
|
+
class RateLimitError extends PromptevError {
|
|
37
|
+
constructor(message, statusCode, responseText) {
|
|
38
|
+
super(message, statusCode, responseText);
|
|
39
|
+
this.name = "RateLimitError";
|
|
40
|
+
}
|
|
41
|
+
}
|
|
42
|
+
exports.RateLimitError = RateLimitError;
|
|
43
|
+
class ServerError extends PromptevError {
|
|
44
|
+
constructor(message, statusCode, responseText) {
|
|
45
|
+
super(message, statusCode, responseText);
|
|
46
|
+
this.name = "ServerError";
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
exports.ServerError = ServerError;
|
|
50
|
+
class NetworkError extends PromptevError {
|
|
51
|
+
constructor(message, statusCode, responseText) {
|
|
52
|
+
super(message, statusCode, responseText);
|
|
53
|
+
this.name = "NetworkError";
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
exports.NetworkError = NetworkError;
|
|
57
|
+
// ── Client ──────────────────────────────────────────────
|
|
58
|
+
const RETRYABLE_STATUS = new Set([502, 503, 504]);
|
|
59
|
+
const DEFAULT_MAX_RETRIES = 2;
|
|
60
|
+
const DEFAULT_BACKOFF = 500; // ms
|
|
8
61
|
class PromptevClient {
|
|
9
62
|
constructor(config) {
|
|
10
|
-
this.baseUrl = config.baseUrl ?? "https://api.promptev.ai";
|
|
11
63
|
this.projectKey = config.projectKey;
|
|
12
|
-
this.
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
64
|
+
this.baseUrl = (config.baseUrl ?? "https://api.promptev.ai").replace(/\/+$/, "");
|
|
65
|
+
this.headers = {
|
|
66
|
+
"Content-Type": "application/json",
|
|
67
|
+
...(config.headers ?? {}),
|
|
68
|
+
};
|
|
69
|
+
this.timeout = config.timeout ?? 30000;
|
|
70
|
+
this.maxRetries = config.maxRetries ?? DEFAULT_MAX_RETRIES;
|
|
71
|
+
}
|
|
72
|
+
// ── Error handling ──────────────────────────────────
|
|
73
|
+
static raiseForStatus(status, text) {
|
|
74
|
+
if (status < 400)
|
|
75
|
+
return;
|
|
76
|
+
let detail;
|
|
77
|
+
try {
|
|
78
|
+
const body = JSON.parse(text);
|
|
79
|
+
detail =
|
|
80
|
+
typeof body.detail === "string" ? body.detail : text || "Unknown error";
|
|
81
|
+
}
|
|
82
|
+
catch {
|
|
83
|
+
detail = text || "Unknown error";
|
|
84
|
+
}
|
|
85
|
+
if (status === 400)
|
|
86
|
+
throw new ValidationError(detail, status, text);
|
|
87
|
+
if (status === 401)
|
|
88
|
+
throw new AuthenticationError(detail, status, text);
|
|
89
|
+
if (status === 403)
|
|
90
|
+
throw new AuthenticationError(detail, status, text);
|
|
91
|
+
if (status === 404)
|
|
92
|
+
throw new NotFoundError(detail, status, text);
|
|
93
|
+
if (status === 429)
|
|
94
|
+
throw new RateLimitError(detail, status, text);
|
|
95
|
+
if (status >= 500)
|
|
96
|
+
throw new ServerError(detail, status, text);
|
|
97
|
+
throw new PromptevError(detail, status, text);
|
|
98
|
+
}
|
|
99
|
+
// ── Retry helper ────────────────────────────────────
|
|
100
|
+
async fetchWithRetry(url, init) {
|
|
101
|
+
let lastError = null;
|
|
102
|
+
for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
|
|
103
|
+
try {
|
|
104
|
+
const controller = new AbortController();
|
|
105
|
+
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
|
|
106
|
+
const resp = await fetch(url, {
|
|
107
|
+
...init,
|
|
108
|
+
signal: controller.signal,
|
|
109
|
+
});
|
|
110
|
+
clearTimeout(timeoutId);
|
|
111
|
+
if (!RETRYABLE_STATUS.has(resp.status) || attempt === this.maxRetries) {
|
|
112
|
+
return resp;
|
|
113
|
+
}
|
|
114
|
+
lastError = new ServerError(`Server error (${resp.status})`, resp.status);
|
|
115
|
+
}
|
|
116
|
+
catch (err) {
|
|
117
|
+
if (err.name === "AbortError") {
|
|
118
|
+
lastError = new NetworkError("Request timed out");
|
|
119
|
+
}
|
|
120
|
+
else {
|
|
121
|
+
lastError = new NetworkError(`Network error: ${err.message}`);
|
|
122
|
+
}
|
|
123
|
+
if (attempt === this.maxRetries)
|
|
124
|
+
throw lastError;
|
|
125
|
+
}
|
|
126
|
+
const delay = DEFAULT_BACKOFF * Math.pow(2, attempt);
|
|
127
|
+
await new Promise((resolve) => setTimeout(resolve, delay));
|
|
128
|
+
}
|
|
129
|
+
throw lastError;
|
|
130
|
+
}
|
|
131
|
+
// ── SSE parser ──────────────────────────────────────
|
|
132
|
+
async *parseSSE(response) {
|
|
133
|
+
const reader = response.body.getReader();
|
|
134
|
+
const decoder = new TextDecoder();
|
|
135
|
+
let buffer = "";
|
|
136
|
+
try {
|
|
137
|
+
while (true) {
|
|
138
|
+
const { done, value } = await reader.read();
|
|
139
|
+
if (done)
|
|
140
|
+
break;
|
|
141
|
+
buffer += decoder.decode(value, { stream: true });
|
|
142
|
+
const lines = buffer.split("\n");
|
|
143
|
+
buffer = lines.pop() || "";
|
|
144
|
+
for (const line of lines) {
|
|
145
|
+
if (!line.startsWith("data: "))
|
|
146
|
+
continue;
|
|
147
|
+
try {
|
|
148
|
+
const data = JSON.parse(line.slice(6));
|
|
149
|
+
const event = {
|
|
150
|
+
type: data.type || "unknown",
|
|
151
|
+
output: data.output || "",
|
|
152
|
+
raw: data,
|
|
153
|
+
};
|
|
154
|
+
yield event;
|
|
155
|
+
if (event.type === "done" || event.type === "error")
|
|
156
|
+
return;
|
|
157
|
+
}
|
|
158
|
+
catch {
|
|
159
|
+
continue;
|
|
160
|
+
}
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
}
|
|
164
|
+
finally {
|
|
165
|
+
reader.releaseLock();
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
// ── Prompt API ──────────────────────────────────────
|
|
169
|
+
/**
|
|
170
|
+
* Compile and execute a prompt.
|
|
171
|
+
*
|
|
172
|
+
* If the prompt has a model configured, it compiles the template,
|
|
173
|
+
* sends it to the LLM, and returns the AI response. If no model
|
|
174
|
+
* is configured, it returns the compiled template string.
|
|
175
|
+
*/
|
|
176
|
+
async runPrompt(promptKey, query, variables) {
|
|
177
|
+
const url = `${this.baseUrl}/api/sdk/v1/prompt/client/${this.projectKey}/${promptKey}`;
|
|
178
|
+
const merged = { query, ...(variables ?? {}) };
|
|
179
|
+
const resp = await this.fetchWithRetry(url, {
|
|
180
|
+
method: "POST",
|
|
181
|
+
headers: this.headers,
|
|
182
|
+
body: JSON.stringify({ variables: merged }),
|
|
18
183
|
});
|
|
184
|
+
const text = await resp.text();
|
|
185
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
186
|
+
const data = JSON.parse(text);
|
|
187
|
+
if (typeof data.prompt !== "string") {
|
|
188
|
+
throw new PromptevError("Unexpected response: missing 'prompt' field");
|
|
189
|
+
}
|
|
190
|
+
return data.prompt;
|
|
19
191
|
}
|
|
20
192
|
/**
|
|
21
|
-
* Compile a prompt
|
|
22
|
-
*
|
|
23
|
-
*
|
|
24
|
-
*
|
|
25
|
-
* "additionalProp1": "string",
|
|
26
|
-
* "additionalProp2": "string",
|
|
27
|
-
* "additionalProp3": "string"
|
|
28
|
-
* }
|
|
29
|
-
* }
|
|
193
|
+
* Compile and execute a prompt with streaming.
|
|
194
|
+
*
|
|
195
|
+
* Use this when the prompt has tools attached or you want real-time
|
|
196
|
+
* output. Returns the same event types as agent streaming.
|
|
30
197
|
*/
|
|
31
|
-
async
|
|
32
|
-
const url =
|
|
198
|
+
async *streamPrompt(promptKey, query, variables) {
|
|
199
|
+
const url = `${this.baseUrl}/api/sdk/v1/prompt/client/${this.projectKey}/${promptKey}?stream=true`;
|
|
200
|
+
const merged = { query, ...(variables ?? {}) };
|
|
201
|
+
const controller = new AbortController();
|
|
202
|
+
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
|
|
203
|
+
let resp;
|
|
33
204
|
try {
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
}
|
|
41
|
-
return compiled;
|
|
205
|
+
resp = await fetch(url, {
|
|
206
|
+
method: "POST",
|
|
207
|
+
headers: this.headers,
|
|
208
|
+
body: JSON.stringify({ variables: merged }),
|
|
209
|
+
signal: controller.signal,
|
|
210
|
+
});
|
|
42
211
|
}
|
|
43
212
|
catch (err) {
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
213
|
+
clearTimeout(timeoutId);
|
|
214
|
+
if (err.name === "AbortError")
|
|
215
|
+
throw new NetworkError("Request timed out");
|
|
216
|
+
throw new NetworkError(`Network error: ${err.message}`);
|
|
217
|
+
}
|
|
218
|
+
clearTimeout(timeoutId);
|
|
219
|
+
if (resp.status >= 400) {
|
|
220
|
+
const text = await resp.text();
|
|
221
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
222
|
+
}
|
|
223
|
+
const contentType = resp.headers.get("content-type") || "";
|
|
224
|
+
// Non-streaming fallback: prompt has no model, backend returns JSON
|
|
225
|
+
if (!contentType.includes("text/event-stream")) {
|
|
226
|
+
const data = await resp.json();
|
|
227
|
+
yield {
|
|
228
|
+
type: "done",
|
|
229
|
+
output: data.prompt || "",
|
|
230
|
+
raw: data,
|
|
231
|
+
};
|
|
232
|
+
return;
|
|
233
|
+
}
|
|
234
|
+
yield* this.parseSSE(resp);
|
|
235
|
+
}
|
|
236
|
+
// ── Agent API ───────────────────────────────────────
|
|
237
|
+
/**
|
|
238
|
+
* Start a new agent chat session.
|
|
239
|
+
*/
|
|
240
|
+
async startAgent(chatbotId, options) {
|
|
241
|
+
const url = `${this.baseUrl}/api/sdk/v1/agent/${this.projectKey}/${chatbotId}/start`;
|
|
242
|
+
const payload = {};
|
|
243
|
+
if (options?.visitor != null)
|
|
244
|
+
payload.visitor = options.visitor;
|
|
245
|
+
if (options?.platform && options.platform !== "sdk")
|
|
246
|
+
payload.platform = options.platform;
|
|
247
|
+
const resp = await this.fetchWithRetry(url, {
|
|
248
|
+
method: "POST",
|
|
249
|
+
headers: this.headers,
|
|
250
|
+
body: JSON.stringify(payload),
|
|
251
|
+
});
|
|
252
|
+
const text = await resp.text();
|
|
253
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
254
|
+
const data = JSON.parse(text);
|
|
255
|
+
return {
|
|
256
|
+
sessionToken: data.session_token,
|
|
257
|
+
chatbotId: data.chatbot_id,
|
|
258
|
+
name: data.name,
|
|
259
|
+
memoryEnabled: data.memory_enabled ?? false,
|
|
260
|
+
messages: data.messages ?? [],
|
|
261
|
+
};
|
|
262
|
+
}
|
|
263
|
+
/**
|
|
264
|
+
* Stream an agent response as SSE events.
|
|
265
|
+
*/
|
|
266
|
+
async *streamAgent(chatbotId, options) {
|
|
267
|
+
const url = `${this.baseUrl}/api/sdk/v1/agent/${this.projectKey}/${chatbotId}/stream?session_token=${encodeURIComponent(options.sessionToken)}`;
|
|
268
|
+
const controller = new AbortController();
|
|
269
|
+
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
|
|
270
|
+
let resp;
|
|
271
|
+
try {
|
|
272
|
+
resp = await fetch(url, {
|
|
273
|
+
method: "POST",
|
|
274
|
+
headers: this.headers,
|
|
275
|
+
body: JSON.stringify({ query: options.query }),
|
|
276
|
+
signal: controller.signal,
|
|
277
|
+
});
|
|
278
|
+
}
|
|
279
|
+
catch (err) {
|
|
280
|
+
clearTimeout(timeoutId);
|
|
281
|
+
if (err.name === "AbortError")
|
|
282
|
+
throw new NetworkError("Request timed out");
|
|
283
|
+
throw new NetworkError(`Network error: ${err.message}`);
|
|
284
|
+
}
|
|
285
|
+
clearTimeout(timeoutId);
|
|
286
|
+
if (resp.status >= 400) {
|
|
287
|
+
const text = await resp.text();
|
|
288
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
55
289
|
}
|
|
290
|
+
yield* this.parseSSE(resp);
|
|
56
291
|
}
|
|
57
292
|
}
|
|
58
293
|
exports.PromptevClient = PromptevClient;
|
package/dist/esm/index.d.ts
CHANGED
|
@@ -1,26 +1,83 @@
|
|
|
1
1
|
export interface PromptevClientConfig {
|
|
2
|
-
baseUrl?: string;
|
|
3
2
|
projectKey: string;
|
|
3
|
+
baseUrl?: string;
|
|
4
4
|
headers?: Record<string, string>;
|
|
5
|
+
timeout?: number;
|
|
6
|
+
maxRetries?: number;
|
|
7
|
+
}
|
|
8
|
+
export interface AgentSession {
|
|
9
|
+
sessionToken: string;
|
|
10
|
+
chatbotId: string;
|
|
11
|
+
name: string;
|
|
12
|
+
memoryEnabled: boolean;
|
|
13
|
+
messages: Record<string, any>[];
|
|
14
|
+
}
|
|
15
|
+
export interface AgentEvent {
|
|
16
|
+
type: string;
|
|
17
|
+
output: string;
|
|
18
|
+
raw: Record<string, any>;
|
|
19
|
+
}
|
|
20
|
+
export declare class PromptevError extends Error {
|
|
21
|
+
statusCode?: number;
|
|
22
|
+
responseText?: string;
|
|
23
|
+
constructor(message: string, statusCode?: number, responseText?: string);
|
|
24
|
+
}
|
|
25
|
+
export declare class ValidationError extends PromptevError {
|
|
26
|
+
constructor(message: string, statusCode?: number, responseText?: string);
|
|
27
|
+
}
|
|
28
|
+
export declare class AuthenticationError extends PromptevError {
|
|
29
|
+
constructor(message: string, statusCode?: number, responseText?: string);
|
|
30
|
+
}
|
|
31
|
+
export declare class NotFoundError extends PromptevError {
|
|
32
|
+
constructor(message: string, statusCode?: number, responseText?: string);
|
|
33
|
+
}
|
|
34
|
+
export declare class RateLimitError extends PromptevError {
|
|
35
|
+
constructor(message: string, statusCode?: number, responseText?: string);
|
|
36
|
+
}
|
|
37
|
+
export declare class ServerError extends PromptevError {
|
|
38
|
+
constructor(message: string, statusCode?: number, responseText?: string);
|
|
39
|
+
}
|
|
40
|
+
export declare class NetworkError extends PromptevError {
|
|
41
|
+
constructor(message: string, statusCode?: number, responseText?: string);
|
|
5
42
|
}
|
|
6
43
|
export declare class PromptevClient {
|
|
7
|
-
private client;
|
|
8
|
-
private baseUrl;
|
|
9
44
|
private projectKey;
|
|
45
|
+
private baseUrl;
|
|
46
|
+
private headers;
|
|
47
|
+
private timeout;
|
|
48
|
+
private maxRetries;
|
|
10
49
|
constructor(config: PromptevClientConfig);
|
|
50
|
+
private static raiseForStatus;
|
|
51
|
+
private fetchWithRetry;
|
|
52
|
+
private parseSSE;
|
|
53
|
+
/**
|
|
54
|
+
* Compile and execute a prompt.
|
|
55
|
+
*
|
|
56
|
+
* If the prompt has a model configured, it compiles the template,
|
|
57
|
+
* sends it to the LLM, and returns the AI response. If no model
|
|
58
|
+
* is configured, it returns the compiled template string.
|
|
59
|
+
*/
|
|
60
|
+
runPrompt(promptKey: string, query: string, variables?: Record<string, string>): Promise<string>;
|
|
61
|
+
/**
|
|
62
|
+
* Compile and execute a prompt with streaming.
|
|
63
|
+
*
|
|
64
|
+
* Use this when the prompt has tools attached or you want real-time
|
|
65
|
+
* output. Returns the same event types as agent streaming.
|
|
66
|
+
*/
|
|
67
|
+
streamPrompt(promptKey: string, query: string, variables?: Record<string, string>): AsyncGenerator<AgentEvent>;
|
|
68
|
+
/**
|
|
69
|
+
* Start a new agent chat session.
|
|
70
|
+
*/
|
|
71
|
+
startAgent(chatbotId: string, options?: {
|
|
72
|
+
visitor?: string;
|
|
73
|
+
platform?: string;
|
|
74
|
+
}): Promise<AgentSession>;
|
|
11
75
|
/**
|
|
12
|
-
*
|
|
13
|
-
* Body shape (exact):
|
|
14
|
-
* {
|
|
15
|
-
* "variables": {
|
|
16
|
-
* "additionalProp1": "string",
|
|
17
|
-
* "additionalProp2": "string",
|
|
18
|
-
* "additionalProp3": "string"
|
|
19
|
-
* }
|
|
20
|
-
* }
|
|
76
|
+
* Stream an agent response as SSE events.
|
|
21
77
|
*/
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
78
|
+
streamAgent(chatbotId: string, options: {
|
|
79
|
+
sessionToken: string;
|
|
80
|
+
query: string;
|
|
81
|
+
}): AsyncGenerator<AgentEvent>;
|
|
25
82
|
}
|
|
26
83
|
export default PromptevClient;
|
package/dist/esm/index.js
CHANGED
|
@@ -1,52 +1,283 @@
|
|
|
1
|
-
|
|
1
|
+
// ── Types ───────────────────────────────────────────────
|
|
2
|
+
// ── Errors ──────────────────────────────────────────────
|
|
3
|
+
export class PromptevError extends Error {
|
|
4
|
+
constructor(message, statusCode, responseText) {
|
|
5
|
+
super(message);
|
|
6
|
+
this.name = "PromptevError";
|
|
7
|
+
this.statusCode = statusCode;
|
|
8
|
+
this.responseText = responseText;
|
|
9
|
+
}
|
|
10
|
+
}
|
|
11
|
+
export class ValidationError extends PromptevError {
|
|
12
|
+
constructor(message, statusCode, responseText) {
|
|
13
|
+
super(message, statusCode, responseText);
|
|
14
|
+
this.name = "ValidationError";
|
|
15
|
+
}
|
|
16
|
+
}
|
|
17
|
+
export class AuthenticationError extends PromptevError {
|
|
18
|
+
constructor(message, statusCode, responseText) {
|
|
19
|
+
super(message, statusCode, responseText);
|
|
20
|
+
this.name = "AuthenticationError";
|
|
21
|
+
}
|
|
22
|
+
}
|
|
23
|
+
export class NotFoundError extends PromptevError {
|
|
24
|
+
constructor(message, statusCode, responseText) {
|
|
25
|
+
super(message, statusCode, responseText);
|
|
26
|
+
this.name = "NotFoundError";
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
export class RateLimitError extends PromptevError {
|
|
30
|
+
constructor(message, statusCode, responseText) {
|
|
31
|
+
super(message, statusCode, responseText);
|
|
32
|
+
this.name = "RateLimitError";
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
export class ServerError extends PromptevError {
|
|
36
|
+
constructor(message, statusCode, responseText) {
|
|
37
|
+
super(message, statusCode, responseText);
|
|
38
|
+
this.name = "ServerError";
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
export class NetworkError extends PromptevError {
|
|
42
|
+
constructor(message, statusCode, responseText) {
|
|
43
|
+
super(message, statusCode, responseText);
|
|
44
|
+
this.name = "NetworkError";
|
|
45
|
+
}
|
|
46
|
+
}
|
|
47
|
+
// ── Client ──────────────────────────────────────────────
|
|
48
|
+
const RETRYABLE_STATUS = new Set([502, 503, 504]);
|
|
49
|
+
const DEFAULT_MAX_RETRIES = 2;
|
|
50
|
+
const DEFAULT_BACKOFF = 500; // ms
|
|
2
51
|
export class PromptevClient {
|
|
3
52
|
constructor(config) {
|
|
4
|
-
this.baseUrl = config.baseUrl ?? "https://api.promptev.ai";
|
|
5
53
|
this.projectKey = config.projectKey;
|
|
6
|
-
this.
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
54
|
+
this.baseUrl = (config.baseUrl ?? "https://api.promptev.ai").replace(/\/+$/, "");
|
|
55
|
+
this.headers = {
|
|
56
|
+
"Content-Type": "application/json",
|
|
57
|
+
...(config.headers ?? {}),
|
|
58
|
+
};
|
|
59
|
+
this.timeout = config.timeout ?? 30000;
|
|
60
|
+
this.maxRetries = config.maxRetries ?? DEFAULT_MAX_RETRIES;
|
|
61
|
+
}
|
|
62
|
+
// ── Error handling ──────────────────────────────────
|
|
63
|
+
static raiseForStatus(status, text) {
|
|
64
|
+
if (status < 400)
|
|
65
|
+
return;
|
|
66
|
+
let detail;
|
|
67
|
+
try {
|
|
68
|
+
const body = JSON.parse(text);
|
|
69
|
+
detail =
|
|
70
|
+
typeof body.detail === "string" ? body.detail : text || "Unknown error";
|
|
71
|
+
}
|
|
72
|
+
catch {
|
|
73
|
+
detail = text || "Unknown error";
|
|
74
|
+
}
|
|
75
|
+
if (status === 400)
|
|
76
|
+
throw new ValidationError(detail, status, text);
|
|
77
|
+
if (status === 401)
|
|
78
|
+
throw new AuthenticationError(detail, status, text);
|
|
79
|
+
if (status === 403)
|
|
80
|
+
throw new AuthenticationError(detail, status, text);
|
|
81
|
+
if (status === 404)
|
|
82
|
+
throw new NotFoundError(detail, status, text);
|
|
83
|
+
if (status === 429)
|
|
84
|
+
throw new RateLimitError(detail, status, text);
|
|
85
|
+
if (status >= 500)
|
|
86
|
+
throw new ServerError(detail, status, text);
|
|
87
|
+
throw new PromptevError(detail, status, text);
|
|
88
|
+
}
|
|
89
|
+
// ── Retry helper ────────────────────────────────────
|
|
90
|
+
async fetchWithRetry(url, init) {
|
|
91
|
+
let lastError = null;
|
|
92
|
+
for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
|
|
93
|
+
try {
|
|
94
|
+
const controller = new AbortController();
|
|
95
|
+
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
|
|
96
|
+
const resp = await fetch(url, {
|
|
97
|
+
...init,
|
|
98
|
+
signal: controller.signal,
|
|
99
|
+
});
|
|
100
|
+
clearTimeout(timeoutId);
|
|
101
|
+
if (!RETRYABLE_STATUS.has(resp.status) || attempt === this.maxRetries) {
|
|
102
|
+
return resp;
|
|
103
|
+
}
|
|
104
|
+
lastError = new ServerError(`Server error (${resp.status})`, resp.status);
|
|
105
|
+
}
|
|
106
|
+
catch (err) {
|
|
107
|
+
if (err.name === "AbortError") {
|
|
108
|
+
lastError = new NetworkError("Request timed out");
|
|
109
|
+
}
|
|
110
|
+
else {
|
|
111
|
+
lastError = new NetworkError(`Network error: ${err.message}`);
|
|
112
|
+
}
|
|
113
|
+
if (attempt === this.maxRetries)
|
|
114
|
+
throw lastError;
|
|
115
|
+
}
|
|
116
|
+
const delay = DEFAULT_BACKOFF * Math.pow(2, attempt);
|
|
117
|
+
await new Promise((resolve) => setTimeout(resolve, delay));
|
|
118
|
+
}
|
|
119
|
+
throw lastError;
|
|
120
|
+
}
|
|
121
|
+
// ── SSE parser ──────────────────────────────────────
|
|
122
|
+
async *parseSSE(response) {
|
|
123
|
+
const reader = response.body.getReader();
|
|
124
|
+
const decoder = new TextDecoder();
|
|
125
|
+
let buffer = "";
|
|
126
|
+
try {
|
|
127
|
+
while (true) {
|
|
128
|
+
const { done, value } = await reader.read();
|
|
129
|
+
if (done)
|
|
130
|
+
break;
|
|
131
|
+
buffer += decoder.decode(value, { stream: true });
|
|
132
|
+
const lines = buffer.split("\n");
|
|
133
|
+
buffer = lines.pop() || "";
|
|
134
|
+
for (const line of lines) {
|
|
135
|
+
if (!line.startsWith("data: "))
|
|
136
|
+
continue;
|
|
137
|
+
try {
|
|
138
|
+
const data = JSON.parse(line.slice(6));
|
|
139
|
+
const event = {
|
|
140
|
+
type: data.type || "unknown",
|
|
141
|
+
output: data.output || "",
|
|
142
|
+
raw: data,
|
|
143
|
+
};
|
|
144
|
+
yield event;
|
|
145
|
+
if (event.type === "done" || event.type === "error")
|
|
146
|
+
return;
|
|
147
|
+
}
|
|
148
|
+
catch {
|
|
149
|
+
continue;
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
finally {
|
|
155
|
+
reader.releaseLock();
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
// ── Prompt API ──────────────────────────────────────
|
|
159
|
+
/**
|
|
160
|
+
* Compile and execute a prompt.
|
|
161
|
+
*
|
|
162
|
+
* If the prompt has a model configured, it compiles the template,
|
|
163
|
+
* sends it to the LLM, and returns the AI response. If no model
|
|
164
|
+
* is configured, it returns the compiled template string.
|
|
165
|
+
*/
|
|
166
|
+
async runPrompt(promptKey, query, variables) {
|
|
167
|
+
const url = `${this.baseUrl}/api/sdk/v1/prompt/client/${this.projectKey}/${promptKey}`;
|
|
168
|
+
const merged = { query, ...(variables ?? {}) };
|
|
169
|
+
const resp = await this.fetchWithRetry(url, {
|
|
170
|
+
method: "POST",
|
|
171
|
+
headers: this.headers,
|
|
172
|
+
body: JSON.stringify({ variables: merged }),
|
|
12
173
|
});
|
|
174
|
+
const text = await resp.text();
|
|
175
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
176
|
+
const data = JSON.parse(text);
|
|
177
|
+
if (typeof data.prompt !== "string") {
|
|
178
|
+
throw new PromptevError("Unexpected response: missing 'prompt' field");
|
|
179
|
+
}
|
|
180
|
+
return data.prompt;
|
|
13
181
|
}
|
|
14
182
|
/**
|
|
15
|
-
* Compile a prompt
|
|
16
|
-
*
|
|
17
|
-
*
|
|
18
|
-
*
|
|
19
|
-
* "additionalProp1": "string",
|
|
20
|
-
* "additionalProp2": "string",
|
|
21
|
-
* "additionalProp3": "string"
|
|
22
|
-
* }
|
|
23
|
-
* }
|
|
183
|
+
* Compile and execute a prompt with streaming.
|
|
184
|
+
*
|
|
185
|
+
* Use this when the prompt has tools attached or you want real-time
|
|
186
|
+
* output. Returns the same event types as agent streaming.
|
|
24
187
|
*/
|
|
25
|
-
async
|
|
26
|
-
const url =
|
|
188
|
+
async *streamPrompt(promptKey, query, variables) {
|
|
189
|
+
const url = `${this.baseUrl}/api/sdk/v1/prompt/client/${this.projectKey}/${promptKey}?stream=true`;
|
|
190
|
+
const merged = { query, ...(variables ?? {}) };
|
|
191
|
+
const controller = new AbortController();
|
|
192
|
+
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
|
|
193
|
+
let resp;
|
|
27
194
|
try {
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
}
|
|
35
|
-
return compiled;
|
|
195
|
+
resp = await fetch(url, {
|
|
196
|
+
method: "POST",
|
|
197
|
+
headers: this.headers,
|
|
198
|
+
body: JSON.stringify({ variables: merged }),
|
|
199
|
+
signal: controller.signal,
|
|
200
|
+
});
|
|
36
201
|
}
|
|
37
202
|
catch (err) {
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
203
|
+
clearTimeout(timeoutId);
|
|
204
|
+
if (err.name === "AbortError")
|
|
205
|
+
throw new NetworkError("Request timed out");
|
|
206
|
+
throw new NetworkError(`Network error: ${err.message}`);
|
|
207
|
+
}
|
|
208
|
+
clearTimeout(timeoutId);
|
|
209
|
+
if (resp.status >= 400) {
|
|
210
|
+
const text = await resp.text();
|
|
211
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
212
|
+
}
|
|
213
|
+
const contentType = resp.headers.get("content-type") || "";
|
|
214
|
+
// Non-streaming fallback: prompt has no model, backend returns JSON
|
|
215
|
+
if (!contentType.includes("text/event-stream")) {
|
|
216
|
+
const data = await resp.json();
|
|
217
|
+
yield {
|
|
218
|
+
type: "done",
|
|
219
|
+
output: data.prompt || "",
|
|
220
|
+
raw: data,
|
|
221
|
+
};
|
|
222
|
+
return;
|
|
223
|
+
}
|
|
224
|
+
yield* this.parseSSE(resp);
|
|
225
|
+
}
|
|
226
|
+
// ── Agent API ───────────────────────────────────────
|
|
227
|
+
/**
|
|
228
|
+
* Start a new agent chat session.
|
|
229
|
+
*/
|
|
230
|
+
async startAgent(chatbotId, options) {
|
|
231
|
+
const url = `${this.baseUrl}/api/sdk/v1/agent/${this.projectKey}/${chatbotId}/start`;
|
|
232
|
+
const payload = {};
|
|
233
|
+
if (options?.visitor != null)
|
|
234
|
+
payload.visitor = options.visitor;
|
|
235
|
+
if (options?.platform && options.platform !== "sdk")
|
|
236
|
+
payload.platform = options.platform;
|
|
237
|
+
const resp = await this.fetchWithRetry(url, {
|
|
238
|
+
method: "POST",
|
|
239
|
+
headers: this.headers,
|
|
240
|
+
body: JSON.stringify(payload),
|
|
241
|
+
});
|
|
242
|
+
const text = await resp.text();
|
|
243
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
244
|
+
const data = JSON.parse(text);
|
|
245
|
+
return {
|
|
246
|
+
sessionToken: data.session_token,
|
|
247
|
+
chatbotId: data.chatbot_id,
|
|
248
|
+
name: data.name,
|
|
249
|
+
memoryEnabled: data.memory_enabled ?? false,
|
|
250
|
+
messages: data.messages ?? [],
|
|
251
|
+
};
|
|
252
|
+
}
|
|
253
|
+
/**
|
|
254
|
+
* Stream an agent response as SSE events.
|
|
255
|
+
*/
|
|
256
|
+
async *streamAgent(chatbotId, options) {
|
|
257
|
+
const url = `${this.baseUrl}/api/sdk/v1/agent/${this.projectKey}/${chatbotId}/stream?session_token=${encodeURIComponent(options.sessionToken)}`;
|
|
258
|
+
const controller = new AbortController();
|
|
259
|
+
const timeoutId = setTimeout(() => controller.abort(), this.timeout);
|
|
260
|
+
let resp;
|
|
261
|
+
try {
|
|
262
|
+
resp = await fetch(url, {
|
|
263
|
+
method: "POST",
|
|
264
|
+
headers: this.headers,
|
|
265
|
+
body: JSON.stringify({ query: options.query }),
|
|
266
|
+
signal: controller.signal,
|
|
267
|
+
});
|
|
268
|
+
}
|
|
269
|
+
catch (err) {
|
|
270
|
+
clearTimeout(timeoutId);
|
|
271
|
+
if (err.name === "AbortError")
|
|
272
|
+
throw new NetworkError("Request timed out");
|
|
273
|
+
throw new NetworkError(`Network error: ${err.message}`);
|
|
274
|
+
}
|
|
275
|
+
clearTimeout(timeoutId);
|
|
276
|
+
if (resp.status >= 400) {
|
|
277
|
+
const text = await resp.text();
|
|
278
|
+
PromptevClient.raiseForStatus(resp.status, text);
|
|
49
279
|
}
|
|
280
|
+
yield* this.parseSSE(resp);
|
|
50
281
|
}
|
|
51
282
|
}
|
|
52
283
|
export default PromptevClient;
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@promptev/client",
|
|
3
|
-
"version": "0.0
|
|
4
|
-
"description": "
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "JavaScript/TypeScript SDK for Promptev — run AI prompts and agents programmatically",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "./dist/cjs/index.cjs",
|
|
7
7
|
"module": "./dist/esm/index.js",
|
|
@@ -32,9 +32,9 @@
|
|
|
32
32
|
"client",
|
|
33
33
|
"sdk",
|
|
34
34
|
"ai",
|
|
35
|
-
"
|
|
36
|
-
"
|
|
37
|
-
"
|
|
35
|
+
"agents",
|
|
36
|
+
"agent-sdk",
|
|
37
|
+
"context-engineering",
|
|
38
38
|
"typescript",
|
|
39
39
|
"javascript"
|
|
40
40
|
],
|
|
@@ -44,10 +44,7 @@
|
|
|
44
44
|
"build:cjs": "tsc -p tsconfig.cjs.json && node scripts/rename-cjs.cjs",
|
|
45
45
|
"build": "npm run build:esm && npm run build:cjs"
|
|
46
46
|
},
|
|
47
|
-
"dependencies": {
|
|
48
|
-
"axios": "^1.10.0",
|
|
49
|
-
"node-cache": "^5.1.2"
|
|
50
|
-
},
|
|
47
|
+
"dependencies": {},
|
|
51
48
|
"devDependencies": {
|
|
52
49
|
"@types/node": "^20.10.0",
|
|
53
50
|
"ts-node": "^10.9.2",
|
package/readme.md
CHANGED
|
@@ -1,116 +1,450 @@
|
|
|
1
|
-
#
|
|
1
|
+
# Promptev JavaScript SDK
|
|
2
2
|
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
---
|
|
3
|
+
The official JavaScript/TypeScript SDK for [Promptev.ai](https://promptev.ai) — run AI prompts and agents programmatically using your project API key.
|
|
6
4
|
|
|
7
5
|
## Installation
|
|
8
6
|
|
|
9
7
|
```bash
|
|
10
8
|
npm install @promptev/client
|
|
11
9
|
# or
|
|
12
|
-
yarn
|
|
13
|
-
#or
|
|
10
|
+
yarn add @promptev/client
|
|
11
|
+
# or
|
|
14
12
|
pnpm add @promptev/client
|
|
15
13
|
```
|
|
16
14
|
|
|
17
|
-
|
|
15
|
+
Requires Node.js 18+ or any modern browser. Zero dependencies.
|
|
18
16
|
|
|
19
|
-
##
|
|
17
|
+
## Platform Support
|
|
20
18
|
|
|
21
|
-
|
|
22
|
-
---
|
|
19
|
+
Works everywhere — backend, frontend, and edge runtimes. Built entirely on [Web Platform APIs](https://wintercg.org/) (`fetch`, `ReadableStream`, `AbortController`, `TextDecoder`) with no Node.js-specific modules.
|
|
23
20
|
|
|
24
|
-
|
|
21
|
+
| Environment | Supported | Notes |
|
|
22
|
+
|---|---|---|
|
|
23
|
+
| **Node.js 18+** | ✅ | ESM and CommonJS |
|
|
24
|
+
| **Bun** | ✅ | Native Web API support |
|
|
25
|
+
| **Deno** | ✅ | Native Web API support |
|
|
26
|
+
| **React / Next.js** | ✅ | Works with any bundler |
|
|
27
|
+
| **Angular** | ✅ | Full TypeScript types included |
|
|
28
|
+
| **Vue / Nuxt** | ✅ | Standard ESM import |
|
|
29
|
+
| **Svelte / SvelteKit** | ✅ | Standard ESM import |
|
|
30
|
+
| **Vanilla JS** | ✅ | No framework required |
|
|
31
|
+
| **Cloudflare Workers** | ✅ | Edge runtime compatible |
|
|
32
|
+
| **Vercel Edge Functions** | ✅ | Edge runtime compatible |
|
|
25
33
|
|
|
26
|
-
|
|
34
|
+
```js
|
|
35
|
+
// ESM (Node 18+, React, Next.js, Vue, Angular, Svelte, etc.)
|
|
36
|
+
import { PromptevClient } from "@promptev/client";
|
|
37
|
+
|
|
38
|
+
// CommonJS (legacy Node.js projects)
|
|
39
|
+
const { PromptevClient } = require("@promptev/client");
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
## Quick Start
|
|
27
43
|
|
|
28
44
|
```ts
|
|
29
45
|
import { PromptevClient } from "@promptev/client";
|
|
30
46
|
|
|
31
|
-
const client = new PromptevClient({
|
|
32
|
-
|
|
33
|
-
|
|
47
|
+
const client = new PromptevClient({ projectKey: "pv_sk_your_key_here" });
|
|
48
|
+
|
|
49
|
+
// Run a prompt
|
|
50
|
+
const result = await client.runPrompt("support-agent",
|
|
51
|
+
"What is the refund policy?",
|
|
52
|
+
{ company: "Acme Corp" }
|
|
53
|
+
);
|
|
54
|
+
console.log(result);
|
|
55
|
+
|
|
56
|
+
// Chat with an AI agent
|
|
57
|
+
const session = await client.startAgent("your-agent-id");
|
|
58
|
+
|
|
59
|
+
for await (const event of client.streamAgent(session.chatbotId, {
|
|
60
|
+
sessionToken: session.sessionToken,
|
|
61
|
+
query: "Summarize our Q4 sales report",
|
|
62
|
+
})) {
|
|
63
|
+
if (event.type === "done") console.log(event.output);
|
|
64
|
+
}
|
|
34
65
|
```
|
|
35
66
|
|
|
36
|
-
|
|
67
|
+
## Prompts
|
|
68
|
+
|
|
69
|
+
Promptev prompts are versioned, server-managed templates. `runPrompt` compiles the template with your variables — and if the prompt has a model configured in Promptev, it also executes it against the LLM and returns the AI response directly.
|
|
37
70
|
|
|
38
|
-
###
|
|
71
|
+
### Run a prompt with variables
|
|
39
72
|
|
|
40
73
|
```ts
|
|
41
|
-
const
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
74
|
+
const result = await client.runPrompt("support-agent",
|
|
75
|
+
"How do I reset my password?",
|
|
76
|
+
{ company: "Acme Corp", tone: "professional" }
|
|
77
|
+
);
|
|
78
|
+
```
|
|
45
79
|
|
|
46
|
-
|
|
47
|
-
|
|
80
|
+
### Run a prompt without variables
|
|
81
|
+
|
|
82
|
+
```ts
|
|
83
|
+
const result = await client.runPrompt("knowledge-base",
|
|
84
|
+
"What is the refund policy?"
|
|
85
|
+
);
|
|
48
86
|
```
|
|
49
87
|
|
|
50
|
-
|
|
88
|
+
### With a model configured (auto-execute)
|
|
51
89
|
|
|
52
|
-
|
|
90
|
+
If your prompt has a model and/or context packs attached in Promptev, `runPrompt` compiles the template, retrieves relevant context via RAG, sends it to the LLM, and returns the AI response:
|
|
53
91
|
|
|
54
92
|
```ts
|
|
55
|
-
const
|
|
56
|
-
|
|
57
|
-
|
|
93
|
+
const answer = await client.runPrompt("support-agent",
|
|
94
|
+
"What is the refund policy?",
|
|
95
|
+
{ company: "Acme Corp" }
|
|
96
|
+
);
|
|
97
|
+
console.log(answer); // "Our refund policy allows returns within 30 days..."
|
|
58
98
|
```
|
|
59
99
|
|
|
60
|
-
|
|
100
|
+
### Without a model (use with your own LLM)
|
|
61
101
|
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
### 4. Use with LLM APIs (OpenAI, Claude, Gemini, etc.)
|
|
102
|
+
If no model is configured, `runPrompt` returns the compiled template — use it with any LLM:
|
|
65
103
|
|
|
66
104
|
```ts
|
|
67
105
|
import OpenAI from "openai";
|
|
68
106
|
|
|
107
|
+
const client = new PromptevClient({ projectKey: "pv_sk_..." });
|
|
69
108
|
const openai = new OpenAI({ apiKey: "sk-..." });
|
|
70
109
|
|
|
71
|
-
const
|
|
72
|
-
|
|
73
|
-
}
|
|
110
|
+
const systemPrompt = await client.runPrompt("support-agent",
|
|
111
|
+
"How do I reset my password?",
|
|
112
|
+
{ company: "Acme Corp", tone: "professional" }
|
|
113
|
+
);
|
|
74
114
|
|
|
75
115
|
const response = await openai.chat.completions.create({
|
|
76
116
|
model: "gpt-4",
|
|
77
|
-
messages: [
|
|
117
|
+
messages: [
|
|
118
|
+
{ role: "system", content: systemPrompt },
|
|
119
|
+
{ role: "user", content: "How do I reset my password?" },
|
|
120
|
+
],
|
|
78
121
|
});
|
|
79
|
-
|
|
80
122
|
console.log(response.choices[0].message.content);
|
|
81
123
|
```
|
|
82
124
|
|
|
83
|
-
|
|
125
|
+
### Stream a prompt (with tools or real-time output)
|
|
126
|
+
|
|
127
|
+
When a prompt has tools attached (Jira, Slack, GitHub, etc.) or you want real-time output, use `streamPrompt`. It returns SSE events — same format as agent streaming:
|
|
128
|
+
|
|
129
|
+
```ts
|
|
130
|
+
for await (const event of client.streamPrompt("research-assistant",
|
|
131
|
+
"Find all P1 bugs assigned to me",
|
|
132
|
+
{ project: "ACME" }
|
|
133
|
+
)) {
|
|
134
|
+
if (event.type === "thoughts") console.log(`Thinking: ${event.output}`);
|
|
135
|
+
else if (event.type === "processing") console.log(`Running: ${event.output}`);
|
|
136
|
+
else if (event.type === "done") console.log(event.output);
|
|
137
|
+
}
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
> **When to use which:**
|
|
141
|
+
> - `runPrompt()` — simple prompt execution, no tools, returns a string
|
|
142
|
+
> - `streamPrompt()` — prompts with tools, RAG-heavy queries, or when you want real-time output
|
|
143
|
+
|
|
144
|
+
## Agents
|
|
145
|
+
|
|
146
|
+
Promptev agents are deployed AI assistants with built-in memory, tools (Jira, Slack, GitHub, etc.), and RAG context packs. The SDK lets you start sessions and stream responses in real time.
|
|
147
|
+
|
|
148
|
+
### Start a session
|
|
149
|
+
|
|
150
|
+
```ts
|
|
151
|
+
const session = await client.startAgent("your-agent-id", { visitor: "John" });
|
|
152
|
+
|
|
153
|
+
console.log(session.sessionToken); // Use this for all subsequent messages
|
|
154
|
+
console.log(session.name); // Agent display name
|
|
155
|
+
console.log(session.memoryEnabled); // Whether agent retains conversation context
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
### Stream a response
|
|
84
159
|
|
|
85
|
-
|
|
160
|
+
The agent responds via Server-Sent Events (SSE). Each event has a `type` and `output`:
|
|
86
161
|
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
162
|
+
| Event Type | Description |
|
|
163
|
+
|---|---|
|
|
164
|
+
| `thoughts` | Agent's internal reasoning |
|
|
165
|
+
| `processing` | Tool execution status (e.g., "Searching Jira...") |
|
|
166
|
+
| `approval_required` | Agent needs permission to run a tool |
|
|
167
|
+
| `done` | Final response text |
|
|
168
|
+
| `error` | Something went wrong |
|
|
90
169
|
|
|
91
|
-
|
|
170
|
+
```ts
|
|
171
|
+
for await (const event of client.streamAgent(session.chatbotId, {
|
|
172
|
+
sessionToken: session.sessionToken,
|
|
173
|
+
query: "What are the open P1 bugs in our backlog?",
|
|
174
|
+
})) {
|
|
175
|
+
if (event.type === "thoughts") console.log(`Thinking: ${event.output}`);
|
|
176
|
+
else if (event.type === "processing") console.log(`Running: ${event.output}`);
|
|
177
|
+
else if (event.type === "done") console.log(`\n${event.output}`);
|
|
178
|
+
else if (event.type === "error") console.log(`Error: ${event.output}`);
|
|
179
|
+
}
|
|
180
|
+
```
|
|
92
181
|
|
|
93
|
-
|
|
182
|
+
### Multi-turn conversation
|
|
183
|
+
|
|
184
|
+
The session token maintains conversation context across messages:
|
|
94
185
|
|
|
95
186
|
```ts
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
187
|
+
const session = await client.startAgent("your-agent-id", { visitor: "Sarah" });
|
|
188
|
+
|
|
189
|
+
// First message
|
|
190
|
+
for await (const event of client.streamAgent(session.chatbotId, {
|
|
191
|
+
sessionToken: session.sessionToken,
|
|
192
|
+
query: "Summarize our Q4 sales report",
|
|
193
|
+
})) {
|
|
194
|
+
if (event.type === "done") console.log(event.output);
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
// Follow-up — agent remembers the previous context
|
|
198
|
+
for await (const event of client.streamAgent(session.chatbotId, {
|
|
199
|
+
sessionToken: session.sessionToken,
|
|
200
|
+
query: "Compare that with Q3",
|
|
201
|
+
})) {
|
|
202
|
+
if (event.type === "done") console.log(event.output);
|
|
203
|
+
}
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
### Collect the final response only
|
|
207
|
+
|
|
208
|
+
```ts
|
|
209
|
+
async function askAgent(client, session, query) {
|
|
210
|
+
for await (const event of client.streamAgent(session.chatbotId, {
|
|
211
|
+
sessionToken: session.sessionToken,
|
|
212
|
+
query,
|
|
213
|
+
})) {
|
|
214
|
+
if (event.type === "done") return event.output;
|
|
215
|
+
if (event.type === "error") throw new Error(event.output);
|
|
216
|
+
}
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
const answer = await askAgent(client, session, "What's our monthly churn rate?");
|
|
220
|
+
```
|
|
221
|
+
|
|
222
|
+
## Error Handling
|
|
223
|
+
|
|
224
|
+
The SDK throws typed errors for each failure scenario:
|
|
225
|
+
|
|
226
|
+
```ts
|
|
227
|
+
import {
|
|
228
|
+
PromptevClient,
|
|
229
|
+
ValidationError,
|
|
230
|
+
AuthenticationError,
|
|
231
|
+
NotFoundError,
|
|
232
|
+
RateLimitError,
|
|
233
|
+
ServerError,
|
|
234
|
+
NetworkError,
|
|
235
|
+
} from "@promptev/client";
|
|
236
|
+
|
|
237
|
+
const client = new PromptevClient({ projectKey: "pv_sk_..." });
|
|
238
|
+
|
|
239
|
+
try {
|
|
240
|
+
const result = await client.runPrompt("my-prompt", "Hello", { name: "Ava" });
|
|
241
|
+
} catch (err) {
|
|
242
|
+
if (err instanceof ValidationError) {
|
|
243
|
+
// 400 — missing variables, bad input
|
|
244
|
+
console.log(`Invalid request: ${err.message}`);
|
|
245
|
+
} else if (err instanceof NotFoundError) {
|
|
246
|
+
// 404 — prompt or project not found
|
|
247
|
+
console.log(`Not found: ${err.message}`);
|
|
248
|
+
} else if (err instanceof AuthenticationError) {
|
|
249
|
+
// 401/403 — invalid API key or agent not active
|
|
250
|
+
console.log(`Auth error: ${err.message}`);
|
|
251
|
+
} else if (err instanceof RateLimitError) {
|
|
252
|
+
// 429 — API usage quota exceeded
|
|
253
|
+
console.log(`Rate limited: ${err.message}`);
|
|
254
|
+
} else if (err instanceof ServerError) {
|
|
255
|
+
// 5xx — server error (after retries exhausted)
|
|
256
|
+
console.log(`Server error: ${err.message}`);
|
|
257
|
+
} else if (err instanceof NetworkError) {
|
|
258
|
+
// Connection failed, timeout, DNS error
|
|
259
|
+
console.log(`Network error: ${err.message}`);
|
|
260
|
+
}
|
|
261
|
+
}
|
|
262
|
+
```
|
|
263
|
+
|
|
264
|
+
All errors extend `PromptevError` and include:
|
|
265
|
+
- `err.statusCode` — HTTP status code (if applicable)
|
|
266
|
+
- `err.responseText` — Raw response body (for debugging)
|
|
267
|
+
|
|
268
|
+
## Configuration
|
|
269
|
+
|
|
270
|
+
```ts
|
|
271
|
+
const client = new PromptevClient({
|
|
272
|
+
projectKey: "pv_sk_...", // Required — your project API key
|
|
273
|
+
baseUrl: "https://api.promptev.ai", // Default — override for self-hosted
|
|
274
|
+
timeout: 30000, // Default — request timeout in ms
|
|
275
|
+
maxRetries: 2, // Default — retries for 502/503/504
|
|
276
|
+
headers: { "X-Custom": "value" }, // Optional — extra HTTP headers
|
|
100
277
|
});
|
|
101
278
|
```
|
|
102
279
|
|
|
103
|
-
|
|
280
|
+
| Parameter | Default | Description |
|
|
281
|
+
|---|---|---|
|
|
282
|
+
| `projectKey` | *required* | Your Promptev project API key |
|
|
283
|
+
| `baseUrl` | `https://api.promptev.ai` | API base URL |
|
|
284
|
+
| `timeout` | `30000` | Request timeout in milliseconds |
|
|
285
|
+
| `maxRetries` | `2` | Automatic retries for transient server errors (502, 503, 504) |
|
|
286
|
+
| `headers` | `{}` | Additional HTTP headers |
|
|
287
|
+
|
|
288
|
+
## API Reference
|
|
289
|
+
|
|
290
|
+
### `PromptevClient`
|
|
291
|
+
|
|
292
|
+
| Method | Description | Returns |
|
|
293
|
+
|---|---|---|
|
|
294
|
+
| `runPrompt(promptKey, query, variables?)` | Compile and execute a prompt | `Promise<string>` |
|
|
295
|
+
| `streamPrompt(promptKey, query, variables?)` | Stream prompt execution with tools | `AsyncGenerator<AgentEvent>` |
|
|
296
|
+
| `startAgent(chatbotId, options?)` | Start agent session | `Promise<AgentSession>` |
|
|
297
|
+
| `streamAgent(chatbotId, options)` | Stream agent response | `AsyncGenerator<AgentEvent>` |
|
|
298
|
+
|
|
299
|
+
### `AgentSession`
|
|
300
|
+
|
|
301
|
+
| Field | Type | Description |
|
|
302
|
+
|---|---|---|
|
|
303
|
+
| `sessionToken` | `string` | Token for subsequent stream calls |
|
|
304
|
+
| `chatbotId` | `string` | Agent identifier |
|
|
305
|
+
| `name` | `string` | Agent display name |
|
|
306
|
+
| `memoryEnabled` | `boolean` | Whether agent retains conversation context |
|
|
307
|
+
| `messages` | `array` | Previous messages (populated when resuming a session) |
|
|
308
|
+
|
|
309
|
+
### `AgentEvent`
|
|
310
|
+
|
|
311
|
+
| Field | Type | Description |
|
|
312
|
+
|---|---|---|
|
|
313
|
+
| `type` | `string` | Event type: `thoughts`, `processing`, `done`, `error`, `approval_required` |
|
|
314
|
+
| `output` | `string` | Event content text |
|
|
315
|
+
| `raw` | `object` | Full parsed SSE event data |
|
|
316
|
+
|
|
317
|
+
### Exceptions
|
|
318
|
+
|
|
319
|
+
| Error | HTTP Status | When |
|
|
320
|
+
|---|---|---|
|
|
321
|
+
| `ValidationError` | 400 | Missing required variables, bad input |
|
|
322
|
+
| `AuthenticationError` | 401, 403 | Invalid API key, agent not active |
|
|
323
|
+
| `NotFoundError` | 404 | Project, prompt, or agent not found |
|
|
324
|
+
| `RateLimitError` | 429 | API usage quota exceeded |
|
|
325
|
+
| `ServerError` | 5xx | Server error (after retries exhausted) |
|
|
326
|
+
| `NetworkError` | — | Connection failed, timeout, DNS error |
|
|
327
|
+
| `PromptevError` | any | Base class for all above errors |
|
|
328
|
+
|
|
329
|
+
## Framework Examples
|
|
330
|
+
|
|
331
|
+
### React
|
|
332
|
+
|
|
333
|
+
```tsx
|
|
334
|
+
import { useState } from "react";
|
|
335
|
+
import { PromptevClient } from "@promptev/client";
|
|
336
|
+
|
|
337
|
+
const client = new PromptevClient({ projectKey: "pv_sk_..." });
|
|
338
|
+
|
|
339
|
+
export function AskAI() {
|
|
340
|
+
const [answer, setAnswer] = useState("");
|
|
341
|
+
const [loading, setLoading] = useState(false);
|
|
342
|
+
|
|
343
|
+
async function handleAsk() {
|
|
344
|
+
setLoading(true);
|
|
345
|
+
try {
|
|
346
|
+
const result = await client.runPrompt("support-agent", "What is your refund policy?");
|
|
347
|
+
setAnswer(result);
|
|
348
|
+
} finally {
|
|
349
|
+
setLoading(false);
|
|
350
|
+
}
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
return (
|
|
354
|
+
<div>
|
|
355
|
+
<button onClick={handleAsk} disabled={loading}>Ask AI</button>
|
|
356
|
+
{answer && <p>{answer}</p>}
|
|
357
|
+
</div>
|
|
358
|
+
);
|
|
359
|
+
}
|
|
360
|
+
```
|
|
361
|
+
|
|
362
|
+
### React — Streaming Agent Chat
|
|
363
|
+
|
|
364
|
+
```tsx
|
|
365
|
+
import { useState, useRef } from "react";
|
|
366
|
+
import { PromptevClient } from "@promptev/client";
|
|
367
|
+
|
|
368
|
+
const client = new PromptevClient({ projectKey: "pv_sk_..." });
|
|
369
|
+
|
|
370
|
+
export function AgentChat() {
|
|
371
|
+
const [messages, setMessages] = useState<string[]>([]);
|
|
372
|
+
const [input, setInput] = useState("");
|
|
373
|
+
const sessionRef = useRef(null);
|
|
374
|
+
|
|
375
|
+
async function startChat() {
|
|
376
|
+
sessionRef.current = await client.startAgent("your-agent-id", { visitor: "user" });
|
|
377
|
+
}
|
|
378
|
+
|
|
379
|
+
async function sendMessage() {
|
|
380
|
+
if (!sessionRef.current) await startChat();
|
|
381
|
+
|
|
382
|
+
setMessages((prev) => [...prev, `You: ${input}`]);
|
|
383
|
+
const query = input;
|
|
384
|
+
setInput("");
|
|
385
|
+
|
|
386
|
+
for await (const event of client.streamAgent(sessionRef.current.chatbotId, {
|
|
387
|
+
sessionToken: sessionRef.current.sessionToken,
|
|
388
|
+
query,
|
|
389
|
+
})) {
|
|
390
|
+
if (event.type === "done") {
|
|
391
|
+
setMessages((prev) => [...prev, `Agent: ${event.output}`]);
|
|
392
|
+
}
|
|
393
|
+
}
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
return (
|
|
397
|
+
<div>
|
|
398
|
+
{messages.map((msg, i) => <p key={i}>{msg}</p>)}
|
|
399
|
+
<input value={input} onChange={(e) => setInput(e.target.value)} />
|
|
400
|
+
<button onClick={sendMessage}>Send</button>
|
|
401
|
+
</div>
|
|
402
|
+
);
|
|
403
|
+
}
|
|
404
|
+
```
|
|
405
|
+
|
|
406
|
+
### Node.js / Express API
|
|
407
|
+
|
|
408
|
+
```js
|
|
409
|
+
import express from "express";
|
|
410
|
+
import { PromptevClient } from "@promptev/client";
|
|
411
|
+
|
|
412
|
+
const app = express();
|
|
413
|
+
const client = new PromptevClient({ projectKey: "pv_sk_..." });
|
|
414
|
+
|
|
415
|
+
app.post("/api/ask", express.json(), async (req, res) => {
|
|
416
|
+
const answer = await client.runPrompt("support-agent", req.body.question);
|
|
417
|
+
res.json({ answer });
|
|
418
|
+
});
|
|
419
|
+
|
|
420
|
+
app.listen(3000);
|
|
421
|
+
```
|
|
422
|
+
|
|
423
|
+
### Vanilla HTML
|
|
424
|
+
|
|
425
|
+
```html
|
|
426
|
+
<script type="module">
|
|
427
|
+
import { PromptevClient } from "https://cdn.jsdelivr.net/npm/@promptev/client/dist/esm/index.js";
|
|
428
|
+
|
|
429
|
+
const client = new PromptevClient({ projectKey: "pv_sk_..." });
|
|
430
|
+
|
|
431
|
+
document.getElementById("ask-btn").addEventListener("click", async () => {
|
|
432
|
+
const answer = await client.runPrompt("support-agent", "What is the refund policy?");
|
|
433
|
+
document.getElementById("output").textContent = answer;
|
|
434
|
+
});
|
|
435
|
+
</script>
|
|
436
|
+
```
|
|
104
437
|
|
|
105
438
|
## License
|
|
106
439
|
|
|
107
|
-
This SDK is
|
|
440
|
+
This SDK is commercial software by [Promptev Inc](https://promptev.ai).
|
|
108
441
|
|
|
109
|
-
|
|
442
|
+
- Free tier use allowed
|
|
443
|
+
- Production use requires an active subscription
|
|
110
444
|
|
|
111
|
-
|
|
445
|
+
See [LICENSE](./LICENSE.txt) for full terms.
|
|
112
446
|
|
|
113
|
-
##
|
|
447
|
+
## Support
|
|
114
448
|
|
|
115
|
-
-
|
|
116
|
-
-
|
|
449
|
+
- Website: [promptev.ai](https://promptev.ai)
|
|
450
|
+
- Email: support@promptev.ai
|
package/dist/cjs/index.d.ts
DELETED
|
@@ -1,30 +0,0 @@
|
|
|
1
|
-
export interface PromptliyClientConfig {
|
|
2
|
-
baseUrl?: string;
|
|
3
|
-
projectKey: string;
|
|
4
|
-
}
|
|
5
|
-
export interface Prompt {
|
|
6
|
-
prompt: string;
|
|
7
|
-
variables: string[] | string | null;
|
|
8
|
-
format(values?: Record<string, string>): string;
|
|
9
|
-
}
|
|
10
|
-
export declare class PromptliyClient {
|
|
11
|
-
private client;
|
|
12
|
-
private baseUrl;
|
|
13
|
-
private projectKey;
|
|
14
|
-
private promptCache;
|
|
15
|
-
private refreshInterval;
|
|
16
|
-
private cacheRefreshIntervalMs;
|
|
17
|
-
private isReady;
|
|
18
|
-
constructor(config: PromptliyClientConfig);
|
|
19
|
-
private ensureReady;
|
|
20
|
-
private startCacheRefresh;
|
|
21
|
-
private refreshCachedPrompt;
|
|
22
|
-
private createPromptObject;
|
|
23
|
-
getPrompt(promptKey: string): Promise<Prompt> & {
|
|
24
|
-
format: (values: Record<string, string>) => Promise<string>;
|
|
25
|
-
};
|
|
26
|
-
private fetchPrompt;
|
|
27
|
-
private fetchPromptFromServer;
|
|
28
|
-
dispose(): Promise<void>;
|
|
29
|
-
}
|
|
30
|
-
export default PromptliyClient;
|