@promptev/client 0.0.2 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/readme.md CHANGED
@@ -1,125 +1,450 @@
1
- # promptev-client (JavaScript / TypeScript)
1
+ # Promptev JavaScript SDK
2
2
 
3
- A lightweight JS SDK to securely fetch, format, and cache prompts from [Promptev.ai](https://promptev.ai).
4
-
5
- ---
3
+ The official JavaScript/TypeScript SDK for [Promptev.ai](https://promptev.ai) — run AI prompts and agents programmatically using your project API key.
6
4
 
7
5
  ## Installation
8
6
 
9
7
  ```bash
10
8
  npm install @promptev/client
11
9
  # or
12
- yarn install @promptev/client
13
- #or
10
+ yarn add @promptev/client
11
+ # or
14
12
  pnpm add @promptev/client
15
13
  ```
16
14
 
17
- > Optional for Node.js caching (recommended):
18
- ```bash
19
- npm install node-cache
20
- ```
15
+ Requires Node.js 18+ or any modern browser. Zero dependencies.
21
16
 
22
- ---
17
+ ## Platform Support
23
18
 
24
- ## What is Promptev?
19
+ Works everywhere — backend, frontend, and edge runtimes. Built entirely on [Web Platform APIs](https://wintercg.org/) (`fetch`, `ReadableStream`, `AbortController`, `TextDecoder`) with no Node.js-specific modules.
25
20
 
26
- [Promptev](https://promptev.ai) helps teams manage, version, and collaborate on production-grade prompts with live context, variables, versioning and integrations.
21
+ | Environment | Supported | Notes |
22
+ |---|---|---|
23
+ | **Node.js 18+** | ✅ | ESM and CommonJS |
24
+ | **Bun** | ✅ | Native Web API support |
25
+ | **Deno** | ✅ | Native Web API support |
26
+ | **React / Next.js** | ✅ | Works with any bundler |
27
+ | **Angular** | ✅ | Full TypeScript types included |
28
+ | **Vue / Nuxt** | ✅ | Standard ESM import |
29
+ | **Svelte / SvelteKit** | ✅ | Standard ESM import |
30
+ | **Vanilla JS** | ✅ | No framework required |
31
+ | **Cloudflare Workers** | ✅ | Edge runtime compatible |
32
+ | **Vercel Edge Functions** | ✅ | Edge runtime compatible |
27
33
 
28
- ---
34
+ ```js
35
+ // ESM (Node 18+, React, Next.js, Vue, Angular, Svelte, etc.)
36
+ import { PromptevClient } from "@promptev/client";
29
37
 
30
- ## Usage
38
+ // CommonJS (legacy Node.js projects)
39
+ const { PromptevClient } = require("@promptev/client");
40
+ ```
31
41
 
32
- ### 1. Initialize the client
42
+ ## Quick Start
33
43
 
34
44
  ```ts
35
45
  import { PromptevClient } from "@promptev/client";
36
46
 
37
- const client = new PromptevClient({
38
- projectKey: "pv_sk_abc123yourkey"
39
- });
47
+ const client = new PromptevClient({ projectKey: "pv_sk_your_key_here" });
48
+
49
+ // Run a prompt
50
+ const result = await client.runPrompt("support-agent",
51
+ "What is the refund policy?",
52
+ { company: "Acme Corp" }
53
+ );
54
+ console.log(result);
55
+
56
+ // Chat with an AI agent
57
+ const session = await client.startAgent("your-agent-id");
58
+
59
+ for await (const event of client.streamAgent(session.chatbotId, {
60
+ sessionToken: session.sessionToken,
61
+ query: "Summarize our Q4 sales report",
62
+ })) {
63
+ if (event.type === "done") console.log(event.output);
64
+ }
40
65
  ```
41
66
 
42
- ---
67
+ ## Prompts
68
+
69
+ Promptev prompts are versioned, server-managed templates. `runPrompt` compiles the template with your variables — and if the prompt has a model configured in Promptev, it also executes it against the LLM and returns the AI response directly.
43
70
 
44
- ### 2. Fetch a prompt with variables
71
+ ### Run a prompt with variables
45
72
 
46
73
  ```ts
47
- const prompt = await client.getPrompt("onboarding-email", {
48
- name: "Ava",
49
- product: "Promptev"
50
- });
74
+ const result = await client.runPrompt("support-agent",
75
+ "How do I reset my password?",
76
+ { company: "Acme Corp", tone: "professional" }
77
+ );
78
+ ```
51
79
 
52
- console.log(prompt);
53
- // "Subject: Welcome, Ava! Hey Ava, Thanks for joining Promptev..."
80
+ ### Run a prompt without variables
81
+
82
+ ```ts
83
+ const result = await client.runPrompt("knowledge-base",
84
+ "What is the refund policy?"
85
+ );
54
86
  ```
55
87
 
56
- ---
88
+ ### With a model configured (auto-execute)
57
89
 
58
- ### 3. Fetch a prompt without variables
90
+ If your prompt has a model and/or context packs attached in Promptev, `runPrompt` compiles the template, retrieves relevant context via RAG, sends it to the LLM, and returns the AI response:
59
91
 
60
92
  ```ts
61
- const staticPrompt = await client.getPrompt("system-intro");
62
- console.log(staticPrompt);
63
- // "You are a helpful AI assistant..."
93
+ const answer = await client.runPrompt("support-agent",
94
+ "What is the refund policy?",
95
+ { company: "Acme Corp" }
96
+ );
97
+ console.log(answer); // "Our refund policy allows returns within 30 days..."
64
98
  ```
65
99
 
66
- > ⚠️ No variables? You can omit the second argument.
100
+ ### Without a model (use with your own LLM)
67
101
 
68
- ---
69
-
70
- ### 4. Use with LLM APIs (OpenAI, Claude, Gemini, etc.)
102
+ If no model is configured, `runPrompt` returns the compiled template — use it with any LLM:
71
103
 
72
104
  ```ts
73
105
  import OpenAI from "openai";
74
106
 
107
+ const client = new PromptevClient({ projectKey: "pv_sk_..." });
75
108
  const openai = new OpenAI({ apiKey: "sk-..." });
76
109
 
77
- const promptText = await client.getPrompt("clarify-topic", {
78
- topic: "prompt engineering"
79
- });
110
+ const systemPrompt = await client.runPrompt("support-agent",
111
+ "How do I reset my password?",
112
+ { company: "Acme Corp", tone: "professional" }
113
+ );
80
114
 
81
115
  const response = await openai.chat.completions.create({
82
116
  model: "gpt-4",
83
- messages: [{ role: "user", content: promptText }]
117
+ messages: [
118
+ { role: "system", content: systemPrompt },
119
+ { role: "user", content: "How do I reset my password?" },
120
+ ],
84
121
  });
85
-
86
122
  console.log(response.choices[0].message.content);
87
123
  ```
88
124
 
89
- ---
125
+ ### Stream a prompt (with tools or real-time output)
126
+
127
+ When a prompt has tools attached (Jira, Slack, GitHub, etc.) or you want real-time output, use `streamPrompt`. It returns SSE events — same format as agent streaming:
128
+
129
+ ```ts
130
+ for await (const event of client.streamPrompt("research-assistant",
131
+ "Find all P1 bugs assigned to me",
132
+ { project: "ACME" }
133
+ )) {
134
+ if (event.type === "thoughts") console.log(`Thinking: ${event.output}`);
135
+ else if (event.type === "processing") console.log(`Running: ${event.output}`);
136
+ else if (event.type === "done") console.log(event.output);
137
+ }
138
+ ```
139
+
140
+ > **When to use which:**
141
+ > - `runPrompt()` — simple prompt execution, no tools, returns a string
142
+ > - `streamPrompt()` — prompts with tools, RAG-heavy queries, or when you want real-time output
143
+
144
+ ## Agents
145
+
146
+ Promptev agents are deployed AI assistants with built-in memory, tools (Jira, Slack, GitHub, etc.), and RAG context packs. The SDK lets you start sessions and stream responses in real time.
147
+
148
+ ### Start a session
149
+
150
+ ```ts
151
+ const session = await client.startAgent("your-agent-id", { visitor: "John" });
152
+
153
+ console.log(session.sessionToken); // Use this for all subsequent messages
154
+ console.log(session.name); // Agent display name
155
+ console.log(session.memoryEnabled); // Whether agent retains conversation context
156
+ ```
157
+
158
+ ### Stream a response
90
159
 
91
- ## Features
160
+ The agent responds via Server-Sent Events (SSE). Each event has a `type` and `output`:
92
161
 
93
- - Works with or without prompt variables
94
- - 🔁 In-memory caching (Map or NodeCache)
95
- - 🔄 Background refresh (default: 30s, configurable)
96
- - Token-safe variable formatting
97
- - 🔐 Secure for frontend & server
98
- - 🔌 Works with any LLM provider
162
+ | Event Type | Description |
163
+ |---|---|
164
+ | `thoughts` | Agent's internal reasoning |
165
+ | `processing` | Tool execution status (e.g., "Searching Jira...") |
166
+ | `approval_required` | Agent needs permission to run a tool |
167
+ | `done` | Final response text |
168
+ | `error` | Something went wrong |
99
169
 
100
- ---
170
+ ```ts
171
+ for await (const event of client.streamAgent(session.chatbotId, {
172
+ sessionToken: session.sessionToken,
173
+ query: "What are the open P1 bugs in our backlog?",
174
+ })) {
175
+ if (event.type === "thoughts") console.log(`Thinking: ${event.output}`);
176
+ else if (event.type === "processing") console.log(`Running: ${event.output}`);
177
+ else if (event.type === "done") console.log(`\n${event.output}`);
178
+ else if (event.type === "error") console.log(`Error: ${event.output}`);
179
+ }
180
+ ```
101
181
 
102
- ## Options
182
+ ### Multi-turn conversation
183
+
184
+ The session token maintains conversation context across messages:
103
185
 
104
186
  ```ts
105
- new PromptevClient({
106
- projectKey: "pv_sk_abc...",
107
- baseUrl: "https://api.promptev.ai", // optional
108
- cacheRefreshIntervalMs: 60000 // optional
187
+ const session = await client.startAgent("your-agent-id", { visitor: "Sarah" });
188
+
189
+ // First message
190
+ for await (const event of client.streamAgent(session.chatbotId, {
191
+ sessionToken: session.sessionToken,
192
+ query: "Summarize our Q4 sales report",
193
+ })) {
194
+ if (event.type === "done") console.log(event.output);
195
+ }
196
+
197
+ // Follow-up — agent remembers the previous context
198
+ for await (const event of client.streamAgent(session.chatbotId, {
199
+ sessionToken: session.sessionToken,
200
+ query: "Compare that with Q3",
201
+ })) {
202
+ if (event.type === "done") console.log(event.output);
203
+ }
204
+ ```
205
+
206
+ ### Collect the final response only
207
+
208
+ ```ts
209
+ async function askAgent(client, session, query) {
210
+ for await (const event of client.streamAgent(session.chatbotId, {
211
+ sessionToken: session.sessionToken,
212
+ query,
213
+ })) {
214
+ if (event.type === "done") return event.output;
215
+ if (event.type === "error") throw new Error(event.output);
216
+ }
217
+ }
218
+
219
+ const answer = await askAgent(client, session, "What's our monthly churn rate?");
220
+ ```
221
+
222
+ ## Error Handling
223
+
224
+ The SDK throws typed errors for each failure scenario:
225
+
226
+ ```ts
227
+ import {
228
+ PromptevClient,
229
+ ValidationError,
230
+ AuthenticationError,
231
+ NotFoundError,
232
+ RateLimitError,
233
+ ServerError,
234
+ NetworkError,
235
+ } from "@promptev/client";
236
+
237
+ const client = new PromptevClient({ projectKey: "pv_sk_..." });
238
+
239
+ try {
240
+ const result = await client.runPrompt("my-prompt", "Hello", { name: "Ava" });
241
+ } catch (err) {
242
+ if (err instanceof ValidationError) {
243
+ // 400 — missing variables, bad input
244
+ console.log(`Invalid request: ${err.message}`);
245
+ } else if (err instanceof NotFoundError) {
246
+ // 404 — prompt or project not found
247
+ console.log(`Not found: ${err.message}`);
248
+ } else if (err instanceof AuthenticationError) {
249
+ // 401/403 — invalid API key or agent not active
250
+ console.log(`Auth error: ${err.message}`);
251
+ } else if (err instanceof RateLimitError) {
252
+ // 429 — API usage quota exceeded
253
+ console.log(`Rate limited: ${err.message}`);
254
+ } else if (err instanceof ServerError) {
255
+ // 5xx — server error (after retries exhausted)
256
+ console.log(`Server error: ${err.message}`);
257
+ } else if (err instanceof NetworkError) {
258
+ // Connection failed, timeout, DNS error
259
+ console.log(`Network error: ${err.message}`);
260
+ }
261
+ }
262
+ ```
263
+
264
+ All errors extend `PromptevError` and include:
265
+ - `err.statusCode` — HTTP status code (if applicable)
266
+ - `err.responseText` — Raw response body (for debugging)
267
+
268
+ ## Configuration
269
+
270
+ ```ts
271
+ const client = new PromptevClient({
272
+ projectKey: "pv_sk_...", // Required — your project API key
273
+ baseUrl: "https://api.promptev.ai", // Default — override for self-hosted
274
+ timeout: 30000, // Default — request timeout in ms
275
+ maxRetries: 2, // Default — retries for 502/503/504
276
+ headers: { "X-Custom": "value" }, // Optional — extra HTTP headers
109
277
  });
110
278
  ```
111
279
 
112
- ---
280
+ | Parameter | Default | Description |
281
+ |---|---|---|
282
+ | `projectKey` | *required* | Your Promptev project API key |
283
+ | `baseUrl` | `https://api.promptev.ai` | API base URL |
284
+ | `timeout` | `30000` | Request timeout in milliseconds |
285
+ | `maxRetries` | `2` | Automatic retries for transient server errors (502, 503, 504) |
286
+ | `headers` | `{}` | Additional HTTP headers |
287
+
288
+ ## API Reference
289
+
290
+ ### `PromptevClient`
291
+
292
+ | Method | Description | Returns |
293
+ |---|---|---|
294
+ | `runPrompt(promptKey, query, variables?)` | Compile and execute a prompt | `Promise<string>` |
295
+ | `streamPrompt(promptKey, query, variables?)` | Stream prompt execution with tools | `AsyncGenerator<AgentEvent>` |
296
+ | `startAgent(chatbotId, options?)` | Start agent session | `Promise<AgentSession>` |
297
+ | `streamAgent(chatbotId, options)` | Stream agent response | `AsyncGenerator<AgentEvent>` |
298
+
299
+ ### `AgentSession`
300
+
301
+ | Field | Type | Description |
302
+ |---|---|---|
303
+ | `sessionToken` | `string` | Token for subsequent stream calls |
304
+ | `chatbotId` | `string` | Agent identifier |
305
+ | `name` | `string` | Agent display name |
306
+ | `memoryEnabled` | `boolean` | Whether agent retains conversation context |
307
+ | `messages` | `array` | Previous messages (populated when resuming a session) |
308
+
309
+ ### `AgentEvent`
310
+
311
+ | Field | Type | Description |
312
+ |---|---|---|
313
+ | `type` | `string` | Event type: `thoughts`, `processing`, `done`, `error`, `approval_required` |
314
+ | `output` | `string` | Event content text |
315
+ | `raw` | `object` | Full parsed SSE event data |
316
+
317
+ ### Exceptions
318
+
319
+ | Error | HTTP Status | When |
320
+ |---|---|---|
321
+ | `ValidationError` | 400 | Missing required variables, bad input |
322
+ | `AuthenticationError` | 401, 403 | Invalid API key, agent not active |
323
+ | `NotFoundError` | 404 | Project, prompt, or agent not found |
324
+ | `RateLimitError` | 429 | API usage quota exceeded |
325
+ | `ServerError` | 5xx | Server error (after retries exhausted) |
326
+ | `NetworkError` | — | Connection failed, timeout, DNS error |
327
+ | `PromptevError` | any | Base class for all above errors |
328
+
329
+ ## Framework Examples
330
+
331
+ ### React
332
+
333
+ ```tsx
334
+ import { useState } from "react";
335
+ import { PromptevClient } from "@promptev/client";
336
+
337
+ const client = new PromptevClient({ projectKey: "pv_sk_..." });
338
+
339
+ export function AskAI() {
340
+ const [answer, setAnswer] = useState("");
341
+ const [loading, setLoading] = useState(false);
342
+
343
+ async function handleAsk() {
344
+ setLoading(true);
345
+ try {
346
+ const result = await client.runPrompt("support-agent", "What is your refund policy?");
347
+ setAnswer(result);
348
+ } finally {
349
+ setLoading(false);
350
+ }
351
+ }
352
+
353
+ return (
354
+ <div>
355
+ <button onClick={handleAsk} disabled={loading}>Ask AI</button>
356
+ {answer && <p>{answer}</p>}
357
+ </div>
358
+ );
359
+ }
360
+ ```
361
+
362
+ ### React — Streaming Agent Chat
363
+
364
+ ```tsx
365
+ import { useState, useRef } from "react";
366
+ import { PromptevClient } from "@promptev/client";
367
+
368
+ const client = new PromptevClient({ projectKey: "pv_sk_..." });
369
+
370
+ export function AgentChat() {
371
+ const [messages, setMessages] = useState<string[]>([]);
372
+ const [input, setInput] = useState("");
373
+ const sessionRef = useRef(null);
374
+
375
+ async function startChat() {
376
+ sessionRef.current = await client.startAgent("your-agent-id", { visitor: "user" });
377
+ }
378
+
379
+ async function sendMessage() {
380
+ if (!sessionRef.current) await startChat();
381
+
382
+ setMessages((prev) => [...prev, `You: ${input}`]);
383
+ const query = input;
384
+ setInput("");
385
+
386
+ for await (const event of client.streamAgent(sessionRef.current.chatbotId, {
387
+ sessionToken: sessionRef.current.sessionToken,
388
+ query,
389
+ })) {
390
+ if (event.type === "done") {
391
+ setMessages((prev) => [...prev, `Agent: ${event.output}`]);
392
+ }
393
+ }
394
+ }
395
+
396
+ return (
397
+ <div>
398
+ {messages.map((msg, i) => <p key={i}>{msg}</p>)}
399
+ <input value={input} onChange={(e) => setInput(e.target.value)} />
400
+ <button onClick={sendMessage}>Send</button>
401
+ </div>
402
+ );
403
+ }
404
+ ```
405
+
406
+ ### Node.js / Express API
407
+
408
+ ```js
409
+ import express from "express";
410
+ import { PromptevClient } from "@promptev/client";
411
+
412
+ const app = express();
413
+ const client = new PromptevClient({ projectKey: "pv_sk_..." });
414
+
415
+ app.post("/api/ask", express.json(), async (req, res) => {
416
+ const answer = await client.runPrompt("support-agent", req.body.question);
417
+ res.json({ answer });
418
+ });
419
+
420
+ app.listen(3000);
421
+ ```
422
+
423
+ ### Vanilla HTML
424
+
425
+ ```html
426
+ <script type="module">
427
+ import { PromptevClient } from "https://cdn.jsdelivr.net/npm/@promptev/client/dist/esm/index.js";
428
+
429
+ const client = new PromptevClient({ projectKey: "pv_sk_..." });
430
+
431
+ document.getElementById("ask-btn").addEventListener("click", async () => {
432
+ const answer = await client.runPrompt("support-agent", "What is the refund policy?");
433
+ document.getElementById("output").textContent = answer;
434
+ });
435
+ </script>
436
+ ```
113
437
 
114
438
  ## License
115
439
 
116
- This SDK is **commercial software** by Promptev Inc.
440
+ This SDK is commercial software by [Promptev Inc](https://promptev.ai).
117
441
 
118
- By using this package, you agree to the terms in [`LICENSE.txt`](./LICENSE.txt).
442
+ - Free tier use allowed
443
+ - Production use requires an active subscription
119
444
 
120
- ---
445
+ See [LICENSE](./LICENSE.txt) for full terms.
121
446
 
122
- ## Contact
447
+ ## Support
123
448
 
124
- - 🌐 [https://promptev.ai](https://promptev.ai)
125
- - 📧 support@promptev.ai
449
+ - Website: [promptev.ai](https://promptev.ai)
450
+ - Email: support@promptev.ai
@@ -1,30 +0,0 @@
1
- export interface PromptliyClientConfig {
2
- baseUrl?: string;
3
- projectKey: string;
4
- }
5
- export interface Prompt {
6
- prompt: string;
7
- variables: string[] | string | null;
8
- format(values?: Record<string, string>): string;
9
- }
10
- export declare class PromptliyClient {
11
- private client;
12
- private baseUrl;
13
- private projectKey;
14
- private promptCache;
15
- private refreshInterval;
16
- private cacheRefreshIntervalMs;
17
- private isReady;
18
- constructor(config: PromptliyClientConfig);
19
- private ensureReady;
20
- private startCacheRefresh;
21
- private refreshCachedPrompt;
22
- private createPromptObject;
23
- getPrompt(promptKey: string): Promise<Prompt> & {
24
- format: (values: Record<string, string>) => Promise<string>;
25
- };
26
- private fetchPrompt;
27
- private fetchPromptFromServer;
28
- dispose(): Promise<void>;
29
- }
30
- export default PromptliyClient;