@promptlycms/prompts 0.1.0 → 0.1.1-canary.5fc554c

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,16 +1,35 @@
1
1
  # @promptlycms/prompts
2
2
 
3
- TypeScript SDK for the [Promptly CMS](https://promptlycms.com) API. Fetch prompts at runtime, get typed template variables via codegen, and integrate with the [Vercel AI SDK](https://sdk.vercel.ai).
3
+ TypeScript SDK for the [Promptly CMS](https://promptlycms.com) API. Stop hardcoding prompts in your codebase — manage them in a purpose-built CMS with versioning and instant publishing, then fetch them at runtime with full type safety.
4
+
5
+ - **Zero hardcoded prompts** — fetch prompts at runtime; update wording, models, and settings from the [CMS](https://promptlycms.com) without code changes or redeploys
6
+ - **Runtime client** — `getPrompt()` and `getPrompts()` with full TypeScript support
7
+ - **Codegen CLI** — generates typed template variables via declaration merging
8
+ - **AI SDK integration** — destructure directly into [Vercel AI SDK](https://ai-sdk.dev/) `generateText` / `streamText`
9
+ - **Any AI provider** — supports [all providers](https://ai-sdk.dev/providers/ai-sdk-providers#provider-support) supported by the Vercel AI SDK
10
+ - **Structured output** — Zod schemas built from CMS-defined output schemas
4
11
 
5
12
  ## Install
6
13
 
7
14
  ```bash
8
15
  npm install @promptlycms/prompts
9
- # or
10
- bun add @promptlycms/prompts
11
16
  ```
12
17
 
13
- Peer dependencies: `zod@^4`, `ai@^4 || ^5 || ^6`, `typescript@^5`
18
+ Peer dependencies:
19
+
20
+ ```bash
21
+ npm install zod ai typescript
22
+ ```
23
+
24
+ You'll also need at least one AI provider SDK for model resolution:
25
+
26
+ ```bash
27
+ # Install the provider(s) your prompts use
28
+ npm install @ai-sdk/anthropic # Claude models
29
+ npm install @ai-sdk/openai # GPT / o-series models
30
+ npm install @ai-sdk/google # Gemini models
31
+ npm install @ai-sdk/mistral # Mistral / Mixtral models
32
+ ```
14
33
 
15
34
  ## Quick start
16
35
 
@@ -27,7 +46,7 @@ PROMPTLY_API_KEY=pk_live_...
27
46
  npx promptly generate
28
47
  ```
29
48
 
30
- This fetches all your prompts from the API and generates a `promptly-env.d.ts` file in your project root. This gives you typed autocomplete on `userMessage()` variables for every prompt ID.
49
+ This fetches all your prompts from the API and generates a `promptly-env.d.ts` file in your project root with typed autocomplete for every prompt ID and its template variables.
31
50
 
32
51
  ```bash
33
52
  # Custom output path
@@ -40,9 +59,9 @@ npx promptly generate --api-key pk_live_...
40
59
  ### 3. Create a client
41
60
 
42
61
  ```typescript
43
- import { createPromptClient } from '@promptlycms/prompts';
62
+ import { createPromptlyClient } from '@promptlycms/prompts';
44
63
 
45
- const promptly = createPromptClient({
64
+ const promptly = createPromptlyClient({
46
65
  apiKey: process.env.PROMPTLY_API_KEY,
47
66
  });
48
67
  ```
@@ -52,13 +71,14 @@ const promptly = createPromptClient({
52
71
  ### Single prompt
53
72
 
54
73
  ```typescript
55
- const result = await promptly.get('JPxlUpstuhXB5OwOtKPpj');
74
+ const result = await promptly.getPrompt('JPxlUpstuhXB5OwOtKPpj');
56
75
 
57
76
  // Access prompt metadata
58
77
  result.promptId; // 'JPxlUpstuhXB5OwOtKPpj'
59
78
  result.promptName; // 'Review Prompt'
60
79
  result.systemMessage; // 'You are a helpful assistant.'
61
80
  result.temperature; // 0.7
81
+ result.model; // LanguageModel (auto-resolved from CMS config)
62
82
 
63
83
  // Interpolate template variables (typed if you ran codegen)
64
84
  const message = result.userMessage({
@@ -74,7 +94,7 @@ const template = String(result.userMessage);
74
94
  Fetch a specific version:
75
95
 
76
96
  ```typescript
77
- const result = await promptly.get('JPxlUpstuhXB5OwOtKPpj', {
97
+ const result = await promptly.getPrompt('JPxlUpstuhXB5OwOtKPpj', {
78
98
  version: '2.0.0',
79
99
  });
80
100
  ```
@@ -98,27 +118,52 @@ welcomePrompt.userMessage({ email: 'a@b.com', subject: 'Hi' });
98
118
 
99
119
  ## AI SDK integration
100
120
 
101
- `aiParams()` returns an object you can spread directly into Vercel AI SDK functions:
121
+ Destructure `getPrompt()` and pass the properties directly to Vercel AI SDK functions:
102
122
 
103
123
  ```typescript
104
124
  import { generateText } from 'ai';
105
- import { anthropic } from '@ai-sdk/anthropic';
106
125
 
107
- const params = await promptly.aiParams('my-prompt', {
108
- variables: { name: 'Alice', task: 'coding' },
109
- });
126
+ const { userMessage, systemMessage, temperature, model } = await promptly.getPrompt('my-prompt');
110
127
 
111
128
  const { text } = await generateText({
112
- model: anthropic('claude-sonnet-4-5-20250929'),
113
- ...params,
129
+ model,
130
+ system: systemMessage,
131
+ prompt: userMessage({ name: 'Alice', task: 'coding' }),
132
+ temperature,
114
133
  });
115
134
  ```
116
135
 
117
- If the prompt has a structured output schema defined in the CMS, `params.output` is automatically populated with a Zod schema wrapped in `Output.object()`.
136
+ The model configured in the CMS is auto-resolved to the correct AI SDK provider.
137
+
138
+ ## Model auto-detection
139
+
140
+ The SDK automatically resolves models configured in the CMS to the correct AI SDK provider based on the model name prefix:
141
+
142
+ | Prefix | Provider | Package |
143
+ |--------|----------|---------|
144
+ | `claude-*` | Anthropic | `@ai-sdk/anthropic` |
145
+ | `gpt-*`, `o1-*`, `o3-*`, `o4-*`, `chatgpt-*` | OpenAI | `@ai-sdk/openai` |
146
+ | `gemini-*` | Google | `@ai-sdk/google` |
147
+ | `mistral-*`, `mixtral-*`, `codestral-*` | Mistral | `@ai-sdk/mistral` |
148
+
149
+ CMS model display names (e.g. `claude-sonnet-4.5`) are mapped to their full API model IDs automatically.
150
+
151
+ ### Custom model resolver
152
+
153
+ If you need full control over model resolution, pass a `model` function:
154
+
155
+ ```typescript
156
+ import { anthropic } from '@ai-sdk/anthropic';
157
+
158
+ const promptly = createPromptlyClient({
159
+ apiKey: process.env.PROMPTLY_API_KEY,
160
+ model: (modelId) => anthropic('claude-sonnet-4-5-20250929'),
161
+ });
162
+ ```
118
163
 
119
164
  ## Type generation
120
165
 
121
- Running `npx promptly generate` creates a `promptly-env.d.ts` file that uses [declaration merging](https://www.typescriptlang.org/docs/handbook/declaration-merging.html) to narrow types:
166
+ Running `npx promptly generate` creates a `promptly-env.d.ts` file that uses [declaration merging](https://www.typescriptlang.org/docs/handbook/declaration-merging.html) to type your prompts:
122
167
 
123
168
  ```typescript
124
169
  // Auto-generated by @promptlycms/prompts — do not edit
@@ -127,26 +172,24 @@ import '@promptlycms/prompts';
127
172
  declare module '@promptlycms/prompts' {
128
173
  interface PromptVariableMap {
129
174
  'JPxlUpstuhXB5OwOtKPpj': {
130
- pickupLocation: string;
131
- items: string;
175
+ [V in 'latest' | '2.0.0' | '1.0.0']: {
176
+ pickupLocation: string;
177
+ items: string;
178
+ };
132
179
  };
133
180
  'abc123': {
134
- email: string;
135
- subject: string;
181
+ [V in 'latest' | '1.0.0']: {
182
+ email: string;
183
+ subject: string;
184
+ };
136
185
  };
137
186
  }
138
187
  }
139
188
  ```
140
189
 
141
- With this file present, `get()` and `getPrompts()` return typed `userMessage` functions with autocomplete. Unknown prompt IDs fall back to `Record<string, string>`.
142
-
143
- Add the generated file to version control so types are available without running codegen in CI:
190
+ With this file present, `getPrompt()` and `getPrompts()` return typed `userMessage` functions with autocomplete. Unknown prompt IDs fall back to `Record<string, string>`.
144
191
 
145
- ```bash
146
- # .gitignore — do NOT ignore promptly-env.d.ts
147
- ```
148
-
149
- Re-run `npx promptly generate` whenever you add, remove, or rename template variables in the CMS.
192
+ Add the generated file to version control so types are available without running codegen in CI. Re-run `npx promptly generate` whenever you add, remove, or rename template variables in the CMS.
150
193
 
151
194
  ## Error handling
152
195
 
@@ -156,7 +199,7 @@ All API errors throw `PromptlyError`:
156
199
  import { PromptlyError } from '@promptlycms/prompts';
157
200
 
158
201
  try {
159
- await promptly.get('nonexistent');
202
+ await promptly.getPrompt('nonexistent');
160
203
  } catch (err) {
161
204
  if (err instanceof PromptlyError) {
162
205
  err.code; // 'NOT_FOUND' | 'INVALID_KEY' | 'USAGE_LIMIT_EXCEEDED' | ...
@@ -170,26 +213,38 @@ try {
170
213
 
171
214
  ## API reference
172
215
 
173
- ### `createPromptClient(config)`
216
+ ### `createPromptlyClient(config?)`
174
217
 
175
218
  | Option | Type | Required | Description |
176
219
  |-----------|----------|----------|----------------------------------------------------|
177
- | `apiKey` | `string` | Yes | Your Promptly CMS API key |
220
+ | `apiKey` | `string` | No | Your Promptly API key (defaults to `PROMPTLY_API_KEY` env var) |
178
221
  | `baseUrl` | `string` | No | API base URL (default: `https://api.promptlycms.com`) |
222
+ | `model` | `(modelId: string) => LanguageModel` | No | Custom model resolver — overrides auto-detection |
179
223
 
180
- Returns a `PromptClient` with `get()`, `getPrompts()`, and `aiParams()` methods.
224
+ Returns a `PromptlyClient` with `getPrompt()` and `getPrompts()` methods.
181
225
 
182
- ### `client.get(promptId, options?)`
226
+ ### `client.getPrompt(promptId, options?)`
183
227
 
184
228
  Fetch a single prompt. Returns `PromptResult` with typed `userMessage` when codegen types are present.
185
229
 
230
+ | Option | Type | Description |
231
+ |-----------|----------|----------------------|
232
+ | `version` | `string` | Specific version to fetch (default: latest) |
233
+
186
234
  ### `client.getPrompts(entries)`
187
235
 
188
236
  Fetch multiple prompts in parallel. Accepts `PromptRequest[]` and returns a typed tuple matching the input order.
189
237
 
190
- ### `client.aiParams(promptId, options?)`
238
+ ### `@promptlycms/prompts/schema`
239
+
240
+ Subpath export for working with Zod schemas from CMS schema fields:
241
+
242
+ ```typescript
243
+ import { buildZodSchema, schemaFieldsToZodSource } from '@promptlycms/prompts/schema';
244
+ ```
191
245
 
192
- Fetch a prompt and return params ready to spread into AI SDK functions (`system`, `prompt`, `temperature`, and optionally `output`).
246
+ - `buildZodSchema(fields)` — builds a Zod object schema at runtime from `SchemaField[]`
247
+ - `schemaFieldsToZodSource(fields)` — generates Zod source code as a string for codegen
193
248
 
194
249
  ### CLI: `npx promptly generate`
195
250
 
@@ -197,3 +252,7 @@ Fetch a prompt and return params ready to spread into AI SDK functions (`system`
197
252
  |-------------|-------|------------------------------------------------------|
198
253
  | `--api-key` | | API key (defaults to `PROMPTLY_API_KEY` env var) |
199
254
  | `--output` | `-o` | Output path (default: `./promptly-env.d.ts`) |
255
+
256
+ ## License
257
+
258
+ MIT
package/dist/cli.js CHANGED
@@ -41,6 +41,38 @@ var createErrorFromResponse = async (response) => {
41
41
 
42
42
  // src/cli/generate.ts
43
43
  import { writeFile } from "fs/promises";
44
+ import { createRequire } from "module";
45
+
46
+ // src/client.ts
47
+ var PROVIDER_PREFIXES = [
48
+ ["claude", "anthropic"],
49
+ ["gpt", "openai"],
50
+ ["o1", "openai"],
51
+ ["o3", "openai"],
52
+ ["o4", "openai"],
53
+ ["chatgpt", "openai"],
54
+ ["gemini", "google"],
55
+ ["mistral", "mistral"],
56
+ ["mixtral", "mistral"],
57
+ ["codestral", "mistral"]
58
+ ];
59
+ var detectProviderName = (modelId) => {
60
+ const lower = modelId.toLowerCase();
61
+ for (const [prefix, provider] of PROVIDER_PREFIXES) {
62
+ if (lower.startsWith(prefix)) {
63
+ return provider;
64
+ }
65
+ }
66
+ return void 0;
67
+ };
68
+
69
+ // src/cli/generate.ts
70
+ var PROVIDER_PACKAGES = {
71
+ anthropic: "@ai-sdk/anthropic",
72
+ openai: "@ai-sdk/openai",
73
+ google: "@ai-sdk/google",
74
+ mistral: "@ai-sdk/mistral"
75
+ };
44
76
  var DEFAULT_BASE_URL = "https://api.promptlycms.com";
45
77
  var extractTemplateVariables = (text) => {
46
78
  const matches = text.matchAll(/\$\{(\w+)\}/g);
@@ -215,6 +247,36 @@ var generateTypeDeclaration = (prompts) => {
215
247
  lines.push("");
216
248
  return lines.join("\n");
217
249
  };
250
+ var warnMissingProviders = (prompts) => {
251
+ const require2 = createRequire(import.meta.url);
252
+ const needed = /* @__PURE__ */ new Map();
253
+ for (const prompt of prompts) {
254
+ const provider = detectProviderName(prompt.config.model);
255
+ if (!provider) {
256
+ continue;
257
+ }
258
+ const pkg = PROVIDER_PACKAGES[provider];
259
+ if (!pkg) {
260
+ continue;
261
+ }
262
+ const existing = needed.get(pkg);
263
+ if (existing) {
264
+ existing.push(prompt.promptName);
265
+ } else {
266
+ needed.set(pkg, [prompt.promptName]);
267
+ }
268
+ }
269
+ for (const [pkg, promptNames] of needed) {
270
+ try {
271
+ require2.resolve(pkg);
272
+ } catch {
273
+ const names = promptNames.map((n) => `"${n}"`).join(", ");
274
+ console.warn(
275
+ ` Warning: ${names} requires ${pkg} \u2014 install it: npm install ${pkg}`
276
+ );
277
+ }
278
+ }
279
+ };
218
280
  var generate = async (apiKey, outputPath, baseUrl) => {
219
281
  const prompts = await fetchAllPrompts(apiKey, baseUrl);
220
282
  if (prompts.length === 0) {
@@ -222,6 +284,7 @@ var generate = async (apiKey, outputPath, baseUrl) => {
222
284
  return;
223
285
  }
224
286
  console.log(` Found ${prompts.length} prompt(s)`);
287
+ warnMissingProviders(prompts);
225
288
  const content = generateTypeDeclaration(prompts);
226
289
  await writeFile(outputPath, content, "utf-8");
227
290
  console.log(` Generated ${outputPath}`);