@positronic/template-new-project 0.0.74 → 0.0.76

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -53,10 +53,10 @@ module.exports = {
53
53
  ],
54
54
  setup: async ctx => {
55
55
  const devRootPath = process.env.POSITRONIC_LOCAL_PATH;
56
- let coreVersion = '^0.0.74';
57
- let cloudflareVersion = '^0.0.74';
58
- let clientVercelVersion = '^0.0.74';
59
- let genUIComponentsVersion = '^0.0.74';
56
+ let coreVersion = '^0.0.76';
57
+ let cloudflareVersion = '^0.0.76';
58
+ let clientVercelVersion = '^0.0.76';
59
+ let genUIComponentsVersion = '^0.0.76';
60
60
 
61
61
  // Map backend selection to package names
62
62
  const backendPackageMap = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@positronic/template-new-project",
3
- "version": "0.0.74",
3
+ "version": "0.0.76",
4
4
  "publishConfig": {
5
5
  "access": "public"
6
6
  },
@@ -12,6 +12,7 @@ This is a Positronic project - an AI-powered framework for building and running
12
12
  - **`/webhooks`** - Webhook definitions for external integrations (auto-discovered)
13
13
  - **`/resources`** - Files and documents that brains can access via the resource system
14
14
  - **`/tests`** - Test files for brains (kept separate to avoid deployment issues)
15
+ - **`/utils`** - Shared utilities (e.g., `bottleneck` for rate limiting)
15
16
  - **`/docs`** - Documentation including brain testing guide
16
17
  - **`/runner.ts`** - The main entry point for running brains locally
17
18
  - **`/positronic.config.json`** - Project configuration
package/template/_env CHANGED
@@ -33,4 +33,4 @@ R2_BUCKET_NAME=<%= projectName %>
33
33
  # 4. Copy your Account ID from the right sidebar on any Cloudflare page
34
34
  #
35
35
  # CLOUDFLARE_API_TOKEN=
36
- # CLOUDFLARE_ACCOUNT_ID=
36
+ # CLOUDFLARE_ACCOUNT_ID=
package/template/brain.ts CHANGED
@@ -18,6 +18,10 @@ import { components } from './components/index.js';
18
18
  * - consoleLog: Log messages for debugging
19
19
  * - done: Complete the agent and return a result
20
20
  *
21
+ * Tool configuration:
22
+ * - `withTools({ ... })` — replaces the default tools entirely
23
+ * - `withExtraTools({ ... })` — adds tools alongside the defaults
24
+ *
21
25
  * To add services (e.g., Slack, Gmail, database clients):
22
26
  *
23
27
  * ```typescript
@@ -1,6 +1,10 @@
1
1
  import { z } from 'zod';
2
2
  import { brain } from '../brain.js';
3
3
 
4
+ // This brain uses the AI client configured in runner.ts.
5
+ // By default it uses Google Gemini (requires GOOGLE_GENERATIVE_AI_API_KEY in .env).
6
+ // See runner.ts to switch to Anthropic, OpenAI, or any other Vercel AI SDK provider.
7
+
4
8
  const exampleBrain = brain('example')
5
9
  .step('Start', ({ state }) => ({
6
10
  ...state,
@@ -773,6 +773,39 @@ export const brain = createBrain({
773
773
 
774
774
  All brains created with this factory will have access to the configured services, tools, components, and store.
775
775
 
776
+ #### Typing Initial State and Options
777
+
778
+ By default, the first `.step()` establishes the state type and inference flows from there. But when a brain receives its initial state from outside — via `initialState` in `.run()`, from the CLI, or from a parent brain — the first step's `state` parameter is untyped.
779
+
780
+ You can provide type parameters to `brain()` to type the initial state and options:
781
+
782
+ ```typescript
783
+ // brain<TOptions, TState>(title)
784
+ // Both parameters are optional and default to {} and object respectively.
785
+
786
+ // Type just the initial state (pass {} for options)
787
+ const myBrain = brain<{}, { userId: string; email: string }>('process-user')
788
+ .step('Greet', ({ state }) => {
789
+ // state.userId and state.email are correctly typed
790
+ return { ...state, greeting: 'Hello ' + state.email };
791
+ });
792
+
793
+ // Type both options and initial state
794
+ const counterBrain = brain<{ verbose: boolean }, { count: number }>('counter')
795
+ .step('Process', ({ state, options }) => {
796
+ if (options.verbose) console.log('Count:', state.count);
797
+ return { ...state, doubled: state.count * 2 };
798
+ });
799
+ ```
800
+
801
+ This is useful in several situations:
802
+
803
+ - **Brains run with `initialState`**: When calling `.run({ initialState: { ... } })` or passing initial state from the CLI
804
+ - **Sub-brains**: When a parent brain provides initial state via `.brain()` or iterate's `initialState` option
805
+ - **Any brain where the first step receives rather than creates state**
806
+
807
+ Existing `brain('title')` calls without type parameters continue to work unchanged.
808
+
776
809
  ## Running Brains
777
810
 
778
811
  ### Basic Execution
@@ -1042,12 +1075,16 @@ Extract prompts to separate files when:
1042
1075
  - The prompt might be reused in other brains
1043
1076
  - You want to test the prompt logic separately
1044
1077
 
1045
- ## Batch Prompt Mode
1078
+ ## Iterating Over Items
1046
1079
 
1047
- When you need to run the same prompt over multiple items, use batch mode with the `over` option:
1080
+ When you need to run the same prompt, brain, or agent over multiple items, use the `over` option.
1081
+
1082
+ ### Prompt Iterate
1083
+
1084
+ Run the same prompt once per item in a list:
1048
1085
 
1049
1086
  ```typescript
1050
- brain('Batch Processor')
1087
+ brain('Item Processor')
1051
1088
  .step('Initialize', () => ({
1052
1089
  items: [
1053
1090
  { id: 1, title: 'First item' },
@@ -1062,31 +1099,132 @@ brain('Batch Processor')
1062
1099
  name: 'summaries' as const
1063
1100
  }
1064
1101
  }, {
1065
- over: (state) => state.items, // Array to iterate over
1066
- concurrency: 10, // Parallel requests (default: 10)
1067
- error: (item, error) => ({ summary: 'Failed to summarize' }) // Fallback on error
1102
+ over: ({ state }) => state.items,
1103
+ error: (item, error) => ({ summary: 'Failed to summarize' })
1068
1104
  })
1069
1105
  .step('Process Results', ({ state }) => ({
1070
1106
  ...state,
1071
- // summaries is [item, response][] - array of tuples
1072
- processedSummaries: state.summaries.map(([item, response]) => ({
1107
+ // summaries is an IterateResult — use .values, .items, .entries, .filter(), .map()
1108
+ processedSummaries: state.summaries.map((item, response) => ({
1073
1109
  id: item.id,
1074
1110
  summary: response.summary
1075
1111
  }))
1076
1112
  }));
1077
1113
  ```
1078
1114
 
1079
- ### Batch Options
1115
+ Prompt iterate also supports per-step `client` overrides (see Prompt Steps above), so you can use a different model for processing.
1080
1116
 
1081
- - `over: (state) => T[]` - Function returning the array to iterate over
1082
- - `concurrency: number` - Maximum number of items processed in parallel (default: 10)
1083
- - `error: (item, error) => Response` - Fallback function when a request fails
1117
+ ### Brain Iterate
1084
1118
 
1085
- Batch prompts also support per-step `client` overrides (see Prompt Steps above), so you can use a different model for batch processing.
1119
+ Run a nested brain once per item:
1120
+
1121
+ ```typescript
1122
+ const processBrain = brain('Process Item')
1123
+ .step('Transform', ({ state }) => ({
1124
+ ...state,
1125
+ result: state.value * 2,
1126
+ }));
1127
+
1128
+ brain('Process All Items')
1129
+ .step('Initialize', () => ({
1130
+ items: [{ value: 1 }, { value: 2 }, { value: 3 }]
1131
+ }))
1132
+ .brain('Process Each', processBrain, {
1133
+ over: ({ state }) => state.items,
1134
+ initialState: (item) => ({ value: item.value }),
1135
+ outputKey: 'results' as const,
1136
+ error: (item, error) => ({ value: item.value, failed: true }),
1137
+ })
1138
+ .step('Use Results', ({ state }) => ({
1139
+ ...state,
1140
+ // results is an IterateResult — use .values to get just the results
1141
+ totals: state.results.values.map(result => result.result),
1142
+ }));
1143
+ ```
1144
+
1145
+ ### Agent Iterate
1146
+
1147
+ Run an agent config once per item. The `configFn` receives the item as its first argument:
1148
+
1149
+ ```typescript
1150
+ brain('Research Topics')
1151
+ .step('Initialize', () => ({
1152
+ topics: [{ name: 'AI' }, { name: 'Robotics' }]
1153
+ }))
1154
+ .brain('Research Each', (item, { state, tools }) => ({
1155
+ system: 'You are a research assistant.',
1156
+ prompt: `Research this topic: <%= '${item.name}' %>`,
1157
+ tools: {
1158
+ search: tools.search,
1159
+ },
1160
+ outputSchema: {
1161
+ schema: z.object({ summary: z.string() }),
1162
+ name: 'research' as const,
1163
+ },
1164
+ }), {
1165
+ over: ({ state }) => state.topics,
1166
+ outputKey: 'results' as const,
1167
+ })
1168
+ .step('Use Results', ({ state }) => ({
1169
+ ...state,
1170
+ // results is an IterateResult — use .values to get just the results
1171
+ summaries: state.results.values.map(result => result.summary),
1172
+ }));
1173
+ ```
1174
+
1175
+ ### Iterate Options
1176
+
1177
+ All iterate variants share these options:
1178
+
1179
+ - `over: (context) => T[] | Promise<T[]>` - Function returning the array to iterate over. Receives the full step context (`{ state, options, client, resources, services, ... }`) — the same context object that step actions receive. Most commonly you'll destructure just `{ state }`, but you can access options, services, or any other context field. Can be async.
1180
+ - `error: (item, error) => Result | null` - Fallback when an item fails. Return `null` to skip the item entirely.
1181
+
1182
+ Brain and agent iterate also require:
1183
+
1184
+ - `outputKey: string` - Key under which results are stored in state (use `as const` for type inference)
1185
+
1186
+ Brain iterate additionally requires:
1187
+
1188
+ - `initialState: (item, outerState) => State` - Function to create the inner brain's initial state from each item
1189
+
1190
+ #### Accessing options and services in `over`
1191
+
1192
+ Since `over` receives the full step context, you can use options or services to determine which items to iterate over:
1193
+
1194
+ ```typescript
1195
+ brain('Dynamic Processor')
1196
+ .withOptionsSchema(z.object({ category: z.string() }))
1197
+ .step('Load items', () => ({
1198
+ items: [
1199
+ { id: 1, category: 'a' },
1200
+ { id: 2, category: 'b' },
1201
+ { id: 3, category: 'a' },
1202
+ ]
1203
+ }))
1204
+ .prompt('Process', {
1205
+ template: (item) => `Process item <%= '${item.id}' %>`,
1206
+ outputSchema: {
1207
+ schema: z.object({ result: z.string() }),
1208
+ name: 'results' as const,
1209
+ },
1210
+ }, {
1211
+ over: ({ state, options }) => state.items.filter(i => i.category === options.category),
1212
+ })
1213
+ ```
1086
1214
 
1087
1215
  ### Result Format
1088
1216
 
1089
- The result is stored as an array of `[item, response]` tuples, preserving the relationship between each input item and its generated response.
1217
+ By default, results are stored as an `IterateResult` — a collection that wraps `[item, result]` pairs and provides a richer API than raw tuples:
1218
+
1219
+ - **`.items`** — array of all input items
1220
+ - **`.values`** — array of all results
1221
+ - **`.entries`** — array of `[item, result]` tuples
1222
+ - **`.length`** — number of results
1223
+ - **`.filter((item, result) => boolean)`** — returns a new `IterateResult` with only matching pairs
1224
+ - **`.map((item, result) => value)`** — maps over both item and result, returns a plain array
1225
+ - **`for...of`** — iterates as `[item, result]` tuples (backward compatible with destructuring)
1226
+
1227
+ For prompts, the key comes from `outputSchema.name`. For brain and agent iterate, it comes from `outputKey`.
1090
1228
 
1091
1229
  ## Agent Steps
1092
1230
 
@@ -6,6 +6,40 @@ This document contains helpful tips and patterns for AI agents working with Posi
6
6
 
7
7
  Run `npm run typecheck` frequently as you make changes to ensure your TypeScript code compiles correctly. This will catch type errors early and help maintain code quality.
8
8
 
9
+ ## Prefer Type Inference
10
+
11
+ Never add explicit type annotations unless `npm run typecheck` tells you to. TypeScript's inference is very strong — especially within the Brain DSL chain — and explicit types add noise without value.
12
+
13
+ Start by writing code with no annotations. If `typecheck` fails, add the minimum annotation or cast needed to fix it.
14
+
15
+ ```typescript
16
+ // ❌ DON'T DO THIS - explicit types on callback parameters
17
+ .filter(([_, result]: [any, any]) => result !== null)
18
+ .map((pr: any) => pr.author)
19
+ .map((n: string) => n.trim())
20
+ error: (thread: any, error: any) => { ... }
21
+
22
+ // ✅ DO THIS - let inference work
23
+ .filter(([_, result]) => result !== null)
24
+ .map(pr => pr.author)
25
+ .map(n => n.trim())
26
+ error: (thread, error) => { ... }
27
+ ```
28
+
29
+ This also applies to variable declarations and function parameters:
30
+
31
+ ```typescript
32
+ // ❌ DON'T DO THIS
33
+ const names: string[] = options.notify.split(',');
34
+ template: (state: any) => { ... }
35
+
36
+ // ✅ DO THIS
37
+ const names = options.notify.split(',');
38
+ template: (state) => { ... }
39
+ ```
40
+
41
+ If you genuinely need a cast to fix a type error, prefer the narrowest cast possible and add it only after seeing the error.
42
+
9
43
  ## Running the Development Server
10
44
 
11
45
  When you need to run a development server, use the `--log-file` option to capture server output. **Important**: Always place the server log file in the `/tmp` directory so it gets cleaned up automatically by the operating system.
@@ -125,6 +159,145 @@ Key rules:
125
159
  - Optional title as second argument: `.guard(predicate, 'Check condition')`
126
160
  - See `/docs/brain-dsl-guide.md` for more details
127
161
 
162
+ **Guards vs exceptions**: Use guards for conditions that are an expected part of the brain's flow — like "no audio URL was found" after a discovery step. Guards are documented in the DSL and show up when viewing the brain's steps. Reserve `throw` for truly unexpected errors. If a missing value is a normal possible outcome of a previous step, handle it with a guard, not an exception.
163
+
164
+ ```typescript
165
+ // ❌ DON'T DO THIS - throwing for an expected outcome
166
+ .step('Transcribe', async ({ state }) => {
167
+ if (!state.discovery.audioUrl) {
168
+ throw new Error('No audio URL found');
169
+ }
170
+ const transcript = await whisper.transcribe(state.discovery.audioUrl);
171
+ return { ...state, transcript };
172
+ })
173
+
174
+ // ✅ DO THIS - guard for expected flow, keep the step focused
175
+ .guard(({ state: { discovery } }) => !!discovery.audioUrl, 'Has audio URL')
176
+ .step('Transcribe', async ({ state }) => {
177
+ const transcript = await whisper.transcribe(state.discovery.audioUrl!);
178
+ return { ...state, transcript };
179
+ })
180
+ ```
181
+
182
+ ## Destructure State in Steps
183
+
184
+ Always destructure properties off of `state` rather than accessing them through `state.property`. This applies to steps, prompt templates, brain callbacks, and guards — anywhere state is accessed.
185
+
186
+ ```typescript
187
+ // ❌ DON'T DO THIS - accessing properties through state
188
+ .brain('Find data', ({ state }) => ({
189
+ prompt: `Process <%= '${state.user.name}' %> from <%= '${state.user.email}' %>`,
190
+ }))
191
+
192
+ // ✅ DO THIS - destructure in the parameter when state itself isn't needed
193
+ .brain('Find data', ({ state: { user } }) => ({
194
+ prompt: `Process <%= '${user.name}' %> from <%= '${user.email}' %>`,
195
+ }))
196
+ ```
197
+
198
+ The same applies to prompt templates:
199
+
200
+ ```typescript
201
+ // ❌ DON'T DO THIS
202
+ template: (state) => `Hello <%= '${state.user.name}' %>, your order <%= '${state.order.id}' %> is ready.`,
203
+
204
+ // ✅ DO THIS
205
+ template: ({ user, order }) => `Hello <%= '${user.name}' %>, your order <%= '${order.id}' %> is ready.`,
206
+ ```
207
+
208
+ When you still need `state` (e.g. for `...state` in the return value), destructure in the function body instead:
209
+
210
+ ```typescript
211
+ // ❌ DON'T DO THIS
212
+ .step('Format', ({ state }) => ({
213
+ ...state,
214
+ summary: `<%= '${state.title}' %> by <%= '${state.author}' %>`,
215
+ }))
216
+
217
+ // ✅ DO THIS - destructure in the body when you also need ...state
218
+ .step('Format', ({ state }) => {
219
+ const { title, author } = state;
220
+ return {
221
+ ...state,
222
+ summary: `<%= '${title}' %> by <%= '${author}' %>`,
223
+ };
224
+ })
225
+ ```
226
+
227
+ ## State Shape
228
+
229
+ ### Each step should have one clear purpose, and add one thing to state
230
+
231
+ Don't let steps do multiple unrelated things. Each step should have a clear name that describes its single purpose, and it should add one key to state. If a step produces multiple data points, namespace them under a single key.
232
+
233
+ ```typescript
234
+ // ❌ DON'T DO THIS - step does too much and adds multiple keys
235
+ .step('Process', async ({ state }) => ({
236
+ ...state,
237
+ transcript: await transcribe(state.audioUrl),
238
+ episodeTitle: state.discovery.episodeTitle,
239
+ podcastName: state.podcast.source,
240
+ podcastUrl: state.podcast.url,
241
+ }))
242
+
243
+ // ✅ DO THIS - step has one purpose, adds one thing
244
+ .step('Transcribe', async ({ state }) => {
245
+ const { discovery } = state;
246
+ const transcript = await whisper.transcribe(discovery.audioUrl!);
247
+ return { ...state, transcript };
248
+ })
249
+ ```
250
+
251
+ Previous steps already namespace their results on state (e.g. `state.discovery`, `state.podcast`). Don't copy their fields to the top level — it duplicates data and makes it unclear which version is canonical.
252
+
253
+ ### Reshape state at phase boundaries
254
+
255
+ As steps build up state, it can accumulate intermediate artifacts. At major phase transitions in a brain — like going from "gathering data" to "analyzing it" — reshape state to a clean form for the next phase. Return only what the next phase needs instead of spreading everything forward.
256
+
257
+ The smell to watch for: if you're reading a brain and can't quickly answer "what's the canonical version of X on state?" then state needs reshaping.
258
+
259
+ ```typescript
260
+ // After a data-gathering phase, clean up for analysis
261
+ .step('Prepare for analysis', ({ state }) => {
262
+ const { discovery, transcript, podcast } = state;
263
+ // Only carry forward what the analysis phase needs
264
+ return { podcast, discovery, transcript };
265
+ })
266
+ ```
267
+
268
+ ## Iterate Results
269
+
270
+ Iterate steps produce an `IterateResult` — use its properties and methods to access results cleanly:
271
+
272
+ ```typescript
273
+ // Access just the results
274
+ state.results.values.map(r => r.summary)
275
+
276
+ // Access just the input items
277
+ state.results.items
278
+
279
+ // Filter by both item and result
280
+ state.results.filter((item, r) => r.isImportant).items
281
+
282
+ // Map over both item and result
283
+ state.results.map((item, r) => ({ id: item.id, summary: r.summary }))
284
+
285
+ // Tuple destructuring still works (backward compatible)
286
+ for (const [item, result] of state.results) { ... }
287
+ ```
288
+
289
+ Use `.values` for simple extraction, `.filter()` for correlated filtering, and `.map()` when you need both item and result:
290
+
291
+ ```typescript
292
+ .step('Process', ({ state }) => ({
293
+ ...state,
294
+ important: state.results.filter((item, r) => r.score > 0.8).items,
295
+ summaries: state.results.values.map(r => r.summary),
296
+ }))
297
+ ```
298
+
299
+ **Name the `outputKey` after the content.** If results contain analyses, use `outputKey: 'analyses' as const`, not `outputKey: 'processedItems' as const`.
300
+
128
301
  ## Brain DSL Type Inference
129
302
 
130
303
  The Brain DSL has very strong type inference capabilities. **Important**: You should NOT explicitly specify types on the state object as it flows through steps. The types are automatically inferred from the previous step.
@@ -287,6 +460,86 @@ export const brain = createBrain({
287
460
 
288
461
  This keeps your service implementations separate from your brain logic and makes them easier to test and maintain.
289
462
 
463
+ ## Rate Limiting with bottleneck
464
+
465
+ Most external APIs have rate limits. The `utils/bottleneck.ts` utility creates a simple rate limiter you can wrap around any async call.
466
+
467
+ ### Basic Usage
468
+
469
+ ```typescript
470
+ import { bottleneck } from '../utils/bottleneck.js';
471
+
472
+ // Create a limiter — exactly one rate unit is required
473
+ const limit = bottleneck({ rpm: 60 }); // 60 requests per minute
474
+
475
+ // Wrap any async call with the limiter
476
+ const result = await limit(() => api.fetchData(id));
477
+ ```
478
+
479
+ ### Config Options
480
+
481
+ Pass exactly one of these (TypeScript enforces this):
482
+
483
+ - `rps` — requests per second
484
+ - `rpm` — requests per minute
485
+ - `rph` — requests per hour
486
+ - `rpd` — requests per day
487
+
488
+ ```typescript
489
+ const fast = bottleneck({ rps: 10 }); // 10 per second
490
+ const slow = bottleneck({ rpd: 1000 }); // 1000 per day
491
+ ```
492
+
493
+ ### Wrapping a Service
494
+
495
+ Create one limiter per API and wrap all calls through it:
496
+
497
+ ```typescript
498
+ // services/github.ts
499
+ import { bottleneck } from '../utils/bottleneck.js';
500
+
501
+ const limit = bottleneck({ rps: 10 });
502
+
503
+ async function getRepo(owner: string, repo: string) {
504
+ return limit(() =>
505
+ fetch('https://api.github.com/repos/' + owner + '/' + repo)
506
+ .then(r => r.json())
507
+ );
508
+ }
509
+
510
+ async function listIssues(owner: string, repo: string) {
511
+ return limit(() =>
512
+ fetch('https://api.github.com/repos/' + owner + '/' + repo + '/issues')
513
+ .then(r => r.json())
514
+ );
515
+ }
516
+
517
+ export default { getRepo, listIssues };
518
+ ```
519
+
520
+ ### Using with Iterate
521
+
522
+ When iterating over items, wrap the API call inside the step callback:
523
+
524
+ ```typescript
525
+ import { bottleneck } from '../utils/bottleneck.js';
526
+
527
+ const limit = bottleneck({ rpm: 60 });
528
+
529
+ brain('process-items')
530
+ .step('Init', ({ state }) => ({ items: state.items }))
531
+ .step('Fetch details', async ({ state }) => {
532
+ const details = await Promise.all(
533
+ state.items.map(item => limit(() => api.getDetail(item.id)))
534
+ );
535
+ return { ...state, details };
536
+ });
537
+ ```
538
+
539
+ ### When Creating Services
540
+
541
+ When building a new service that wraps an external API, research the API's rate limits and add a bottleneck upfront. It's much easier to add rate limiting from the start than to debug 429 errors later.
542
+
290
543
  ## Brain Options Usage
291
544
 
292
545
  When creating brains that need runtime configuration, use the options schema pattern:
@@ -309,14 +562,14 @@ const alertBrain = brain('Alert System')
309
562
  }))
310
563
  .step('Send Alerts', async ({ state, options, slack }) => {
311
564
  if (!state.shouldAlert) return state;
312
-
565
+
313
566
  await slack.post(options.slackChannel, state.message);
314
-
567
+
315
568
  if (options.emailEnabled === 'true') {
316
569
  // Note: CLI options come as strings
317
570
  await email.send('admin@example.com', state.message);
318
571
  }
319
-
572
+
320
573
  return { ...state, alerted: true };
321
574
  });
322
575
  ```
@@ -528,4 +781,4 @@ export default feedbackBrain;
528
781
  - Let TypeScript infer types - don't add explicit type annotations
529
782
  - Don't catch errors unless it's part of the workflow logic
530
783
  - Run `npm run typecheck` frequently to catch type errors early
531
- - Stop the server when done: `px server -k` (default server) or `kill $(cat .positronic-server.pid)`
784
+ - Stop the server when done: `px server -k` (default server) or `kill $(cat .positronic-server.pid)`
@@ -5,6 +5,39 @@ import { google } from '@ai-sdk/google';
5
5
  /**
6
6
  * The BrainRunner executes brains with the configured client and adapters.
7
7
  *
8
+ * ## AI Provider Setup
9
+ *
10
+ * By default this uses Google Gemini. Set GOOGLE_GENERATIVE_AI_API_KEY in
11
+ * your .env file (get a key at https://aistudio.google.com/apikey).
12
+ *
13
+ * To switch to a different provider, install its Vercel AI SDK adapter
14
+ * and swap the model below:
15
+ *
16
+ * **Anthropic (Claude):**
17
+ * ```bash
18
+ * npm install @ai-sdk/anthropic
19
+ * ```
20
+ * ```typescript
21
+ * import { anthropic } from '@ai-sdk/anthropic';
22
+ * const client = new VercelClient(anthropic('claude-sonnet-4-5-20250929'));
23
+ * ```
24
+ * Then set ANTHROPIC_API_KEY in your .env file.
25
+ *
26
+ * **OpenAI:**
27
+ * ```bash
28
+ * npm install @ai-sdk/openai
29
+ * ```
30
+ * ```typescript
31
+ * import { openai } from '@ai-sdk/openai';
32
+ * const client = new VercelClient(openai('gpt-4o'));
33
+ * ```
34
+ * Then set OPENAI_API_KEY in your .env file.
35
+ *
36
+ * Any provider supported by the Vercel AI SDK works — just install the
37
+ * package and pass the model to VercelClient.
38
+ *
39
+ * ## Memory
40
+ *
8
41
  * To add memory (automatic conversation indexing with Mem0):
9
42
  *
10
43
  * ```typescript
@@ -23,7 +56,6 @@ import { google } from '@ai-sdk/google';
23
56
  * });
24
57
  * ```
25
58
  *
26
- * The adapter automatically indexes all agent conversations to memory.
27
59
  * See docs/memory-guide.md for more details.
28
60
  */
29
61
  const client = new VercelClient(google('gemini-3-pro-preview'));
@@ -0,0 +1,26 @@
1
+ type BottleneckConfig =
2
+ | { rps: number; rpm?: never; rph?: never; rpd?: never }
3
+ | { rpm: number; rps?: never; rph?: never; rpd?: never }
4
+ | { rph: number; rps?: never; rpm?: never; rpd?: never }
5
+ | { rpd: number; rps?: never; rpm?: never; rph?: never };
6
+
7
+ export function bottleneck(config: BottleneckConfig) {
8
+ const interval = configToInterval(config);
9
+ let next = 0;
10
+
11
+ return async <T>(fn: () => Promise<T>): Promise<T> => {
12
+ const now = Date.now();
13
+ const delay = Math.max(0, next - now);
14
+ next = Math.max(now, next) + interval;
15
+ if (delay > 0) await new Promise<void>((r) => setTimeout(r, delay));
16
+ return fn();
17
+ };
18
+ }
19
+
20
+ function configToInterval(config: BottleneckConfig) {
21
+ if ('rps' in config && config.rps !== undefined) return 1000 / config.rps;
22
+ if ('rpm' in config && config.rpm !== undefined) return 60_000 / config.rpm;
23
+ if ('rph' in config && config.rph !== undefined) return 3_600_000 / config.rph;
24
+ if ('rpd' in config && config.rpd !== undefined) return 86_400_000 / config.rpd;
25
+ return 0;
26
+ }