@positronic/template-new-project 0.0.67 → 0.0.69

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -53,10 +53,10 @@ module.exports = {
53
53
  ],
54
54
  setup: async ctx => {
55
55
  const devRootPath = process.env.POSITRONIC_LOCAL_PATH;
56
- let coreVersion = '^0.0.67';
57
- let cloudflareVersion = '^0.0.67';
58
- let clientVercelVersion = '^0.0.67';
59
- let genUIComponentsVersion = '^0.0.67';
56
+ let coreVersion = '^0.0.69';
57
+ let cloudflareVersion = '^0.0.69';
58
+ let clientVercelVersion = '^0.0.69';
59
+ let genUIComponentsVersion = '^0.0.69';
60
60
 
61
61
  // Map backend selection to package names
62
62
  const backendPackageMap = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@positronic/template-new-project",
3
- "version": "0.0.67",
3
+ "version": "0.0.69",
4
4
  "publishConfig": {
5
5
  "access": "public"
6
6
  },
@@ -6,6 +6,7 @@ import {
6
6
  BrainRunnerDO,
7
7
  MonitorDO,
8
8
  ScheduleDO,
9
+ GovernorDO,
9
10
  PositronicManifest,
10
11
  } from "@positronic/cloudflare";
11
12
  // Import the generated manifests - NOTE the .js extension for runtime compatibility
@@ -37,4 +38,4 @@ export default {
37
38
  fetch: app.fetch,
38
39
  } satisfies ExportedHandler<Env>;
39
40
 
40
- export { BrainRunnerDO, MonitorDO, ScheduleDO };
41
+ export { BrainRunnerDO, MonitorDO, ScheduleDO, GovernorDO };
@@ -13,13 +13,18 @@
13
13
  {
14
14
  "tag": "v1",
15
15
  "new_sqlite_classes": ["BrainRunnerDO", "MonitorDO", "ScheduleDO"]
16
+ },
17
+ {
18
+ "tag": "v2",
19
+ "new_sqlite_classes": ["GovernorDO"]
16
20
  }
17
21
  ],
18
22
  "durable_objects": {
19
23
  "bindings": [
20
24
  { "name": "BRAIN_RUNNER_DO", "class_name": "BrainRunnerDO" },
21
25
  { "name": "MONITOR_DO", "class_name": "MonitorDO" },
22
- { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" }
26
+ { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" },
27
+ { "name": "GOVERNOR_DO", "class_name": "GovernorDO" }
23
28
  ]
24
29
  },
25
30
  "r2_buckets": [
@@ -59,7 +64,8 @@
59
64
  "bindings": [
60
65
  { "name": "BRAIN_RUNNER_DO", "class_name": "BrainRunnerDO" },
61
66
  { "name": "MONITOR_DO", "class_name": "MonitorDO" },
62
- { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" }
67
+ { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" },
68
+ { "name": "GOVERNOR_DO", "class_name": "GovernorDO" }
63
69
  ]
64
70
  },
65
71
  "r2_buckets": [
package/template/brain.ts CHANGED
@@ -81,7 +81,10 @@ import { components } from './components/index.js';
81
81
  * const memoryTools = createMem0Tools();
82
82
  * ```
83
83
  *
84
- * See docs/memory-guide.md for more details on memory configuration.
84
+ * Memory is automatically scoped to the current user (via currentUser.id)
85
+ * and the brain name. No need to pass userId manually.
86
+ *
87
+ * See docs/memory-guide.md for full details.
85
88
  */
86
89
  export const brain = createBrain({
87
90
  components,
@@ -7,7 +7,7 @@ const exampleBrain = brain('example')
7
7
  topic: 'AI workflows',
8
8
  }))
9
9
  .step('Generate greeting', async ({ state, client }) => {
10
- const result = await client.generateObject({
10
+ const { object } = await client.generateObject({
11
11
  schema: z.object({
12
12
  greeting: z.string(),
13
13
  funFact: z.string(),
@@ -18,8 +18,8 @@ const exampleBrain = brain('example')
18
18
  });
19
19
  return {
20
20
  ...state,
21
- greeting: result.greeting,
22
- funFact: result.funFact,
21
+ greeting: object.greeting,
22
+ funFact: object.funFact,
23
23
  };
24
24
  })
25
25
  .step('Finish', ({ state }) => ({
@@ -135,6 +135,41 @@ Key points about prompt steps:
135
135
  - You can optionally provide a transform function as the third parameter
136
136
  - Type inference works throughout - TypeScript knows about your schema types
137
137
 
138
+ #### Per-Step Client Overrides
139
+
140
+ You can use a different AI model for a specific prompt step by passing a `client` option. This is useful when some steps need a cheaper model for simple tasks and others need a more capable model for complex reasoning:
141
+
142
+ ```typescript
143
+ import { createAnthropicClient } from '@positronic/client-anthropic';
144
+
145
+ const fastModel = createAnthropicClient({ model: 'claude-haiku-4-5-20251001' });
146
+ const smartModel = createAnthropicClient({ model: 'claude-sonnet-4-5-20250929' });
147
+
148
+ brain('Multi-Model Brain')
149
+ .prompt('Quick summary', {
150
+ template: ({ document }) => `Summarize this briefly: <%= '${document}' %>`,
151
+ outputSchema: {
152
+ schema: z.object({ summary: z.string() }),
153
+ name: 'quickSummary' as const,
154
+ },
155
+ client: fastModel, // Use a fast, cheap model for summarization
156
+ })
157
+ .prompt('Deep analysis', {
158
+ template: ({ quickSummary }) =>
159
+ `Analyze the implications of this summary: <%= '${quickSummary.summary}' %>`,
160
+ outputSchema: {
161
+ schema: z.object({
162
+ insights: z.array(z.string()),
163
+ risks: z.array(z.string()),
164
+ }),
165
+ name: 'analysis' as const,
166
+ },
167
+ client: smartModel, // Use a more capable model for analysis
168
+ });
169
+ ```
170
+
171
+ When deployed to Cloudflare, rate limiting is applied automatically to all clients — including per-step overrides — through the Governor system. Brain authors don't need to worry about rate limiting.
172
+
138
173
  ### 3. Nested Brains
139
174
 
140
175
  Compose complex workflows from smaller brains:
@@ -607,6 +642,101 @@ const brainWithComponents = brain('Custom UI Brain')
607
642
  });
608
643
  ```
609
644
 
645
+ ### Typed Store with `withStore()`
646
+
647
+ The `withStore()` method declares a typed key-value store for persistent structured data. Unlike brain state (which resets each run), store data persists across runs.
648
+
649
+ #### Declaring Store Shape
650
+
651
+ Use Zod types to declare the shape of your store. All store keys are scoped per-brain — each brain gets its own namespace automatically.
652
+
653
+ ```typescript
654
+ import { z } from 'zod';
655
+
656
+ const myBrain = brain('email-preferences')
657
+ .withStore({
658
+ deselectedThreads: z.array(z.string()),
659
+ lastProcessedAt: z.number(),
660
+ })
661
+ .step('Process', async ({ store }) => {
662
+ // Typed get — returns the value or undefined if not set
663
+ const deselected = await store.get('deselectedThreads') ?? [];
664
+ const lastTime = await store.get('lastProcessedAt');
665
+
666
+ // Typed set
667
+ await store.set('lastProcessedAt', Date.now());
668
+
669
+ return { deselected, lastTime };
670
+ });
671
+ ```
672
+
673
+ #### Per-User Fields
674
+
675
+ Mark fields as per-user to scope them to the current user. This is useful for user preferences, user-specific state, or any data that should be isolated between users.
676
+
677
+ ```typescript
678
+ const myBrain = brain('dashboard')
679
+ .withStore({
680
+ globalConfig: z.object({ theme: z.string() }), // shared across all users
681
+ userPreferences: { type: z.object({ darkMode: z.boolean() }), perUser: true }, // per-user
682
+ })
683
+ .step('Load Preferences', async ({ store }) => {
684
+ const config = await store.get('globalConfig');
685
+ const prefs = await store.get('userPreferences'); // scoped to currentUser automatically
686
+ return { config, prefs };
687
+ });
688
+ ```
689
+
690
+ Per-user fields require a `currentUser` to be set when running the brain. If a per-user field is accessed without a current user, an error is thrown.
691
+
692
+ #### Store Scoping
693
+
694
+ All store keys are automatically namespaced:
695
+
696
+ - **Shared fields**: scoped per-brain (e.g., brain "my-brain" key "counter" is isolated from brain "other-brain" key "counter")
697
+ - **Per-user fields**: scoped per-brain AND per-user (each user gets their own value)
698
+
699
+ There is no global scope — every field belongs to a specific brain.
700
+
701
+ #### Store Operations
702
+
703
+ The store provides four operations:
704
+
705
+ ```typescript
706
+ await store.get('key'); // Returns T | undefined
707
+ await store.set('key', value); // Sets the value
708
+ await store.delete('key'); // Removes the key
709
+ await store.has('key'); // Returns boolean
710
+ ```
711
+
712
+ #### Using with `createBrain()`
713
+
714
+ You can declare store fields at the project level so all brains share the same store shape:
715
+
716
+ ```typescript
717
+ // brain.ts
718
+ export const brain = createBrain({
719
+ services: { slack },
720
+ store: {
721
+ processedCount: z.number(),
722
+ userSettings: { type: z.object({ notifications: z.boolean() }), perUser: true },
723
+ },
724
+ });
725
+ ```
726
+
727
+ Or declare per-brain stores for brain-specific data:
728
+
729
+ ```typescript
730
+ // brains/my-brain.ts
731
+ export default brain('my-brain')
732
+ .withStore({ counter: z.number() })
733
+ .step('Increment', async ({ store }) => {
734
+ const count = await store.get('counter') ?? 0;
735
+ await store.set('counter', count + 1);
736
+ return { count: count + 1 };
737
+ });
738
+ ```
739
+
610
740
  ### Using `createBrain()` for Project Configuration
611
741
 
612
742
  For project-wide configuration, use `createBrain()` in your `brain.ts` file:
@@ -634,11 +764,14 @@ export const brain = createBrain({
634
764
  props: z.object({ message: z.string(), type: z.enum(['info', 'warning', 'error']) }),
635
765
  render: (props) => `<div class="alert alert-<%= '${props.type}' %>"><%= '${props.message}' %></div>`
636
766
  }
767
+ },
768
+ store: {
769
+ processedCount: z.number(),
637
770
  }
638
771
  });
639
772
  ```
640
773
 
641
- All brains created with this factory will have access to the configured services, tools, and components.
774
+ All brains created with this factory will have access to the configured services, tools, components, and store.
642
775
 
643
776
  ## Running Brains
644
777
 
@@ -733,13 +866,31 @@ for await (const event of brain.run({ client })) {
733
866
 
734
867
  ## Resources
735
868
 
736
- Access loaded resources with type-safe API:
869
+ Resources are files in your project's `/resources` directory that brains can access at runtime. They provide a type-safe way to load text and binary content.
870
+
871
+ ### Adding Resources
872
+
873
+ Place files in the `/resources` directory:
874
+
875
+ ```
876
+ resources/
877
+ ├── config.json
878
+ ├── prompts/
879
+ │   ├── customerSupport.md
880
+ │   └── codeReview.md
881
+ └── data/
882
+     └── records.csv
883
+ ```
884
+
885
+ ### Accessing Resources
886
+
887
+ Access resources using dot notation that matches the file structure:
737
888
 
738
889
  ```typescript
739
890
  brain('Resource Example').step('Load Data', async ({ resources }) => {
740
- const config = await resources.config.loadText();
741
- const data = await resources.data.records.loadText();
742
- return { config: JSON.parse(config), data };
891
+ const config = await resources.config.load();
892
+ const template = await resources.prompts.customerSupport.load();
893
+ return { config: JSON.parse(config), template };
743
894
  });
744
895
  ```
745
896
 
@@ -748,7 +899,7 @@ Resources are also available in prompt templates:
748
899
  ```typescript
749
900
  brain('Template Example').prompt('Generate Content', {
750
901
  template: async (state, resources) => {
751
- const template = await resources.prompts.customerSupport.loadText();
902
+ const template = await resources.prompts.customerSupport.load();
752
903
  return template.replace('{{issue}}', state.issue);
753
904
  },
754
905
  outputSchema: {
@@ -758,6 +909,52 @@ brain('Template Example').prompt('Generate Content', {
758
909
  });
759
910
  ```
760
911
 
912
+ ### Resource Methods
913
+
914
+ Each resource has a single `load()` method that returns the appropriate type:
915
+
916
+ - `TextResource.load()` - Returns `Promise<string>` (for text files like `.md`, `.json`, `.txt`)
917
+ - `BinaryResource.load()` - Returns `Promise<Buffer>` (for binary files like images)
918
+
919
+ The resource type is determined automatically based on file content detection when you run `px resources types`.
920
+
921
+ ### File Naming and Property Access
922
+
923
+ The resource name you use in code must be a valid JavaScript identifier. The system strips file extensions automatically, so `config.json` is accessed as `resources.config`.
924
+
925
+ **Important**: Resource filenames must be valid JS identifiers (after extension stripping) to be accessible via dot notation. This means:
926
+
927
+ ```
928
+ resources/
929
+ ├── myPrompt.md ✅ → resources.myPrompt.load()
930
+ ├── config.json ✅ → resources.config.load()
931
+ ├── reference-material.md ❌ → "reference-material" has a hyphen, not a valid identifier
932
+ └── referenceMaterial.md ✅ → resources.referenceMaterial.load()
933
+ ```
934
+
935
+ Use camelCase or single-word names for your resource files. Avoid hyphens, spaces, or other characters that aren't valid in JavaScript identifiers.
936
+
937
+ You can also access resources by their full filename (including extension) using bracket notation:
938
+
939
+ ```typescript
940
+ const content = await resources['config.json'].load();
941
+ ```
942
+
943
+ ### Type Generation
944
+
945
+ Run `px resources types` to generate a `resources.d.ts` file in your project root. This provides TypeScript type safety for your resources — your editor will autocomplete resource names and flag typos.
946
+
947
+ The generated types distinguish between `TextResource` and `BinaryResource` based on file content detection, so `load()` returns the correct type (`string` or `Buffer`).
948
+
949
+ ### Path-Based Access
950
+
951
+ You can also load resources by path string at any level of the resource tree:
952
+
953
+ ```typescript
954
+ const content = await resources.loadText('prompts/customerSupport.md');
955
+ const binary = await resources.loadBinary('images/logo.png');
956
+ ```
957
+
761
958
  ## Organizing Complex Prompts
762
959
 
763
960
  When prompts become more than a sentence or two, extract them into separate files for better maintainability:
@@ -797,7 +994,7 @@ interface FilterPromptState {
797
994
  export const aiFilterPrompt = {
798
995
  template: async (state: FilterPromptState, resources: Resources) => {
799
996
  // Load a prompt template from resources
800
- const template = await resources.prompts.hnFilter.loadText();
997
+ const template = await resources.prompts.hnFilter.load();
801
998
 
802
999
  // Build the prompt with state data
803
1000
  const articleList = state.articles
@@ -885,6 +1082,8 @@ brain('Batch Processor')
885
1082
  - `concurrency: number` - Maximum number of items processed in parallel (default: 10)
886
1083
  - `error: (item, error) => Response` - Fallback function when a request fails
887
1084
 
1085
+ Batch prompts also support per-step `client` overrides (see Prompt Steps above), so you can use a different model for batch processing.
1086
+
888
1087
  ### Result Format
889
1088
 
890
1089
  The result is stored as an array of `[item, response]` tuples, preserving the relationship between each input item and its generated response.
@@ -1373,7 +1572,7 @@ const completeBrain = brain({
1373
1572
  .prompt('Generate Plan', {
1374
1573
  template: async (state, resources) => {
1375
1574
  // Load a template from resources
1376
- const template = await resources.templates.projectPlan.loadText();
1575
+ const template = await resources.templates.projectPlan.load();
1377
1576
  return template.replace('{{context}}', 'software project');
1378
1577
  },
1379
1578
  outputSchema: {
@@ -0,0 +1,292 @@
1
+ # Memory Guide
2
+
3
+ This guide covers the memory system in Positronic, which enables brains to store and retrieve long-term memories using [Mem0](https://mem0.ai) or other memory providers.
4
+
5
+ ## Overview
6
+
7
+ The memory system provides:
8
+ - **Long-term memory storage** - Persist facts, preferences, and context across brain runs
9
+ - **Semantic search** - Retrieve relevant memories based on natural language queries
10
+ - **Automatic conversation indexing** - Optionally store all conversations for later retrieval
11
+ - **Tools for agents** - Built-in tools that let agents store and recall memories
12
+ - **Automatic user scoping** - Memories are scoped to the current user via `currentUser`, no manual userId threading needed
13
+
14
+ ## Quick Start
15
+
16
+ ### 1. Install the package
17
+
18
+ ```bash
19
+ npm install @positronic/mem0
20
+ ```
21
+
22
+ ### 2. Set up the provider
23
+
24
+ Add your Mem0 API key to `.env`:
25
+
26
+ ```bash
27
+ MEM0_API_KEY=your-api-key-here
28
+ ```
29
+
30
+ ### 3. Configure in brain.ts
31
+
32
+ ```typescript
33
+ import { createBrain, defaultTools } from '@positronic/core';
34
+ import { createMem0Provider, createMem0Tools } from '@positronic/mem0';
35
+ import { components } from './components/index.js';
36
+
37
+ const memory = createMem0Provider({
38
+ apiKey: process.env.MEM0_API_KEY!,
39
+ });
40
+
41
+ export const brain = createBrain({
42
+ components,
43
+ defaultTools,
44
+ memory,
45
+ });
46
+ ```
47
+
48
+ ### 4. Use memory tools in agents
49
+
50
+ ```typescript
51
+ import { brain } from '../brain.js';
52
+ import { createMem0Tools } from '@positronic/mem0';
53
+ import { z } from 'zod';
54
+
55
+ const memoryTools = createMem0Tools();
56
+
57
+ export default brain('assistant')
58
+ .brain('Help User', () => ({
59
+ system: 'You are helpful. Use rememberFact to store user preferences.',
60
+ prompt: 'The user said: I prefer dark mode',
61
+ tools: {
62
+ ...memoryTools,
63
+ done: {
64
+ description: 'Complete the task',
65
+ inputSchema: z.object({ result: z.string() }),
66
+ terminal: true,
67
+ },
68
+ },
69
+ }));
70
+ ```
71
+
72
+ ## Memory Tools
73
+
74
+ The package provides two tools that agents can use:
75
+
76
+ ### rememberFact
77
+
78
+ Stores a fact in long-term memory.
79
+
80
+ - **Input**: `{ fact: string }`
81
+ - **Output**: `{ remembered: boolean, fact: string }`
82
+
83
+ When the agent calls `rememberFact({ fact: "User prefers dark mode" })`, the fact is stored in Mem0 and can be retrieved later.
84
+
85
+ ### recallMemories
86
+
87
+ Searches for relevant memories.
88
+
89
+ - **Input**: `{ query: string, limit?: number }`
90
+ - **Output**: `{ found: number, memories: Array<{ content: string, relevance?: number }> }`
91
+
92
+ When the agent calls `recallMemories({ query: "user preferences" })`, it receives matching memories with relevance scores.
93
+
94
+ ### Using Memory Tools in Agents
95
+
96
+ ```typescript
97
+ import { brain } from '../brain.js';
98
+ import { createMem0Tools } from '@positronic/mem0';
99
+ import { z } from 'zod';
100
+
101
+ const memoryTools = createMem0Tools();
102
+
103
+ export default brain('personalized-assistant')
104
+ .brain('Chat', () => ({
105
+ system: `You are a personalized assistant.
106
+
107
+ Use rememberFact to store important information about the user:
108
+ - Preferences (theme, communication style, etc.)
109
+ - Context (current projects, goals)
110
+ - Any facts they want you to remember
111
+
112
+ Use recallMemories before responding to check for relevant context.`,
113
+ prompt: userMessage,
114
+ tools: {
115
+ ...memoryTools,
116
+ done: {
117
+ description: 'Send final response',
118
+ inputSchema: z.object({ response: z.string() }),
119
+ terminal: true,
120
+ },
121
+ },
122
+ }));
123
+ ```
124
+
125
+ ## Automatic Conversation Indexing
126
+
127
+ The Mem0 adapter automatically stores all agent conversations to memory. This builds up context over time without explicit tool calls.
128
+
129
+ ### Setting Up the Adapter
130
+
131
+ In your `runner.ts`:
132
+
133
+ ```typescript
134
+ import { BrainRunner } from '@positronic/core';
135
+ import { createMem0Adapter, createMem0Provider } from '@positronic/mem0';
136
+
137
+ const provider = createMem0Provider({
138
+ apiKey: process.env.MEM0_API_KEY!,
139
+ });
140
+
141
+ const adapter = createMem0Adapter({ provider });
142
+
143
+ export const runner = new BrainRunner({
144
+ adapters: [adapter],
145
+ client: myClient,
146
+ });
147
+ ```
148
+
149
+ ### Adapter Behavior
150
+
151
+ - **On agent start**: Buffers the initial prompt as a user message
152
+ - **During execution**: Buffers all user and assistant messages
153
+ - **On completion**: Flushes buffer to memory provider
154
+ - **On error/cancel**: Discards buffer (doesn't store failed conversations)
155
+
156
+ ### Including Tool Calls
157
+
158
+ By default, tool calls are not included in the indexed conversation. Enable this for full conversation history:
159
+
160
+ ```typescript
161
+ const adapter = createMem0Adapter({
162
+ provider,
163
+ includeToolCalls: true,
164
+ });
165
+ ```
166
+
167
+ ## Accessing Memory in Steps
168
+
169
+ When memory is attached, you can access it directly in step functions:
170
+
171
+ ### In Regular Steps
172
+
173
+ ```typescript
174
+ export default brain('my-brain')
175
+ .step('Load Context', async ({ memory }) => {
176
+ const memories = await memory.search('user preferences', {
177
+ limit: 5,
178
+ });
179
+
180
+ return {
181
+ context: memories.map(m => m.content).join('\n'),
182
+ };
183
+ });
184
+ ```
185
+
186
+ ### In Agent Config Functions
187
+
188
+ ```typescript
189
+ export default brain('my-brain')
190
+ .brain('Process', async ({ memory }) => {
191
+ const prefs = await memory.search('user preferences');
192
+
193
+ const context = prefs.length > 0
194
+ ? '\n\nUser preferences:\n' + prefs.map(p => '- ' + p.content).join('\n')
195
+ : '';
196
+
197
+ return {
198
+ system: 'You are helpful.' + context,
199
+ prompt: 'Help the user with their request',
200
+ tools: { /* ... */ },
201
+ };
202
+ });
203
+ ```
204
+
205
+ ## Helper Functions
206
+
207
+ The package includes helper functions for common memory patterns:
208
+
209
+ ### formatMemories
210
+
211
+ Formats an array of memories into a readable string:
212
+
213
+ ```typescript
214
+ import { formatMemories } from '@positronic/mem0';
215
+
216
+ const memories = await memory.search('preferences');
217
+
218
+ const text = formatMemories(memories);
219
+ // "1. User prefers dark mode\n2. User likes concise responses"
220
+
221
+ const formatted = formatMemories(memories, {
222
+ header: 'Known preferences:',
223
+ includeScores: true,
224
+ emptyText: 'No preferences found',
225
+ });
226
+ ```
227
+
228
+ ### createMemorySystemPrompt
229
+
230
+ Creates a system prompt augmented with relevant memories:
231
+
232
+ ```typescript
233
+ import { createMemorySystemPrompt } from '@positronic/mem0';
234
+
235
+ export default brain('my-brain')
236
+ .brain('Chat', async ({ memory }) => {
237
+ const system = await createMemorySystemPrompt(
238
+ memory,
239
+ 'You are a helpful assistant.',
240
+ 'user context and preferences',
241
+ {
242
+ limit: 10,
243
+ memoriesHeader: '\n\nUser context:',
244
+ }
245
+ );
246
+
247
+ return { system, prompt: userMessage, tools: { /* ... */ } };
248
+ });
249
+ ```
250
+
251
+ ### getMemoryContext
252
+
253
+ Gets just the memory context block for manual prompt construction:
254
+
255
+ ```typescript
256
+ import { getMemoryContext } from '@positronic/mem0';
257
+
258
+ const context = await getMemoryContext(memory, 'user preferences', {
259
+ limit: 5,
260
+ });
261
+
262
+ const system = 'You are helpful.\n\n' + (context ? '## User Context\n' + context : '');
263
+ ```
264
+
265
+ ## Memory Scoping
266
+
267
+ Memories are scoped by two identifiers:
268
+
269
+ ### agentId
270
+
271
+ Automatically set to the brain/step title. Memories are isolated per agent:
272
+
273
+ ```typescript
274
+ brain('support-agent').withMemory(memory) // agentId = 'support-agent'
275
+ brain('sales-agent').withMemory(memory) // agentId = 'sales-agent'
276
+ ```
277
+
278
+ ### userId
279
+
280
+ Automatically set from `currentUser.id` when the brain runs. All memory operations are automatically scoped to the current user — no need to pass userId manually:
281
+
282
+ ```typescript
283
+ // userId is auto-bound from currentUser — just use memory directly
284
+ await memory.search('preferences');
285
+ await memory.add(messages);
286
+
287
+ // In tools — the agent just passes the fact/query, userId is automatic
288
+ rememberFact({ fact: 'Prefers dark mode' })
289
+ recallMemories({ query: 'preferences' })
290
+ ```
291
+
292
+ See the [currentUser section in positronic-guide.md](positronic-guide.md#currentuser) for how to set the current user when running brains.
@@ -232,9 +232,60 @@ const api = {
232
232
  };
233
233
  ```
234
234
 
235
+ ## currentUser
236
+
237
+ Every brain run requires a `currentUser` — an object with at least an `id` field that identifies who is running the brain. This identity is used to scope per-user data like memory and store fields.
238
+
239
+ ### How currentUser Gets Set
240
+
241
+ The way `currentUser` is provided depends on how the brain is running:
242
+
243
+ **Deployed (Cloudflare / production)**: The backend sets `currentUser` from the authenticated request. When a user hits an API endpoint to start a brain run, the auth middleware determines their identity and passes it through. You don't need to set it manually.
244
+
245
+ **Local development with `px brain run`**: The CLI passes a default user identity automatically. You don't need to do anything special.
246
+
247
+ **Local development with `runner.ts`**: When calling `runner.run()` directly, you must pass `currentUser`:
248
+
249
+ ```typescript
250
+ import { runner } from './runner.js';
251
+ import myBrain from './brains/my-brain.js';
252
+
253
+ await runner.run(myBrain, {
254
+ currentUser: { id: 'local-dev-user' },
255
+ });
256
+ ```
257
+
258
+ **In tests**: Pass `currentUser` when running the brain:
259
+
260
+ ```typescript
261
+ const events = await collectEvents(
262
+ testBrain.run({
263
+ client: mockClient,
264
+ currentUser: { id: 'test-user' },
265
+ })
266
+ );
267
+ ```
268
+
269
+ ### What currentUser Scopes
270
+
271
+ - **Memory**: All memory operations (search, add) are automatically scoped to the current user. No need to pass `userId` manually — see [docs/memory-guide.md](memory-guide.md).
272
+ - **Store (per-user fields)**: Store fields marked with `perUser: true` are automatically scoped to the current user — see [docs/brain-dsl-guide.md](brain-dsl-guide.md).
273
+
274
+ ### Accessing currentUser in Steps
275
+
276
+ `currentUser` is available in step context if you need it:
277
+
278
+ ```typescript
279
+ export default brain('greet')
280
+ .step('Hello', ({ currentUser }) => ({
281
+ greeting: 'Hello, user ' + currentUser.id,
282
+ }));
283
+ ```
284
+
235
285
  ## Getting Help
236
286
 
237
287
  - **Documentation**: https://positronic.dev
238
288
  - **CLI Help**: `px --help`
239
289
  - **Brain DSL Guide**: `/docs/brain-dsl-guide.md` (includes UI steps for generating forms)
290
+ - **Memory Guide**: `/docs/memory-guide.md`
240
291
  - **Testing Guide**: `/docs/brain-testing-guide.md`
@@ -26,8 +26,10 @@ import { google } from '@ai-sdk/google';
26
26
  * The adapter automatically indexes all agent conversations to memory.
27
27
  * See docs/memory-guide.md for more details.
28
28
  */
29
+ const client = new VercelClient(google('gemini-3-pro-preview'));
30
+
29
31
  export const runner = new BrainRunner({
30
32
  adapters: [],
31
- client: new VercelClient(google('gemini-3-pro-preview')),
33
+ client,
32
34
  resources: {},
33
- });
35
+ });