@positronic/template-new-project 0.0.67 → 0.0.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -53,10 +53,10 @@ module.exports = {
53
53
  ],
54
54
  setup: async ctx => {
55
55
  const devRootPath = process.env.POSITRONIC_LOCAL_PATH;
56
- let coreVersion = '^0.0.67';
57
- let cloudflareVersion = '^0.0.67';
58
- let clientVercelVersion = '^0.0.67';
59
- let genUIComponentsVersion = '^0.0.67';
56
+ let coreVersion = '^0.0.68';
57
+ let cloudflareVersion = '^0.0.68';
58
+ let clientVercelVersion = '^0.0.68';
59
+ let genUIComponentsVersion = '^0.0.68';
60
60
 
61
61
  // Map backend selection to package names
62
62
  const backendPackageMap = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@positronic/template-new-project",
3
- "version": "0.0.67",
3
+ "version": "0.0.68",
4
4
  "publishConfig": {
5
5
  "access": "public"
6
6
  },
@@ -6,6 +6,7 @@ import {
6
6
  BrainRunnerDO,
7
7
  MonitorDO,
8
8
  ScheduleDO,
9
+ GovernorDO,
9
10
  PositronicManifest,
10
11
  } from "@positronic/cloudflare";
11
12
  // Import the generated manifests - NOTE the .js extension for runtime compatibility
@@ -37,4 +38,4 @@ export default {
37
38
  fetch: app.fetch,
38
39
  } satisfies ExportedHandler<Env>;
39
40
 
40
- export { BrainRunnerDO, MonitorDO, ScheduleDO };
41
+ export { BrainRunnerDO, MonitorDO, ScheduleDO, GovernorDO };
@@ -13,13 +13,18 @@
13
13
  {
14
14
  "tag": "v1",
15
15
  "new_sqlite_classes": ["BrainRunnerDO", "MonitorDO", "ScheduleDO"]
16
+ },
17
+ {
18
+ "tag": "v2",
19
+ "new_sqlite_classes": ["GovernorDO"]
16
20
  }
17
21
  ],
18
22
  "durable_objects": {
19
23
  "bindings": [
20
24
  { "name": "BRAIN_RUNNER_DO", "class_name": "BrainRunnerDO" },
21
25
  { "name": "MONITOR_DO", "class_name": "MonitorDO" },
22
- { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" }
26
+ { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" },
27
+ { "name": "GOVERNOR_DO", "class_name": "GovernorDO" }
23
28
  ]
24
29
  },
25
30
  "r2_buckets": [
@@ -59,7 +64,8 @@
59
64
  "bindings": [
60
65
  { "name": "BRAIN_RUNNER_DO", "class_name": "BrainRunnerDO" },
61
66
  { "name": "MONITOR_DO", "class_name": "MonitorDO" },
62
- { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" }
67
+ { "name": "SCHEDULE_DO", "class_name": "ScheduleDO" },
68
+ { "name": "GOVERNOR_DO", "class_name": "GovernorDO" }
63
69
  ]
64
70
  },
65
71
  "r2_buckets": [
@@ -7,7 +7,7 @@ const exampleBrain = brain('example')
7
7
  topic: 'AI workflows',
8
8
  }))
9
9
  .step('Generate greeting', async ({ state, client }) => {
10
- const result = await client.generateObject({
10
+ const { object } = await client.generateObject({
11
11
  schema: z.object({
12
12
  greeting: z.string(),
13
13
  funFact: z.string(),
@@ -18,8 +18,8 @@ const exampleBrain = brain('example')
18
18
  });
19
19
  return {
20
20
  ...state,
21
- greeting: result.greeting,
22
- funFact: result.funFact,
21
+ greeting: object.greeting,
22
+ funFact: object.funFact,
23
23
  };
24
24
  })
25
25
  .step('Finish', ({ state }) => ({
@@ -135,6 +135,41 @@ Key points about prompt steps:
135
135
  - You can optionally provide a transform function as the third parameter
136
136
  - Type inference works throughout - TypeScript knows about your schema types
137
137
 
138
+ #### Per-Step Client Overrides
139
+
140
+ You can use a different AI model for a specific prompt step by passing a `client` option. This is useful when some steps need a cheaper model for simple tasks and others need a more capable model for complex reasoning:
141
+
142
+ ```typescript
143
+ import { createAnthropicClient } from '@positronic/client-anthropic';
144
+
145
+ const fastModel = createAnthropicClient({ model: 'claude-haiku-4-5-20251001' });
146
+ const smartModel = createAnthropicClient({ model: 'claude-sonnet-4-5-20250929' });
147
+
148
+ brain('Multi-Model Brain')
149
+ .prompt('Quick summary', {
150
+ template: ({ document }) => `Summarize this briefly: <%= '${document}' %>`,
151
+ outputSchema: {
152
+ schema: z.object({ summary: z.string() }),
153
+ name: 'quickSummary' as const,
154
+ },
155
+ client: fastModel, // Use a fast, cheap model for summarization
156
+ })
157
+ .prompt('Deep analysis', {
158
+ template: ({ quickSummary }) =>
159
+ `Analyze the implications of this summary: <%= '${quickSummary.summary}' %>`,
160
+ outputSchema: {
161
+ schema: z.object({
162
+ insights: z.array(z.string()),
163
+ risks: z.array(z.string()),
164
+ }),
165
+ name: 'analysis' as const,
166
+ },
167
+ client: smartModel, // Use a more capable model for analysis
168
+ });
169
+ ```
170
+
171
+ When deployed to Cloudflare, rate limiting is applied automatically to all clients — including per-step overrides — through the Governor system. Brain authors don't need to worry about rate limiting.
172
+
138
173
  ### 3. Nested Brains
139
174
 
140
175
  Compose complex workflows from smaller brains:
@@ -733,13 +768,31 @@ for await (const event of brain.run({ client })) {
733
768
 
734
769
  ## Resources
735
770
 
736
- Access loaded resources with type-safe API:
771
+ Resources are files in your project's `/resources` directory that brains can access at runtime. They provide a type-safe way to load text and binary content.
772
+
773
+ ### Adding Resources
774
+
775
+ Place files in the `/resources` directory:
776
+
777
+ ```
778
+ resources/
779
+ ├── config.json
780
+ ├── prompts/
781
+ │ ├── customerSupport.md
782
+ │ └── codeReview.md
783
+ └── data/
784
+ └── records.csv
785
+ ```
786
+
787
+ ### Accessing Resources
788
+
789
+ Access resources using dot notation that matches the file structure:
737
790
 
738
791
  ```typescript
739
792
  brain('Resource Example').step('Load Data', async ({ resources }) => {
740
- const config = await resources.config.loadText();
741
- const data = await resources.data.records.loadText();
742
- return { config: JSON.parse(config), data };
793
+ const config = await resources.config.load();
794
+ const template = await resources.prompts.customerSupport.load();
795
+ return { config: JSON.parse(config), template };
743
796
  });
744
797
  ```
745
798
 
@@ -748,7 +801,7 @@ Resources are also available in prompt templates:
748
801
  ```typescript
749
802
  brain('Template Example').prompt('Generate Content', {
750
803
  template: async (state, resources) => {
751
- const template = await resources.prompts.customerSupport.loadText();
804
+ const template = await resources.prompts.customerSupport.load();
752
805
  return template.replace('{{issue}}', state.issue);
753
806
  },
754
807
  outputSchema: {
@@ -758,6 +811,52 @@ brain('Template Example').prompt('Generate Content', {
758
811
  });
759
812
  ```
760
813
 
814
+ ### Resource Methods
815
+
816
+ Each resource has a single `load()` method that returns the appropriate type:
817
+
818
+ - `TextResource.load()` - Returns `Promise<string>` (for text files like `.md`, `.json`, `.txt`)
819
+ - `BinaryResource.load()` - Returns `Promise<Buffer>` (for binary files like images)
820
+
821
+ The resource type is determined automatically based on file content detection when you run `px resources types`.
822
+
823
+ ### File Naming and Property Access
824
+
825
+ The resource name you use in code must be a valid JavaScript identifier. The system strips file extensions automatically, so `config.json` is accessed as `resources.config`.
826
+
827
+ **Important**: Resource filenames must be valid JS identifiers (after extension stripping) to be accessible via dot notation. This means:
828
+
829
+ ```
830
+ resources/
831
+ ├── myPrompt.md ✅ → resources.myPrompt.load()
832
+ ├── config.json ✅ → resources.config.load()
833
+ ├── reference-material.md ❌ → "reference-material" has a hyphen, not a valid identifier
834
+ └── referenceMaterial.md ✅ → resources.referenceMaterial.load()
835
+ ```
836
+
837
+ Use camelCase or single-word names for your resource files. Avoid hyphens, spaces, or other characters that aren't valid in JavaScript identifiers.
838
+
839
+ You can also access resources by their full filename (including extension) using bracket notation:
840
+
841
+ ```typescript
842
+ const content = await resources['config.json'].load();
843
+ ```
844
+
845
+ ### Type Generation
846
+
847
+ Run `px resources types` to generate a `resources.d.ts` file in your project root. This provides TypeScript type safety for your resources — your editor will autocomplete resource names and flag typos.
848
+
849
+ The generated types distinguish between `TextResource` and `BinaryResource` based on file content detection, so `load()` returns the correct type (`string` or `Buffer`).
850
+
851
+ ### Path-Based Access
852
+
853
+ You can also load resources by path string at any level of the resource tree:
854
+
855
+ ```typescript
856
+ const content = await resources.loadText('prompts/customerSupport.md');
857
+ const binary = await resources.loadBinary('images/logo.png');
858
+ ```
859
+
761
860
  ## Organizing Complex Prompts
762
861
 
763
862
  When prompts become more than a sentence or two, extract them into separate files for better maintainability:
@@ -797,7 +896,7 @@ interface FilterPromptState {
797
896
  export const aiFilterPrompt = {
798
897
  template: async (state: FilterPromptState, resources: Resources) => {
799
898
  // Load a prompt template from resources
800
- const template = await resources.prompts.hnFilter.loadText();
899
+ const template = await resources.prompts.hnFilter.load();
801
900
 
802
901
  // Build the prompt with state data
803
902
  const articleList = state.articles
@@ -885,6 +984,8 @@ brain('Batch Processor')
885
984
  - `concurrency: number` - Maximum number of items processed in parallel (default: 10)
886
985
  - `error: (item, error) => Response` - Fallback function when a request fails
887
986
 
987
+ Batch prompts also support per-step `client` overrides (see Prompt Steps above), so you can use a different model for batch processing.
988
+
888
989
  ### Result Format
889
990
 
890
991
  The result is stored as an array of `[item, response]` tuples, preserving the relationship between each input item and its generated response.
@@ -1373,7 +1474,7 @@ const completeBrain = brain({
1373
1474
  .prompt('Generate Plan', {
1374
1475
  template: async (state, resources) => {
1375
1476
  // Load a template from resources
1376
- const template = await resources.templates.projectPlan.loadText();
1477
+ const template = await resources.templates.projectPlan.load();
1377
1478
  return template.replace('{{context}}', 'software project');
1378
1479
  },
1379
1480
  outputSchema: {
@@ -26,8 +26,10 @@ import { google } from '@ai-sdk/google';
26
26
  * The adapter automatically indexes all agent conversations to memory.
27
27
  * See docs/memory-guide.md for more details.
28
28
  */
29
+ const client = new VercelClient(google('gemini-3-pro-preview'));
30
+
29
31
  export const runner = new BrainRunner({
30
32
  adapters: [],
31
- client: new VercelClient(google('gemini-3-pro-preview')),
33
+ client,
32
34
  resources: {},
33
- });
35
+ });