@mariozechner/pi-ai 0.5.39 → 0.5.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -194,6 +194,51 @@ for (const block of response.content) {
194
194
  }
195
195
  ```
196
196
 
197
+ ### Streaming Tool Calls with Partial JSON
198
+
199
+ During streaming, tool call arguments are progressively parsed as they arrive. This enables real-time UI updates before the complete arguments are available:
200
+
201
+ ```typescript
202
+ const s = stream(model, context);
203
+
204
+ for await (const event of s) {
205
+ if (event.type === 'toolcall_delta') {
206
+ const toolCall = event.partial.content[event.contentIndex];
207
+
208
+ // toolCall.arguments contains partially parsed JSON during streaming
209
+ // This allows for progressive UI updates
210
+ if (toolCall.type === 'toolCall' && toolCall.arguments) {
211
+ // BE DEFENSIVE: arguments may be incomplete
212
+ // Example: Show file path being written even before content is complete
213
+ if (toolCall.name === 'write_file' && toolCall.arguments.path) {
214
+ console.log(`Writing to: ${toolCall.arguments.path}`);
215
+
216
+ // Content might be partial or missing
217
+ if (toolCall.arguments.content) {
218
+ console.log(`Content preview: ${toolCall.arguments.content.substring(0, 100)}...`);
219
+ }
220
+ }
221
+ }
222
+ }
223
+
224
+ if (event.type === 'toolcall_end') {
225
+ // Here toolCall.arguments is complete and validated
226
+ const toolCall = event.toolCall;
227
+ console.log(`Tool completed: ${toolCall.name}`, toolCall.arguments);
228
+ }
229
+ }
230
+ ```
231
+
232
+ **Important notes about partial tool arguments:**
233
+ - During `toolcall_delta` events, `arguments` contains the best-effort parse of partial JSON
234
+ - Fields may be missing or incomplete - always check for existence before use
235
+ - String values may be truncated mid-word
236
+ - Arrays may be incomplete
237
+ - Nested objects may be partially populated
238
+ - At minimum, `arguments` will be an empty object `{}`, never `undefined`
239
+ - Full validation only occurs at `toolcall_end` when arguments are complete
240
+ - The Google provider does not support function call streaming. Instead, you will receive a single `toolcall_delta` event with the full arguments.
241
+
197
242
  ## Image Input
198
243
 
199
244
  Models with vision capabilities can process images. You can check if a model supports images via the `input` property. If you pass images to a non-vision model, they are silently ignored.
@@ -642,26 +687,26 @@ for await (const event of stream) {
642
687
  case 'agent_start':
643
688
  console.log('Agent started');
644
689
  break;
645
-
690
+
646
691
  case 'turn_start':
647
692
  console.log('New turn started');
648
693
  break;
649
-
694
+
650
695
  case 'message_start':
651
696
  console.log(`${event.message.role} message started`);
652
697
  break;
653
-
698
+
654
699
  case 'message_update':
655
700
  // Only for assistant messages during streaming
656
701
  if (event.message.content.some(c => c.type === 'text')) {
657
702
  console.log('Assistant:', event.message.content);
658
703
  }
659
704
  break;
660
-
705
+
661
706
  case 'tool_execution_start':
662
707
  console.log(`Calling ${event.toolName} with:`, event.args);
663
708
  break;
664
-
709
+
665
710
  case 'tool_execution_end':
666
711
  if (event.isError) {
667
712
  console.error(`Tool failed:`, event.result);
@@ -669,11 +714,11 @@ for await (const event of stream) {
669
714
  console.log(`Tool result:`, event.result.output);
670
715
  }
671
716
  break;
672
-
717
+
673
718
  case 'turn_end':
674
719
  console.log(`Turn ended with ${event.toolResults.length} tool calls`);
675
720
  break;
676
-
721
+
677
722
  case 'agent_end':
678
723
  console.log(`Agent completed with ${event.messages.length} new messages`);
679
724
  break;
@@ -0,0 +1,9 @@
1
+ /**
2
+ * Attempts to parse potentially incomplete JSON during streaming.
3
+ * Always returns a valid object, even if the JSON is incomplete.
4
+ *
5
+ * @param partialJson The partial JSON string from streaming
6
+ * @returns Parsed object or empty object if parsing fails
7
+ */
8
+ export declare function parseStreamingJson<T = any>(partialJson: string | undefined): T;
9
+ //# sourceMappingURL=json-parse.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"json-parse.d.ts","sourceRoot":"","sources":["../src/json-parse.ts"],"names":[],"mappings":"AAEA;;;;;;GAMG;AACH,wBAAgB,kBAAkB,CAAC,CAAC,GAAG,GAAG,EAAE,WAAW,EAAE,MAAM,GAAG,SAAS,GAAG,CAAC,CAkB9E"}
@@ -0,0 +1,29 @@
1
+ import { parse as partialParse } from "partial-json";
2
+ /**
3
+ * Attempts to parse potentially incomplete JSON during streaming.
4
+ * Always returns a valid object, even if the JSON is incomplete.
5
+ *
6
+ * @param partialJson The partial JSON string from streaming
7
+ * @returns Parsed object or empty object if parsing fails
8
+ */
9
+ export function parseStreamingJson(partialJson) {
10
+ if (!partialJson || partialJson.trim() === "") {
11
+ return {};
12
+ }
13
+ // Try standard parsing first (fastest for complete JSON)
14
+ try {
15
+ return JSON.parse(partialJson);
16
+ }
17
+ catch {
18
+ // Try partial-json for incomplete JSON
19
+ try {
20
+ const result = partialParse(partialJson);
21
+ return (result ?? {});
22
+ }
23
+ catch {
24
+ // If all parsing fails, return empty object
25
+ return {};
26
+ }
27
+ }
28
+ }
29
+ //# sourceMappingURL=json-parse.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"json-parse.js","sourceRoot":"","sources":["../src/json-parse.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,IAAI,YAAY,EAAE,MAAM,cAAc,CAAC;AAErD;;;;;;GAMG;AACH,MAAM,UAAU,kBAAkB,CAAU,WAA+B;IAC1E,IAAI,CAAC,WAAW,IAAI,WAAW,CAAC,IAAI,EAAE,KAAK,EAAE,EAAE,CAAC;QAC/C,OAAO,EAAO,CAAC;IAChB,CAAC;IAED,yDAAyD;IACzD,IAAI,CAAC;QACJ,OAAO,IAAI,CAAC,KAAK,CAAC,WAAW,CAAM,CAAC;IACrC,CAAC;IAAC,MAAM,CAAC;QACR,uCAAuC;QACvC,IAAI,CAAC;YACJ,MAAM,MAAM,GAAG,YAAY,CAAC,WAAW,CAAC,CAAC;YACzC,OAAO,CAAC,MAAM,IAAI,EAAE,CAAM,CAAC;QAC5B,CAAC;QAAC,MAAM,CAAC;YACR,4CAA4C;YAC5C,OAAO,EAAO,CAAC;QAChB,CAAC;IACF,CAAC;AACF,CAAC","sourcesContent":["import { parse as partialParse } from \"partial-json\";\n\n/**\n * Attempts to parse potentially incomplete JSON during streaming.\n * Always returns a valid object, even if the JSON is incomplete.\n *\n * @param partialJson The partial JSON string from streaming\n * @returns Parsed object or empty object if parsing fails\n */\nexport function parseStreamingJson<T = any>(partialJson: string | undefined): T {\n\tif (!partialJson || partialJson.trim() === \"\") {\n\t\treturn {} as T;\n\t}\n\n\t// Try standard parsing first (fastest for complete JSON)\n\ttry {\n\t\treturn JSON.parse(partialJson) as T;\n\t} catch {\n\t\t// Try partial-json for incomplete JSON\n\t\ttry {\n\t\t\tconst result = partialParse(partialJson);\n\t\t\treturn (result ?? {}) as T;\n\t\t} catch {\n\t\t\t// If all parsing fails, return empty object\n\t\t\treturn {} as T;\n\t\t}\n\t}\n}\n"]}
@@ -3074,7 +3074,7 @@ export declare const MODELS: {
3074
3074
  contextWindow: number;
3075
3075
  maxTokens: number;
3076
3076
  };
3077
- readonly "mistralai/mistral-7b-instruct-v0.3": {
3077
+ readonly "mistralai/mistral-7b-instruct:free": {
3078
3078
  id: string;
3079
3079
  name: string;
3080
3080
  api: "openai-completions";
@@ -3091,7 +3091,7 @@ export declare const MODELS: {
3091
3091
  contextWindow: number;
3092
3092
  maxTokens: number;
3093
3093
  };
3094
- readonly "mistralai/mistral-7b-instruct:free": {
3094
+ readonly "mistralai/mistral-7b-instruct": {
3095
3095
  id: string;
3096
3096
  name: string;
3097
3097
  api: "openai-completions";
@@ -3108,7 +3108,7 @@ export declare const MODELS: {
3108
3108
  contextWindow: number;
3109
3109
  maxTokens: number;
3110
3110
  };
3111
- readonly "mistralai/mistral-7b-instruct": {
3111
+ readonly "mistralai/mistral-7b-instruct-v0.3": {
3112
3112
  id: string;
3113
3113
  name: string;
3114
3114
  api: "openai-completions";
@@ -3159,7 +3159,7 @@ export declare const MODELS: {
3159
3159
  contextWindow: number;
3160
3160
  maxTokens: number;
3161
3161
  };
3162
- readonly "meta-llama/llama-3-70b-instruct": {
3162
+ readonly "meta-llama/llama-3-8b-instruct": {
3163
3163
  id: string;
3164
3164
  name: string;
3165
3165
  api: "openai-completions";
@@ -3176,7 +3176,7 @@ export declare const MODELS: {
3176
3176
  contextWindow: number;
3177
3177
  maxTokens: number;
3178
3178
  };
3179
- readonly "meta-llama/llama-3-8b-instruct": {
3179
+ readonly "meta-llama/llama-3-70b-instruct": {
3180
3180
  id: string;
3181
3181
  name: string;
3182
3182
  api: "openai-completions";
@@ -3295,7 +3295,7 @@ export declare const MODELS: {
3295
3295
  contextWindow: number;
3296
3296
  maxTokens: number;
3297
3297
  };
3298
- readonly "mistralai/mistral-tiny": {
3298
+ readonly "mistralai/mistral-small": {
3299
3299
  id: string;
3300
3300
  name: string;
3301
3301
  api: "openai-completions";
@@ -3312,7 +3312,7 @@ export declare const MODELS: {
3312
3312
  contextWindow: number;
3313
3313
  maxTokens: number;
3314
3314
  };
3315
- readonly "mistralai/mistral-small": {
3315
+ readonly "mistralai/mistral-tiny": {
3316
3316
  id: string;
3317
3317
  name: string;
3318
3318
  api: "openai-completions";
@@ -2711,13 +2711,13 @@ export const MODELS = {
2711
2711
  reasoning: false,
2712
2712
  input: ["text"],
2713
2713
  cost: {
2714
- input: 0.038000000000000006,
2715
- output: 0.12,
2714
+ input: 0.012,
2715
+ output: 0.036,
2716
2716
  cacheRead: 0,
2717
2717
  cacheWrite: 0,
2718
2718
  },
2719
2719
  contextWindow: 131072,
2720
- maxTokens: 16384,
2720
+ maxTokens: 8192,
2721
2721
  },
2722
2722
  "amazon/nova-lite-v1": {
2723
2723
  id: "amazon/nova-lite-v1",
@@ -3076,43 +3076,43 @@ export const MODELS = {
3076
3076
  contextWindow: 131072,
3077
3077
  maxTokens: 128000,
3078
3078
  },
3079
- "mistralai/mistral-7b-instruct-v0.3": {
3080
- id: "mistralai/mistral-7b-instruct-v0.3",
3081
- name: "Mistral: Mistral 7B Instruct v0.3",
3079
+ "mistralai/mistral-7b-instruct:free": {
3080
+ id: "mistralai/mistral-7b-instruct:free",
3081
+ name: "Mistral: Mistral 7B Instruct (free)",
3082
3082
  api: "openai-completions",
3083
3083
  provider: "openrouter",
3084
3084
  baseUrl: "https://openrouter.ai/api/v1",
3085
3085
  reasoning: false,
3086
3086
  input: ["text"],
3087
3087
  cost: {
3088
- input: 0.028,
3089
- output: 0.054,
3088
+ input: 0,
3089
+ output: 0,
3090
3090
  cacheRead: 0,
3091
3091
  cacheWrite: 0,
3092
3092
  },
3093
3093
  contextWindow: 32768,
3094
3094
  maxTokens: 16384,
3095
3095
  },
3096
- "mistralai/mistral-7b-instruct:free": {
3097
- id: "mistralai/mistral-7b-instruct:free",
3098
- name: "Mistral: Mistral 7B Instruct (free)",
3096
+ "mistralai/mistral-7b-instruct": {
3097
+ id: "mistralai/mistral-7b-instruct",
3098
+ name: "Mistral: Mistral 7B Instruct",
3099
3099
  api: "openai-completions",
3100
3100
  provider: "openrouter",
3101
3101
  baseUrl: "https://openrouter.ai/api/v1",
3102
3102
  reasoning: false,
3103
3103
  input: ["text"],
3104
3104
  cost: {
3105
- input: 0,
3106
- output: 0,
3105
+ input: 0.028,
3106
+ output: 0.054,
3107
3107
  cacheRead: 0,
3108
3108
  cacheWrite: 0,
3109
3109
  },
3110
3110
  contextWindow: 32768,
3111
3111
  maxTokens: 16384,
3112
3112
  },
3113
- "mistralai/mistral-7b-instruct": {
3114
- id: "mistralai/mistral-7b-instruct",
3115
- name: "Mistral: Mistral 7B Instruct",
3113
+ "mistralai/mistral-7b-instruct-v0.3": {
3114
+ id: "mistralai/mistral-7b-instruct-v0.3",
3115
+ name: "Mistral: Mistral 7B Instruct v0.3",
3116
3116
  api: "openai-completions",
3117
3117
  provider: "openrouter",
3118
3118
  baseUrl: "https://openrouter.ai/api/v1",
@@ -3161,34 +3161,34 @@ export const MODELS = {
3161
3161
  contextWindow: 128000,
3162
3162
  maxTokens: 4096,
3163
3163
  },
3164
- "meta-llama/llama-3-70b-instruct": {
3165
- id: "meta-llama/llama-3-70b-instruct",
3166
- name: "Meta: Llama 3 70B Instruct",
3164
+ "meta-llama/llama-3-8b-instruct": {
3165
+ id: "meta-llama/llama-3-8b-instruct",
3166
+ name: "Meta: Llama 3 8B Instruct",
3167
3167
  api: "openai-completions",
3168
3168
  provider: "openrouter",
3169
3169
  baseUrl: "https://openrouter.ai/api/v1",
3170
3170
  reasoning: false,
3171
3171
  input: ["text"],
3172
3172
  cost: {
3173
- input: 0.3,
3174
- output: 0.39999999999999997,
3173
+ input: 0.03,
3174
+ output: 0.06,
3175
3175
  cacheRead: 0,
3176
3176
  cacheWrite: 0,
3177
3177
  },
3178
3178
  contextWindow: 8192,
3179
3179
  maxTokens: 16384,
3180
3180
  },
3181
- "meta-llama/llama-3-8b-instruct": {
3182
- id: "meta-llama/llama-3-8b-instruct",
3183
- name: "Meta: Llama 3 8B Instruct",
3181
+ "meta-llama/llama-3-70b-instruct": {
3182
+ id: "meta-llama/llama-3-70b-instruct",
3183
+ name: "Meta: Llama 3 70B Instruct",
3184
3184
  api: "openai-completions",
3185
3185
  provider: "openrouter",
3186
3186
  baseUrl: "https://openrouter.ai/api/v1",
3187
3187
  reasoning: false,
3188
3188
  input: ["text"],
3189
3189
  cost: {
3190
- input: 0.03,
3191
- output: 0.06,
3190
+ input: 0.3,
3191
+ output: 0.39999999999999997,
3192
3192
  cacheRead: 0,
3193
3193
  cacheWrite: 0,
3194
3194
  },
@@ -3297,34 +3297,34 @@ export const MODELS = {
3297
3297
  contextWindow: 128000,
3298
3298
  maxTokens: 4096,
3299
3299
  },
3300
- "mistralai/mistral-tiny": {
3301
- id: "mistralai/mistral-tiny",
3302
- name: "Mistral Tiny",
3300
+ "mistralai/mistral-small": {
3301
+ id: "mistralai/mistral-small",
3302
+ name: "Mistral Small",
3303
3303
  api: "openai-completions",
3304
3304
  provider: "openrouter",
3305
3305
  baseUrl: "https://openrouter.ai/api/v1",
3306
3306
  reasoning: false,
3307
3307
  input: ["text"],
3308
3308
  cost: {
3309
- input: 0.25,
3310
- output: 0.25,
3309
+ input: 0.19999999999999998,
3310
+ output: 0.6,
3311
3311
  cacheRead: 0,
3312
3312
  cacheWrite: 0,
3313
3313
  },
3314
3314
  contextWindow: 32768,
3315
3315
  maxTokens: 4096,
3316
3316
  },
3317
- "mistralai/mistral-small": {
3318
- id: "mistralai/mistral-small",
3319
- name: "Mistral Small",
3317
+ "mistralai/mistral-tiny": {
3318
+ id: "mistralai/mistral-tiny",
3319
+ name: "Mistral Tiny",
3320
3320
  api: "openai-completions",
3321
3321
  provider: "openrouter",
3322
3322
  baseUrl: "https://openrouter.ai/api/v1",
3323
3323
  reasoning: false,
3324
3324
  input: ["text"],
3325
3325
  cost: {
3326
- input: 0.19999999999999998,
3327
- output: 0.6,
3326
+ input: 0.25,
3327
+ output: 0.25,
3328
3328
  cacheRead: 0,
3329
3329
  cacheWrite: 0,
3330
3330
  },