@mariozechner/pi-ai 0.5.15 → 0.5.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/README.md +79 -27
  2. package/package.json +1 -1
package/README.md CHANGED
@@ -4,6 +4,22 @@ Unified LLM API with automatic model discovery, provider configuration, token an

  **Note**: This library only includes models that support tool calling (function calling), as this is essential for agentic workflows.

+ ## API Changes in v0.5.15+
+
+ The `AssistantMessage` response structure has been updated to support multiple content blocks of different types. Instead of separate fields for `text`, `thinking`, and `toolCalls`, responses now have a unified `content` array that can contain multiple blocks of each type in any order.
+
+ ```typescript
+ // Old API (pre-0.5.15)
+ response.text       // single text string
+ response.thinking   // single thinking string
+ response.toolCalls  // array of tool calls
+
+ // New API (0.5.15+)
+ response.content    // array of TextContent | ThinkingContent | ToolCall blocks
+ ```
+
+ This change allows models to return multiple thinking and text blocks, which is especially useful for complex reasoning tasks.
+
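+ For reference, the individual block shapes look roughly like the sketch below. This is inferred from the examples in this README, not the library's exact type definitions; in particular, the field holding the reasoning text on `ThinkingContent` is an assumption:
+
+ ```typescript
+ // Sketch of the content block union (field names inferred from usage in this README)
+ interface TextContent {
+   type: 'text';
+   text: string;                    // generated text
+ }
+
+ interface ThinkingContent {
+   type: 'thinking';
+   thinking: string;                // assumed field name for the reasoning text
+ }
+
+ interface ToolCall {
+   type: 'toolCall';
+   id: string;                      // echoed back as toolCallId in tool results
+   name: string;                    // tool to invoke
+   arguments: Record<string, any>;  // parsed arguments object
+ }
+
+ type ContentBlock = TextContent | ThinkingContent | ToolCall;
+ ```
+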
  ## Supported Providers

  - **OpenAI**
@@ -26,13 +42,18 @@ npm install @mariozechner/pi-ai
  ```typescript
  import { createLLM } from '@mariozechner/pi-ai';

- const llm = createLLM('openai', 'gpt-5-mini');
+ const llm = createLLM('openai', 'gpt-4o-mini');

  const response = await llm.complete({
    messages: [{ role: 'user', content: 'Hello!' }]
  });

- console.log(response.content);
+ // response.content is now an array of content blocks
+ for (const block of response.content) {
+   if (block.type === 'text') {
+     console.log(block.text);
+   }
+ }
  ```
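+
+ If you only need the concatenated text, a small helper does the job. This helper is not part of the library; it relies only on the `content` array described above (`flatMap` keeps the type narrowing simple):
+
+ ```typescript
+ // Collect all text blocks into a single string
+ const text = response.content
+   .flatMap((block) => (block.type === 'text' ? [block.text] : []))
+   .join('');
+ console.log(text);
+ ```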
  ## Image Input
@@ -75,24 +96,34 @@ messages.push({ role: 'user', content: 'What is the weather in Paris?' });
  const response = await llm.complete({ messages, tools });
  messages.push(response);

- if (response.toolCalls) {
-   for (const call of response.toolCalls) {
-     // Call your actual function
-     const result = await getWeather(call.arguments.location);
-
-     // Add tool result to context
-     messages.push({
-       role: 'toolResult',
-       content: JSON.stringify(result),
-       toolCallId: call.id,
-       isError: false
-     });
-   }
+ // Check for tool calls in the content blocks
+ const toolCalls = response.content.filter(block => block.type === 'toolCall');
+
+ for (const call of toolCalls) {
+   // Call your actual function
+   const result = await getWeather(call.arguments.location);
+
+   // Add tool result to context
+   messages.push({
+     role: 'toolResult',
+     content: JSON.stringify(result),
+     toolCallId: call.id,
+     toolName: call.name,
+     isError: false
+   });
+ }

+ if (toolCalls.length > 0) {
    // Continue conversation with tool results
    const followUp = await llm.complete({ messages, tools });
    messages.push(followUp);
-   console.log(followUp.content);
+
+   // Print text blocks from the response
+   for (const block of followUp.content) {
+     if (block.type === 'text') {
+       console.log(block.text);
+     }
+   }
  }
  ```
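+
+ In practice an agent repeats this exchange until the model stops requesting tools. Below is a minimal sketch of such a loop, using only the API shown above; the single `getWeather` dispatch stands in for your own tool routing:
+
+ ```typescript
+ let current = await llm.complete({ messages, tools });
+ messages.push(current);
+
+ // Keep going while the latest response still contains tool calls
+ while (current.content.some((block) => block.type === 'toolCall')) {
+   for (const call of current.content) {
+     if (call.type !== 'toolCall') continue;
+     const result = await getWeather(call.arguments.location); // your tool dispatch here
+     messages.push({
+       role: 'toolResult',
+       content: JSON.stringify(result),
+       toolCallId: call.id,
+       toolName: call.name,
+       isError: false
+     });
+   }
+   current = await llm.complete({ messages, tools });
+   messages.push(current);
+ }
+ ```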
@@ -102,13 +133,30 @@ if (response.toolCalls) {
  const response = await llm.complete({
    messages: [{ role: 'user', content: 'Write a story' }]
  }, {
-   onText: (chunk, complete) => {
-     process.stdout.write(chunk);
-     if (complete) console.log('\n[Text streaming complete]');
-   },
-   onThinking: (chunk, complete) => {
-     process.stderr.write(chunk);
-     if (complete) console.error('\n[Thinking complete]');
+   onEvent: (event) => {
+     switch (event.type) {
+       case 'text_start':
+         console.log('[Starting text block]');
+         break;
+       case 'text_delta':
+         process.stdout.write(event.delta);
+         break;
+       case 'text_end':
+         console.log('\n[Text block complete]');
+         break;
+       case 'thinking_start':
+         console.error('[Starting thinking]');
+         break;
+       case 'thinking_delta':
+         process.stderr.write(event.delta);
+         break;
+       case 'thinking_end':
+         console.error('\n[Thinking complete]');
+         break;
+       case 'toolCall':
+         console.log('Tool called:', event.toolCall.name);
+         break;
+     }
    }
  });
  ```
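+
+ The events passed to `onEvent` carry roughly this shape. This union is inferred from the switch cases above (with `ToolCall` as sketched earlier), not the library's published types:
+
+ ```typescript
+ // Inferred streaming event union (names taken from the handler above)
+ type StreamEvent =
+   | { type: 'text_start' }
+   | { type: 'text_delta'; delta: string }
+   | { type: 'text_end' }
+   | { type: 'thinking_start' }
+   | { type: 'thinking_delta'; delta: string }
+   | { type: 'thinking_end' }
+   | { type: 'toolCall'; toolCall: ToolCall };
+ ```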
@@ -126,7 +174,11 @@ try {
      messages: [{ role: 'user', content: 'Write a long story' }]
    }, {
      signal: controller.signal,
-     onText: (chunk) => process.stdout.write(chunk)
+     onEvent: (event) => {
+       if (event.type === 'text_delta') {
+         process.stdout.write(event.delta);
+       }
+     }
    });
  } catch (error) {
    if (error.name === 'AbortError') {
@@ -139,7 +191,7 @@ try {

  ### OpenAI Reasoning (o1, o3)
  ```typescript
- const llm = createLLM('openai', 'gpt-5-mini');
+ const llm = createLLM('openai', 'o1-mini');

  await llm.complete(context, {
    reasoningEffort: 'medium' // 'minimal' | 'low' | 'medium' | 'high'
@@ -148,7 +200,7 @@ await llm.complete(context, {

  ### Anthropic Thinking
  ```typescript
- const llm = createLLM('anthropic', 'claude-sonnet-4-0');
+ const llm = createLLM('anthropic', 'claude-3-5-sonnet-20241022');

  await llm.complete(context, {
    thinking: {
@@ -160,7 +212,7 @@ await llm.complete(context, {

  ### Google Gemini Thinking
  ```typescript
- const llm = createLLM('google', 'gemini-2.5-flash');
+ const llm = createLLM('google', 'gemini-2.0-flash-thinking-exp');

  await llm.complete(context, {
    thinking: { enabled: true }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@mariozechner/pi-ai",
-   "version": "0.5.15",
+   "version": "0.5.16",
    "description": "Unified LLM API with automatic model discovery and provider configuration",
    "type": "module",
    "main": "./dist/index.js",