smoltalk 0.0.20 → 0.0.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,7 +17,10 @@ export class BaseClient {
     async textSync(promptConfig) {
         const { continue: shouldContinue, newPromptConfig } = this.checkForToolLoops(promptConfig);
         if (!shouldContinue) {
-            return { success: true, value: { output: null, toolCalls: [] } };
+            return {
+                success: true,
+                value: { output: null, toolCalls: [], model: this.config.model },
+            };
         }
         return this.textWithRetry(newPromptConfig, newPromptConfig.responseFormatOptions?.numRetries || DEFAULT_NUM_RETRIES);
     }
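Note: with this change, the tool-loop short-circuit in textSync reports the configured model even when no output is produced. A minimal consumer sketch, assuming a constructed BaseClient subclass instance named client (construction is not shown in this diff):

    // Hypothetical usage sketch; only the result shape visible in this diff is relied on.
    const result = await client.textSync(promptConfig);
    if (result.success && result.value.output === null) {
        // Tool-loop guard fired: no output, but the model is still reported.
        console.log(result.value.model);
    }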
@@ -96,7 +99,14 @@ export class BaseClient {
     async *textStream(config) {
        const { continue: shouldContinue, newPromptConfig } = this.checkForToolLoops(config);
        if (!shouldContinue) {
-            yield { type: "done", result: { output: null, toolCalls: [] } };
+            yield {
+                type: "done",
+                result: {
+                    output: null,
+                    toolCalls: [],
+                    model: this.config.model,
+                },
+            };
             return;
         }
         yield* this._textStream(newPromptConfig);
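The streaming path mirrors this: the "done" chunk emitted on a tool-loop short-circuit now carries the model too. A hedged sketch of reading it, under the same assumption of a constructed client:

    // Hypothetical usage sketch; chunk shapes are taken from this diff only.
    for await (const chunk of client.textStream(config)) {
        if (chunk.type === "done") {
            // Present both on short-circuit and on normal completion (see provider hunks below).
            console.log(chunk.result.model);
        }
    }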
@@ -87,7 +87,13 @@ export class SmolGoogle extends BaseClient {
         // Extract usage and calculate cost
         const { usage, cost } = this.calculateUsageAndCost(result.usageMetadata);
         // Return the response, updating the chat history
-        return success({ output, toolCalls, usage, cost });
+        return success({
+            output,
+            toolCalls,
+            usage,
+            cost,
+            model: request.model,
+        });
     }
     async *_textStream(config) {
         const request = this.buildRequest(config);
@@ -134,7 +140,13 @@ export class SmolGoogle extends BaseClient {
         }
         yield {
             type: "done",
-            result: { output: content || null, toolCalls, usage, cost },
+            result: {
+                output: content || null,
+                toolCalls,
+                usage,
+                cost,
+                model: request.model,
+            },
         };
     }
 }
@@ -84,7 +84,7 @@ export class SmolOllama extends BaseClient {
         // Extract usage and calculate cost
         const { usage, cost } = this.calculateUsageAndCost(result);
         // Return the response, updating the chat history
-        return success({ output, toolCalls, usage, cost });
+        return success({ output, toolCalls, usage, cost, model: this.model });
     }
     async *_textStream(config) {
         const messages = config.messages.map((msg) => msg.toOpenAIMessage());
@@ -164,7 +164,7 @@ export class SmolOllama extends BaseClient {
         }
         yield {
             type: "done",
-            result: { output: content || null, toolCalls, usage, cost },
+            result: { output: content || null, toolCalls, usage, cost, model: this.model },
         };
     }
 }
@@ -89,7 +89,13 @@ export class SmolOpenAi extends BaseClient {
         }
         // Extract usage and calculate cost
         const { usage, cost } = this.calculateUsageAndCost(completion.usage);
-        return success({ output, toolCalls, usage, cost });
+        return success({
+            output,
+            toolCalls,
+            usage,
+            cost,
+            model: request.model,
+        });
     }
     async *_textStream(config) {
         const request = this.buildRequest(config);
@@ -148,7 +154,13 @@ export class SmolOpenAi extends BaseClient {
         }
         yield {
             type: "done",
-            result: { output: content || null, toolCalls, usage, cost },
+            result: {
+                output: content || null,
+                toolCalls,
+                usage,
+                cost,
+                model: request.model,
+            },
         };
     }
 }
@@ -115,7 +115,13 @@ export class SmolOpenAiResponses extends BaseClient {
             }
         }
         const { usage, cost } = this.calculateUsageAndCost(response.usage);
-        return success({ output, toolCalls, usage, cost });
+        return success({
+            output,
+            toolCalls,
+            usage,
+            cost,
+            model: request.model,
+        });
     }
     async *_textStream(config) {
         const request = this.buildRequest(config);
@@ -195,7 +201,13 @@ export class SmolOpenAiResponses extends BaseClient {
         }
         yield {
             type: "done",
-            result: { output: content || null, toolCalls, usage, cost },
+            result: {
+                output: content || null,
+                toolCalls,
+                usage,
+                cost,
+                model: request.model,
+            },
         };
     }
 }
package/dist/types.d.ts CHANGED
@@ -64,6 +64,7 @@ export type PromptResult = {
     toolCalls: ToolCall[];
     usage?: TokenUsage;
     cost?: CostEstimate;
+    model?: ModelName | ModelConfig;
 };
 export type StreamChunk = {
     type: "text";
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "smoltalk",
-  "version": "0.0.20",
+  "version": "0.0.21",
   "description": "A common interface for LLM APIs",
   "homepage": "https://github.com/egonSchiele/smoltalk",
   "scripts": {