@friendliai/ai-provider 0.2.5 → 0.2.7-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -47,7 +47,8 @@ var import_zod2 = require("zod");
  var import_zod = require("zod");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  var friendliaiErrorSchema = import_zod.z.object({
- message: import_zod.z.string()
+ message: import_zod.z.string(),
+ error: import_zod.z.record(import_zod.z.any())
  });
  var friendliaiErrorStructure = {
  errorSchema: friendliaiErrorSchema,
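The error schema now requires a structured `error` record alongside `message`. A minimal sketch of what the updated schema accepts, assuming zod v3 semantics; the payload values are illustrative, not from the package:

```ts
import { z } from "zod";

// Mirrors the schema in the diff: `error` is an open key/value record.
const friendliaiErrorSchema = z.object({
  message: z.string(),
  error: z.record(z.any()),
});

// Hypothetical error payload that parses under the new schema:
friendliaiErrorSchema.parse({
  message: "invalid request",
  error: { code: 400, detail: "bad model id" },
});
```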
@@ -60,70 +61,51 @@ var friendliaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorR
  // src/friendli-prepare-tools.ts
  var import_provider = require("@ai-sdk/provider");
  function prepareTools({
- mode,
- tools: hostedTools
+ tools,
+ toolChoice
  }) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
- if (tools == null && hostedTools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings };
+ if (tools == null) {
+ return { tools: void 0, toolChoice: void 0, toolWarnings };
  }
- const toolChoice = mode.toolChoice;
- const mappedTools = [];
- if (tools) {
- for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- mappedTools.push({
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters
- }
- });
- }
+ const openaiCompatTools = [];
+ for (const tool of tools) {
+ if (tool.type === "provider-defined") {
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ } else {
+ openaiCompatTools.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.parameters
+ }
+ });
  }
  }
- const mappedHostedTools = hostedTools == null ? void 0 : hostedTools.map((tool) => {
- return {
- type: tool.type
- };
- });
  if (toolChoice == null) {
- return {
- tools: [...mappedTools != null ? mappedTools : [], ...mappedHostedTools != null ? mappedHostedTools : []],
- tool_choice: void 0,
- toolWarnings
- };
+ return { tools: openaiCompatTools, toolChoice: void 0, toolWarnings };
  }
  const type = toolChoice.type;
  switch (type) {
  case "auto":
  case "none":
  case "required":
- return {
- tools: [...mappedTools != null ? mappedTools : [], ...mappedHostedTools != null ? mappedHostedTools : []],
- tool_choice: type,
- toolWarnings
- };
+ return { tools: openaiCompatTools, toolChoice: type, toolWarnings };
  case "tool":
  return {
- tools: [...mappedTools != null ? mappedTools : [], ...mappedHostedTools != null ? mappedHostedTools : []],
- tool_choice: {
+ tools: openaiCompatTools,
+ toolChoice: {
  type: "function",
- function: {
- name: toolChoice.toolName
- }
+ function: { name: toolChoice.toolName }
  },
  toolWarnings
  };
  default: {
  const _exhaustiveCheck = type;
  throw new import_provider.UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
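This hunk removes the v1 `mode` object and hosted-tool passthrough: the internal `prepareTools` helper now takes `tools` and `toolChoice` directly and returns camelCase `toolChoice`. A sketch of the new contract, assuming the v2 function-tool shape; the weather tool and its parameters are made up for illustration:

```ts
// Names (prepareTools, toolWarnings) come from the diff; the tool is hypothetical.
const { tools, toolChoice, toolWarnings } = prepareTools({
  tools: [
    {
      type: "function",
      name: "get_weather",
      description: "Look up current weather",
      parameters: { type: "object", properties: { city: { type: "string" } } },
    },
  ],
  toolChoice: { type: "tool", toolName: "get_weather" },
});
// tools      -> [{ type: "function", function: { name, description, parameters } }]
// toolChoice -> { type: "function", function: { name: "get_weather" } }
```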
@@ -131,39 +113,48 @@ function prepareTools({

  // src/friendli-chat-language-model.ts
  var FriendliAIChatLanguageModel = class {
- constructor(modelId, settings, config) {
- this.specificationVersion = "v1";
+ // type inferred via constructor
+ constructor(modelId, config) {
+ this.specificationVersion = "v2";
  var _a;
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
+ const errorStructure = friendliaiErrorStructure;
+ this.chunkSchema = createOpenAICompatibleChatChunkSchema(
+ errorStructure.errorSchema
+ );
  this.failedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)(
  friendliaiErrorStructure
  );
  this.supportsStructuredOutputs = (_a = config.supportsStructuredOutputs) != null ? _a : true;
  }
- get defaultObjectGenerationMode() {
- var _a;
- return (_a = this.config.defaultObjectGenerationMode) != null ? _a : "json";
- }
  get provider() {
  return this.config.provider;
  }
- getArgs({
- mode,
+ get supportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).supportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
+ }
+ async getArgs({
  prompt,
- maxTokens,
+ maxOutputTokens,
  temperature,
  topP,
  topK,
  frequencyPenalty,
  presencePenalty,
+ // providerOptions,
  stopSequences,
  responseFormat,
- seed
+ seed,
+ toolChoice,
+ tools
  }) {
- const type = mode.type;
+ var _a;
  const warnings = [];
+ if (topK != null) {
+ warnings.push({ type: "unsupported-setting", setting: "topK" });
+ }
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !this.supportsStructuredOutputs) {
  warnings.push({
  type: "unsupported-setting",
@@ -171,220 +162,173 @@ var FriendliAIChatLanguageModel = class {
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
- const baseArgs = {
- // model id:
- model: this.modelId,
- // model specific settings:
- user: this.settings.user,
- parallel_tool_calls: this.settings.parallelToolCalls,
- // standardized settings:
- max_tokens: maxTokens,
- temperature,
- top_p: topP,
- top_k: topK,
- frequency_penalty: frequencyPenalty,
- presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- description: responseFormat.description
- }
- } : { type: "json_object" } : void 0,
- stop: stopSequences,
- seed,
- // messages:
- messages: (0, import_internal.convertToOpenAICompatibleChatMessages)(prompt)
- };
- if (this.settings.regex != null && type !== "regular") {
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "egular expression is only supported with regular mode (generateText, streamText)"
- });
- }
- switch (type) {
- case "regular": {
- if (this.settings.regex != null) {
- if (this.settings.tools != null || mode.tools != null) {
- throw new import_provider2.UnsupportedFunctionalityError({
- functionality: "Regular expression and tools cannot be used together. Use either regular expression or tools."
- });
+ const {
+ tools: openaiTools,
+ toolChoice: openaiToolChoice,
+ toolWarnings
+ } = prepareTools({
+ tools,
+ toolChoice
+ });
+ return {
+ args: {
+ // model id:
+ model: this.modelId,
+ // model specific settings:
+ // user: compatibleOptions.user,
+ // standardized settings:
+ max_tokens: maxOutputTokens,
+ temperature,
+ top_p: topP,
+ frequency_penalty: frequencyPenalty,
+ presence_penalty: presencePenalty,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? this.supportsStructuredOutputs === true && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ name: (_a = responseFormat.name) != null ? _a : "response",
+ description: responseFormat.description
  }
- return {
- args: {
- ...baseArgs,
- response_format: {
- type: "regex",
- schema: this.settings.regex.source
- }
- },
- warnings
- };
- }
- const { tools, tool_choice, toolWarnings } = prepareTools({
- mode,
- tools: this.settings.tools
- });
- return {
- args: { ...baseArgs, tools, tool_choice },
- warnings: [...warnings, ...toolWarnings]
- };
- }
- case "object-json": {
- return {
- args: {
- ...baseArgs,
- response_format: this.supportsStructuredOutputs === true && mode.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: mode.schema,
- description: mode.description
- }
- } : { type: "json_object" }
- },
- warnings
- };
- }
- case "object-tool": {
- return {
- args: {
- ...baseArgs,
- tool_choice: {
- type: "function",
- function: { name: mode.tool.name }
- },
- tools: [
- {
- type: "function",
- function: {
- name: mode.tool.name,
- description: mode.tool.description,
- parameters: mode.tool.parameters
- }
- }
- ]
- },
- warnings
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
- }
+ } : { type: "json_object" } : void 0,
+ stop: stopSequences,
+ seed,
+ // ...providerOptions?.[this.providerOptionsName],
+ // reasoning_effort: compatibleOptions.reasoningEffort,
+ // messages:
+ messages: (0, import_internal.convertToOpenAICompatibleChatMessages)(prompt),
+ // tools:
+ tools: openaiTools,
+ tool_choice: openaiToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings]
+ };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f;
- const { args, warnings } = this.getArgs({ ...options });
- const body = JSON.stringify({ ...args, stream: false });
- const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ const { args, warnings } = await this.getArgs({ ...options });
+ const body = JSON.stringify(args);
+ const {
+ responseHeaders,
+ value: responseBody,
+ rawValue: rawResponse
+ } = await (0, import_provider_utils2.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
  headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
- body: {
- ...args,
- stream: false
- },
+ body: args,
  failedResponseHandler: this.failedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils2.createJsonResponseHandler)(
- friendliAIChatResponseSchema
+ OpenAICompatibleChatResponseSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
- const choice = response.choices[0];
- return {
- text: (_a = choice.message.content) != null ? _a : void 0,
- toolCalls: (_b = choice.message.tool_calls) == null ? void 0 : _b.map((toolCall) => {
- var _a2;
- return {
+ const choice = responseBody.choices[0];
+ const content = [];
+ const text = choice.message.content;
+ if (text != null && text.length > 0) {
+ content.push({ type: "text", text });
+ }
+ const reasoning = choice.message.reasoning_content;
+ if (reasoning != null && reasoning.length > 0) {
+ content.push({
+ type: "reasoning",
+ text: reasoning
+ });
+ }
+ if (choice.message.tool_calls != null) {
+ for (const toolCall of choice.message.tool_calls) {
+ content.push({
+ type: "tool-call",
  toolCallType: "function",
- toolCallId: (_a2 = toolCall.id) != null ? _a2 : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_a = toolCall.id) != null ? _a : (0, import_provider_utils2.generateId)(),
  toolName: toolCall.function.name,
- args: typeof toolCall.function.arguments === "string" ? toolCall.function.arguments : JSON.stringify(toolCall.function.arguments)
- };
- }),
+ args: toolCall.function.arguments
+ });
+ }
+ }
+ return {
+ content,
  finishReason: (0, import_internal.mapOpenAICompatibleFinishReason)(choice.finish_reason),
  usage: {
- promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : NaN,
- completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : NaN
+ inputTokens: (_c = (_b = responseBody.usage) == null ? void 0 : _b.prompt_tokens) != null ? _c : void 0,
+ outputTokens: (_e = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens) != null ? _e : void 0,
+ totalTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.total_tokens) != null ? _g : void 0,
+ reasoningTokens: (_j = (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens_details) == null ? void 0 : _i.reasoning_tokens) != null ? _j : void 0,
+ cachedInputTokens: (_m = (_l = (_k = responseBody.usage) == null ? void 0 : _k.prompt_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0
  },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- response: (0, import_internal.getResponseMetadata)(response),
- warnings,
- request: { body }
+ // providerMetadata,
+ request: { body },
+ response: {
+ ...(0, import_internal.getResponseMetadata)(responseBody),
+ headers: responseHeaders,
+ body: rawResponse
+ },
+ warnings
  };
  }
  async doStream(options) {
- const { args, warnings } = this.getArgs({ ...options });
- const body = JSON.stringify({ ...args, stream: true });
+ var _a;
+ const { args, warnings } = await this.getArgs({ ...options });
+ const body = {
+ ...args,
+ stream: true,
+ // only include stream_options when in strict compatibility mode:
+ stream_options: this.config.includeUsage ? { include_usage: true } : void 0
+ };
+ const metadataExtractor = (_a = this.config.metadataExtractor) == null ? void 0 : _a.createStreamExtractor();
  const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
  url: this.config.url({
  path: "/chat/completions",
  modelId: this.modelId
  }),
  headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
- body: {
- ...args,
- stream: true,
- stream_options: { include_usage: true }
- },
- failedResponseHandler: friendliaiFailedResponseHandler,
+ body,
+ failedResponseHandler: this.failedResponseHandler,
  successfulResponseHandler: (0, import_provider_utils2.createEventSourceResponseHandler)(
- friendliaiChatChunkSchema
+ this.chunkSchema
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  const toolCalls = [];
  let finishReason = "unknown";
- let usage = {
+ const usage = {
+ completionTokens: void 0,
+ completionTokensDetails: {
+ reasoningTokens: void 0,
+ acceptedPredictionTokens: void 0,
+ rejectedPredictionTokens: void 0
+ },
  promptTokens: void 0,
- completionTokens: void 0
+ promptTokensDetails: {
+ cachedTokens: void 0
+ },
+ totalTokens: void 0
  };
  let isFirstChunk = true;
- let providerMetadata;
+ const providerOptionsName = "friendliai";
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
+ // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
  return;
  }
  const value = chunk.value;
- if ("status" in value) {
- switch (value.status) {
- case "STARTED":
- break;
- case "UPDATING":
- break;
- case "ENDED":
- break;
- case "ERRORED":
- finishReason = "error";
- break;
- default:
- finishReason = "error";
- controller.enqueue({
- type: "error",
- error: new Error(
- `Unsupported tool call status: ${value.status}`
- )
- });
- }
- return;
- }
- if ("message" in value) {
- console.error("Error chunk:", value);
+ metadataExtractor == null ? void 0 : metadataExtractor.processChunk(chunk.rawValue);
+ if ("error" in value) {
  finishReason = "error";
- controller.enqueue({ type: "error", error: value.message });
+ controller.enqueue({ type: "error", error: value.error.message });
  return;
  }
  if (isFirstChunk) {
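The `doGenerate` rewrite is the core of the v2 migration: instead of separate `text`/`toolCalls` result fields, it builds one ordered `content` array of typed parts (`text`, `reasoning`, `tool-call`). A minimal sketch of consuming that shape, assuming `model` and `options` are already in scope:

```ts
// Part types and field names mirror what the diff pushes into `content`.
const { content } = await model.doGenerate(options);

for (const part of content) {
  switch (part.type) {
    case "text":
      process.stdout.write(part.text);
      break;
    case "reasoning":
      console.log("[reasoning]", part.text);
      break;
    case "tool-call":
      // `args` is the raw JSON string from toolCall.function.arguments
      console.log(`call ${part.toolName}(${part.args})`);
      break;
  }
}
```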
@@ -395,10 +339,28 @@ var FriendliAIChatLanguageModel = class {
  });
  }
  if (value.usage != null) {
- usage = {
- promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
- completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
- };
+ const {
+ prompt_tokens,
+ completion_tokens,
+ total_tokens,
+ prompt_tokens_details,
+ completion_tokens_details
+ } = value.usage;
+ usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
+ usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
+ usage.totalTokens = total_tokens != null ? total_tokens : void 0;
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens) != null) {
+ usage.completionTokensDetails.reasoningTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.reasoning_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens) != null) {
+ usage.completionTokensDetails.acceptedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.accepted_prediction_tokens;
+ }
+ if ((completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens) != null) {
+ usage.completionTokensDetails.rejectedPredictionTokens = completion_tokens_details == null ? void 0 : completion_tokens_details.rejected_prediction_tokens;
+ }
+ if ((prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens) != null) {
+ usage.promptTokensDetails.cachedTokens = prompt_tokens_details == null ? void 0 : prompt_tokens_details.cached_tokens;
+ }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
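Streaming usage accumulation now also tracks totals, reasoning/prediction token detail, and cached prompt tokens. A sketch of how a raw OpenAI-compatible usage chunk (field names from the diff, numbers made up) maps onto the accumulator:

```ts
// Hypothetical server-reported usage payload:
const rawUsage = {
  prompt_tokens: 120,
  completion_tokens: 48,
  total_tokens: 168,
  prompt_tokens_details: { cached_tokens: 100 },
  completion_tokens_details: { reasoning_tokens: 16 },
};
// After the transform processes it:
// usage.promptTokens                            === 120
// usage.completionTokens                        === 48
// usage.totalTokens                             === 168
// usage.promptTokensDetails.cachedTokens        === 100
// usage.completionTokensDetails.reasoningTokens === 16
```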
@@ -410,10 +372,16 @@ var FriendliAIChatLanguageModel = class {
  return;
  }
  const delta = choice.delta;
+ if (delta.reasoning_content != null) {
+ controller.enqueue({
+ type: "reasoning",
+ text: delta.reasoning_content
+ });
+ }
  if (delta.content != null) {
  controller.enqueue({
- type: "text-delta",
- textDelta: delta.content
+ type: "text",
+ text: delta.content
  });
  }
  if (delta.tool_calls != null) {
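Two stream-part changes here: `delta.reasoning_content` is now surfaced as `reasoning` parts, and text deltas are renamed from `text-delta`/`textDelta` to `type: "text"` with a `text` field. A sketch of a consumer, assuming `stream` is the ReadableStream returned by `doStream` and that this runs inside an async context:

```ts
const reader = stream.getReader();
for (;;) {
  const { done, value: part } = await reader.read();
  if (done) break;
  if (part.type === "reasoning") console.log("[thinking]", part.text);
  if (part.type === "text") process.stdout.write(part.text); // was "text-delta"/"textDelta"
}
```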
@@ -432,7 +400,7 @@ var FriendliAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+ if (((_a2 = toolCallDelta.function) == null ? void 0 : _a2.name) == null) {
  throw new import_provider2.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -443,11 +411,12 @@ var FriendliAIChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
- }
+ arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+ },
+ hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+ if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-call-delta",
@@ -461,55 +430,71 @@ var FriendliAIChatLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils2.generateId)(),
  toolName: toolCall2.function.name,
  args: toolCall2.function.arguments
  });
+ toolCall2.hasFinished = true;
  }
  }
  continue;
  }
  const toolCall = toolCalls[index];
- if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
- toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+ if (toolCall.hasFinished) {
+ continue;
+ }
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
+ toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
  }
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
  toolCallId: toolCall.id,
  toolName: toolCall.function.name,
- argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+ argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
  });
- if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
+ if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils2.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-call",
  toolCallType: "function",
- toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils2.generateId)(),
+ toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils2.generateId)(),
  toolName: toolCall.function.name,
  args: toolCall.function.arguments
  });
+ toolCall.hasFinished = true;
  }
  }
  }
  },
  flush(controller) {
- var _a, _b;
+ var _a2, _b, _c, _d, _e;
+ const providerMetadata = {
+ [providerOptionsName]: {},
+ ...metadataExtractor == null ? void 0 : metadataExtractor.buildMetadata()
+ };
+ if (usage.completionTokensDetails.acceptedPredictionTokens != null) {
+ providerMetadata[providerOptionsName].acceptedPredictionTokens = usage.completionTokensDetails.acceptedPredictionTokens;
+ }
+ if (usage.completionTokensDetails.rejectedPredictionTokens != null) {
+ providerMetadata[providerOptionsName].rejectedPredictionTokens = usage.completionTokensDetails.rejectedPredictionTokens;
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
  usage: {
- promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
- completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
+ inputTokens: (_a2 = usage.promptTokens) != null ? _a2 : void 0,
+ outputTokens: (_b = usage.completionTokens) != null ? _b : void 0,
+ totalTokens: (_c = usage.totalTokens) != null ? _c : void 0,
+ reasoningTokens: (_d = usage.completionTokensDetails.reasoningTokens) != null ? _d : void 0,
+ cachedInputTokens: (_e = usage.promptTokensDetails.cachedTokens) != null ? _e : void 0
  },
- ...providerMetadata != null ? { providerMetadata } : {}
+ providerMetadata
  });
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
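The stream's `flush` now always emits `providerMetadata` keyed under `"friendliai"` (folding in extractor metadata and prediction-token counts), and the final usage falls back to `undefined` rather than `NaN`. A sketch of the resulting `finish` part's shape; all values are illustrative:

```ts
const finishPart = {
  type: "finish",
  finishReason: "stop",
  usage: {
    inputTokens: 120,
    outputTokens: 48,
    totalTokens: 168,
    reasoningTokens: 16,
    cachedInputTokens: 100, // undefined (not NaN) when the API omits a count
  },
  providerMetadata: {
    friendliai: { acceptedPredictionTokens: 8, rejectedPredictionTokens: 2 },
  },
};
```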
@@ -593,6 +578,75 @@ var friendliaiChatChunkSchema = import_zod2.z.union([
  }),
  friendliaiErrorSchema
  ]);
+ var openaiCompatibleTokenUsageSchema = import_zod2.z.object({
+ prompt_tokens: import_zod2.z.number().nullish(),
+ completion_tokens: import_zod2.z.number().nullish(),
+ total_tokens: import_zod2.z.number().nullish(),
+ prompt_tokens_details: import_zod2.z.object({
+ cached_tokens: import_zod2.z.number().nullish()
+ }).nullish(),
+ completion_tokens_details: import_zod2.z.object({
+ reasoning_tokens: import_zod2.z.number().nullish(),
+ accepted_prediction_tokens: import_zod2.z.number().nullish(),
+ rejected_prediction_tokens: import_zod2.z.number().nullish()
+ }).nullish()
+ }).nullish();
+ var OpenAICompatibleChatResponseSchema = import_zod2.z.object({
+ id: import_zod2.z.string().nullish(),
+ created: import_zod2.z.number().nullish(),
+ model: import_zod2.z.string().nullish(),
+ choices: import_zod2.z.array(
+ import_zod2.z.object({
+ message: import_zod2.z.object({
+ role: import_zod2.z.literal("assistant").nullish(),
+ content: import_zod2.z.string().nullish(),
+ reasoning_content: import_zod2.z.string().nullish(),
+ tool_calls: import_zod2.z.array(
+ import_zod2.z.object({
+ id: import_zod2.z.string().nullish(),
+ type: import_zod2.z.literal("function"),
+ function: import_zod2.z.object({
+ name: import_zod2.z.string(),
+ arguments: import_zod2.z.string()
+ })
+ })
+ ).nullish()
+ }),
+ finish_reason: import_zod2.z.string().nullish()
+ })
+ ),
+ usage: openaiCompatibleTokenUsageSchema
+ });
+ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_zod2.z.union([
+ import_zod2.z.object({
+ id: import_zod2.z.string().nullish(),
+ created: import_zod2.z.number().nullish(),
+ model: import_zod2.z.string().nullish(),
+ choices: import_zod2.z.array(
+ import_zod2.z.object({
+ delta: import_zod2.z.object({
+ role: import_zod2.z.enum(["assistant"]).nullish(),
+ content: import_zod2.z.string().nullish(),
+ reasoning_content: import_zod2.z.string().nullish(),
+ tool_calls: import_zod2.z.array(
+ import_zod2.z.object({
+ index: import_zod2.z.number(),
+ id: import_zod2.z.string().nullish(),
+ type: import_zod2.z.literal("function").nullish(),
+ function: import_zod2.z.object({
+ name: import_zod2.z.string().nullish(),
+ arguments: import_zod2.z.string().nullish()
+ })
+ })
+ ).nullish()
+ }).nullish(),
+ finish_reason: import_zod2.z.string().nullish()
+ })
+ ),
+ usage: openaiCompatibleTokenUsageSchema
+ }),
+ errorSchema
+ ]);

  // src/friendli-provider.ts
  function createFriendli(options = {}) {
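The streaming chunk schema is now built per-instance by a factory that unions the OpenAI-compatible chunk shape with the provider's error schema, so a single parse handles both data chunks and inline error chunks (hence the `"error" in value` check in the transform). A sketch, with an illustrative chunk payload:

```ts
const chunkSchema = createOpenAICompatibleChatChunkSchema(friendliaiErrorSchema);

const parsed = chunkSchema.safeParse({
  id: "chatcmpl-1", // hypothetical chunk
  choices: [{ delta: { content: "Hel" }, finish_reason: null }],
  usage: null,
});
if (parsed.success && !("error" in parsed.data)) {
  // a regular delta chunk, not an inline error
}
```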
@@ -645,28 +699,29 @@ function createFriendli(options = {}) {
  };
  }
  };
- const createChatModel = (modelId, settings = {}) => {
+ const createChatModel = (modelId) => {
  const { baseURL, type } = baseURLAutoSelect(
  modelId,
- settings.endpoint || "auto",
- options.baseURL,
- settings.tools
+ // settings.endpoint || 'auto',
+ "auto",
+ options.baseURL
+ // settings.tools,
  );
- return new FriendliAIChatLanguageModel(modelId, settings, {
+ return new FriendliAIChatLanguageModel(modelId, {
  provider: `friendliai.${type}.chat`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
- fetch: options.fetch,
- defaultObjectGenerationMode: "json"
+ fetch: options.fetch
  });
  };
- const createCompletionModel = (modelId, settings = {}) => {
+ const createCompletionModel = (modelId) => {
  const { baseURL, type } = baseURLAutoSelect(
  modelId,
- settings.endpoint || "auto",
+ // settings.endpoint || 'auto',
+ "auto",
  options.baseURL
  );
- return new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(modelId, settings, {
+ return new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(modelId, {
  provider: `friendliai.${type}.completion`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
@@ -674,25 +729,24 @@ function createFriendli(options = {}) {
  errorStructure: friendliaiErrorStructure
  });
  };
- const createBetaModel = (modelId, settings = {}) => {
+ const createBetaModel = (modelId) => {
  const { baseURL, type } = baseURLAutoSelect(
  modelId,
  "beta",
  options.baseURL
  );
- return new FriendliAIChatLanguageModel(modelId, {
+ return new FriendliAIChatLanguageModel(modelId, {
  provider: `friendliai.${type}.chat`,
  url: ({ path }) => `${baseURL}${path}`,
  headers: getHeaders,
- fetch: options.fetch,
- defaultObjectGenerationMode: "json"
+ fetch: options.fetch
  });
  };
  const createTextEmbeddingModel = (modelId) => {
  throw new import_provider3.NoSuchModelError({ modelId, modelType: "textEmbeddingModel" });
  };
- const provider = function(modelId, settings) {
- return createChatModel(modelId, settings);
+ const provider = function(modelId) {
+ return createChatModel(modelId);
  };
  provider.beta = createBetaModel;
  provider.chat = createChatModel;
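Across the provider factories, the per-model `settings` parameter is dropped entirely and endpoint selection is pinned to `"auto"`; models are now created from the model id alone. A sketch of the call-site impact, assuming module-level usage; the model id is illustrative and auth options are elided:

```ts
import { createFriendli } from "@friendliai/ai-provider";

const friendli = createFriendli(); // auth options elided; see package docs

// 0.2.5:          friendli("model-id", { endpoint: "dedicated", ... })
// 0.2.7-alpha.0:  settings are no longer accepted — model id only:
const chat = friendli("meta-llama-3.1-8b-instruct");
const beta = friendli.beta("meta-llama-3.1-8b-instruct");
```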