@providerprotocol/ai 0.0.12 → 0.0.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/dist/anthropic/index.d.ts +51 -15
  2. package/dist/anthropic/index.js +80 -29
  3. package/dist/anthropic/index.js.map +1 -1
  4. package/dist/{chunk-SUNYWHTH.js → chunk-MOU4U3PO.js} +55 -3
  5. package/dist/chunk-MOU4U3PO.js.map +1 -0
  6. package/dist/{chunk-Y6Q7JCNP.js → chunk-MSR5P65T.js} +1 -1
  7. package/dist/chunk-MSR5P65T.js.map +1 -0
  8. package/dist/{chunk-W4BB4BG2.js → chunk-SVYROCLD.js} +31 -11
  9. package/dist/chunk-SVYROCLD.js.map +1 -0
  10. package/dist/chunk-U4JJC2YX.js +234 -0
  11. package/dist/chunk-U4JJC2YX.js.map +1 -0
  12. package/dist/{chunk-X5G4EHL7.js → chunk-Z7RBRCRN.js} +1 -1
  13. package/dist/chunk-Z7RBRCRN.js.map +1 -0
  14. package/dist/google/index.d.ts +376 -7
  15. package/dist/google/index.js +149 -21
  16. package/dist/google/index.js.map +1 -1
  17. package/dist/http/index.d.ts +222 -25
  18. package/dist/http/index.js +3 -3
  19. package/dist/index.d.ts +1484 -198
  20. package/dist/index.js +233 -47
  21. package/dist/index.js.map +1 -1
  22. package/dist/ollama/index.d.ts +92 -20
  23. package/dist/ollama/index.js +31 -7
  24. package/dist/ollama/index.js.map +1 -1
  25. package/dist/openai/index.d.ts +340 -61
  26. package/dist/openai/index.js +105 -31
  27. package/dist/openai/index.js.map +1 -1
  28. package/dist/openrouter/index.d.ts +107 -51
  29. package/dist/openrouter/index.js +84 -24
  30. package/dist/openrouter/index.js.map +1 -1
  31. package/dist/provider-Bi0nyNhA.d.ts +505 -0
  32. package/dist/retry-BatS2hjD.d.ts +508 -0
  33. package/dist/xai/index.d.ts +97 -22
  34. package/dist/xai/index.js +129 -45
  35. package/dist/xai/index.js.map +1 -1
  36. package/package.json +8 -3
  37. package/dist/chunk-CUCRF5W6.js +0 -136
  38. package/dist/chunk-CUCRF5W6.js.map +0 -1
  39. package/dist/chunk-SUNYWHTH.js.map +0 -1
  40. package/dist/chunk-W4BB4BG2.js.map +0 -1
  41. package/dist/chunk-X5G4EHL7.js.map +0 -1
  42. package/dist/chunk-Y6Q7JCNP.js.map +0 -1
  43. package/dist/provider-CUJWjgNl.d.ts +0 -192
  44. package/dist/retry-I2661_rv.d.ts +0 -118
package/dist/xai/index.js CHANGED
@@ -3,17 +3,17 @@ import {
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-W4BB4BG2.js";
+} from "../chunk-SVYROCLD.js";
 import {
   parseSSEStream
-} from "../chunk-X5G4EHL7.js";
+} from "../chunk-Z7RBRCRN.js";
 import {
   UPPError,
   doFetch,
   doStreamFetch,
   normalizeHttpError,
   resolveApiKey
-} from "../chunk-SUNYWHTH.js";
+} from "../chunk-MOU4U3PO.js";
 
 // src/providers/xai/transform.completions.ts
 function transformRequest(request, modelId) {
@@ -48,12 +48,18 @@ function transformRequest(request, modelId) {
   }
   return xaiRequest;
 }
+function normalizeSystem(system) {
+  if (!system) return void 0;
+  if (typeof system === "string") return system;
+  return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+}
 function transformMessages(messages, system) {
   const result = [];
-  if (system) {
+  const normalizedSystem = normalizeSystem(system);
+  if (normalizedSystem) {
     result.push({
       role: "system",
-      content: system
+      content: normalizedSystem
     });
   }
   for (const message of messages) {
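Note: this release adds three copies of the same normalizeSystem helper (one per transform module), because a system prompt may now arrive either as a plain string or as an array of content blocks; the block form is flattened to a single string before being sent to xAI. A typed sketch of the shared logic (the SystemBlock name is illustrative, not from the package):

    // Hypothetical type name for the block shape the code implies.
    interface SystemBlock {
      text?: string;
    }

    function normalizeSystem(system?: string | SystemBlock[]): string | undefined {
      if (!system) return undefined;
      if (typeof system === "string") return system;
      // Drop empty blocks, then join the rest as blank-line-separated paragraphs.
      return system
        .map((block) => block.text ?? "")
        .filter((text) => text.length > 0)
        .join("\n\n");
    }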
@@ -92,7 +98,6 @@ function transformMessage(message) {
   const hasToolCalls = message.toolCalls && message.toolCalls.length > 0;
   const assistantMessage = {
     role: "assistant",
-    // xAI/OpenAI: content should be null when tool_calls are present and there's no text
     content: hasToolCalls && !textContent ? null : textContent
   };
   if (hasToolCalls) {
@@ -224,7 +229,9 @@ function transformResponse(data) {
   const usage = {
     inputTokens: data.usage.prompt_tokens,
     outputTokens: data.usage.completion_tokens,
-    totalTokens: data.usage.total_tokens
+    totalTokens: data.usage.total_tokens,
+    cacheReadTokens: data.usage.prompt_tokens_details?.cached_tokens ?? 0,
+    cacheWriteTokens: 0
   };
   let stopReason = "end_turn";
   switch (choice.finish_reason) {
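Note: usage objects across all three xAI endpoints now report prompt-cache activity. On the completions endpoint the read count comes from the OpenAI-style usage.prompt_tokens_details.cached_tokens field, and no write count is exposed, so cacheWriteTokens is pinned to 0. A sketch of the mapping under those assumptions (type names are illustrative; the dist code is plain JS):

    interface CompletionsUsage {
      prompt_tokens: number;
      completion_tokens: number;
      total_tokens: number;
      prompt_tokens_details?: { cached_tokens?: number };
    }

    interface UnifiedUsage {
      inputTokens: number;
      outputTokens: number;
      totalTokens: number;
      cacheReadTokens: number;
      cacheWriteTokens: number;
    }

    function toUnifiedUsage(usage: CompletionsUsage): UnifiedUsage {
      return {
        inputTokens: usage.prompt_tokens,
        outputTokens: usage.completion_tokens,
        totalTokens: usage.total_tokens,
        cacheReadTokens: usage.prompt_tokens_details?.cached_tokens ?? 0,
        cacheWriteTokens: 0 // the completions API reports no cache-write figure
      };
    }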
@@ -260,6 +267,7 @@ function createStreamState() {
     finishReason: null,
     inputTokens: 0,
     outputTokens: 0,
+    cacheReadTokens: 0,
     hadRefusal: false
   };
 }
@@ -327,6 +335,7 @@ function transformStreamEvent(chunk, state) {
   if (chunk.usage) {
     state.inputTokens = chunk.usage.prompt_tokens;
     state.outputTokens = chunk.usage.completion_tokens;
+    state.cacheReadTokens = chunk.usage.prompt_tokens_details?.cached_tokens ?? 0;
   }
   return events;
 }
@@ -371,7 +380,9 @@ function buildResponseFromState(state) {
   const usage = {
     inputTokens: state.inputTokens,
     outputTokens: state.outputTokens,
-    totalTokens: state.inputTokens + state.outputTokens
+    totalTokens: state.inputTokens + state.outputTokens,
+    cacheReadTokens: state.cacheReadTokens,
+    cacheWriteTokens: 0
   };
   let stopReason = "end_turn";
   switch (state.finishReason) {
@@ -439,14 +450,22 @@ function createCompletionsLLMHandler() {
       );
       const baseUrl = request.config.baseUrl ?? XAI_COMPLETIONS_API_URL;
       const body = transformRequest(request, modelId);
+      const headers = {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`
+      };
+      if (request.config.headers) {
+        for (const [key, value] of Object.entries(request.config.headers)) {
+          if (value !== void 0) {
+            headers[key] = value;
+          }
+        }
+      }
       const response = await doFetch(
         baseUrl,
         {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            Authorization: `Bearer ${apiKey}`
-          },
+          headers,
           body: JSON.stringify(body),
           signal: request.signal
         },
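Note: every handler in this file now builds its default headers first and then overlays request.config.headers, skipping undefined values. Because user entries are written last, they can override any default, including Authorization. The inlined loop reduces to this helper (mergeHeaders is a hypothetical name; the dist code repeats the loop in each handler):

    function mergeHeaders(
      defaults: Record<string, string>,
      overrides?: Record<string, string | undefined>
    ): Record<string, string> {
      const headers: Record<string, string> = { ...defaults };
      if (overrides) {
        for (const [key, value] of Object.entries(overrides)) {
          if (value !== undefined) headers[key] = value; // undefined entries keep the default
        }
      }
      return headers;
    }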
@@ -477,14 +496,22 @@ function createCompletionsLLMHandler() {
       const body = transformRequest(request, modelId);
       body.stream = true;
       body.stream_options = { include_usage: true };
+      const headers = {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`
+      };
+      if (request.config.headers) {
+        for (const [key, value] of Object.entries(request.config.headers)) {
+          if (value !== void 0) {
+            headers[key] = value;
+          }
+        }
+      }
       const response = await doStreamFetch(
         baseUrl,
         {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            Authorization: `Bearer ${apiKey}`
-          },
+          headers,
           body: JSON.stringify(body),
           signal: request.signal
         },
@@ -582,13 +609,19 @@ function transformRequest2(request, modelId) {
   }
   return xaiRequest;
 }
+function normalizeSystem2(system) {
+  if (!system) return void 0;
+  if (typeof system === "string") return system;
+  return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+}
 function transformInputItems(messages, system) {
   const result = [];
-  if (system) {
+  const normalizedSystem = normalizeSystem2(system);
+  if (normalizedSystem) {
     result.push({
       type: "message",
       role: "system",
-      content: system
+      content: normalizedSystem
     });
   }
   for (const message of messages) {
@@ -769,7 +802,6 @@ function transformResponse2(data) {
     xai: {
       model: data.model,
       status: data.status,
-      // Store response_id for multi-turn tool calling
      response_id: data.id,
      functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0,
      citations: data.citations,
@@ -781,7 +813,9 @@
   const usage = {
     inputTokens: data.usage.input_tokens,
     outputTokens: data.usage.output_tokens,
-    totalTokens: data.usage.total_tokens
+    totalTokens: data.usage.total_tokens,
+    cacheReadTokens: data.usage.input_tokens_details?.cached_tokens ?? 0,
+    cacheWriteTokens: 0
   };
   let stopReason = "end_turn";
   if (data.status === "completed") {
@@ -810,6 +844,7 @@ function createStreamState2() {
     status: "in_progress",
     inputTokens: 0,
     outputTokens: 0,
+    cacheReadTokens: 0,
     hadRefusal: false
   };
 }
@@ -829,6 +864,7 @@ function transformStreamEvent2(event, state) {
       if (event.response.usage) {
         state.inputTokens = event.response.usage.input_tokens;
         state.outputTokens = event.response.usage.output_tokens;
+        state.cacheReadTokens = event.response.usage.input_tokens_details?.cached_tokens ?? 0;
       }
       events.push({ type: "message_stop", index: 0, delta: {} });
       break;
@@ -876,7 +912,7 @@
         delta: {}
       });
       break;
-    case "response.output_text.delta":
+    case "response.output_text.delta": {
      const currentText = state.textByIndex.get(event.output_index) ?? "";
      state.textByIndex.set(event.output_index, currentText + event.delta);
      events.push({
@@ -885,6 +921,7 @@ function transformStreamEvent2(event, state) {
         delta: { text: event.delta }
       });
       break;
+    }
    case "response.output_text.done":
      state.textByIndex.set(event.output_index, event.text);
      break;
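Note: the braces added around this case are a correctness fix rather than styling. const currentText was a lexical declaration scoped to the entire switch body, which linters such as ESLint (no-case-declarations) reject and which can collide with declarations in sibling cases. A block per case gives each declaration its own scope; a minimal illustration:

    declare const kind: "a" | "b";

    switch (kind) {
      case "a": {
        const value = 1; // scoped to this case only
        console.log(value);
        break;
      }
      case "b": {
        const value = 2; // no redeclaration clash, thanks to the block
        console.log(value);
        break;
      }
    }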
@@ -1000,7 +1037,6 @@ function buildResponseFromState2(state) {
     xai: {
       model: state.model,
       status: state.status,
-      // Store response_id for multi-turn tool calling
      response_id: state.id,
      functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
    }
@@ -1010,7 +1046,9 @@
   const usage = {
     inputTokens: state.inputTokens,
     outputTokens: state.outputTokens,
-    totalTokens: state.inputTokens + state.outputTokens
+    totalTokens: state.inputTokens + state.outputTokens,
+    cacheReadTokens: state.cacheReadTokens,
+    cacheWriteTokens: 0
   };
   let stopReason = "end_turn";
   if (state.status === "completed") {
@@ -1069,14 +1107,22 @@ function createResponsesLLMHandler() {
       );
       const baseUrl = request.config.baseUrl ?? XAI_RESPONSES_API_URL;
       const body = transformRequest2(request, modelId);
+      const headers = {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`
+      };
+      if (request.config.headers) {
+        for (const [key, value] of Object.entries(request.config.headers)) {
+          if (value !== void 0) {
+            headers[key] = value;
+          }
+        }
+      }
       const response = await doFetch(
         baseUrl,
         {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            Authorization: `Bearer ${apiKey}`
-          },
+          headers,
           body: JSON.stringify(body),
           signal: request.signal
         },
@@ -1114,14 +1160,22 @@
       const baseUrl = request.config.baseUrl ?? XAI_RESPONSES_API_URL;
       const body = transformRequest2(request, modelId);
       body.stream = true;
+      const headers = {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`
+      };
+      if (request.config.headers) {
+        for (const [key, value] of Object.entries(request.config.headers)) {
+          if (value !== void 0) {
+            headers[key] = value;
+          }
+        }
+      }
       const response = await doStreamFetch(
         baseUrl,
         {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            Authorization: `Bearer ${apiKey}`
-          },
+          headers,
           body: JSON.stringify(body),
           signal: request.signal
         },
@@ -1187,15 +1241,21 @@ function createResponsesLLMHandler() {
 }
 
 // src/providers/xai/transform.messages.ts
+function normalizeSystem3(system) {
+  if (!system) return void 0;
+  if (typeof system === "string") return system;
+  return system.map((block) => block.text ?? "").filter((text) => text.length > 0).join("\n\n");
+}
 function transformRequest3(request, modelId) {
   const params = request.params ?? {};
+  const normalizedSystem = normalizeSystem3(request.system);
   const xaiRequest = {
     ...params,
     model: modelId,
     messages: request.messages.map(transformMessage3)
   };
-  if (request.system) {
-    xaiRequest.system = request.system;
+  if (normalizedSystem) {
+    xaiRequest.system = normalizedSystem;
   }
   if (request.tools && request.tools.length > 0) {
     xaiRequest.tools = request.tools.map(transformTool3);
@@ -1351,7 +1411,9 @@ function transformResponse3(data) {
   const usage = {
     inputTokens: data.usage.input_tokens,
     outputTokens: data.usage.output_tokens,
-    totalTokens: data.usage.input_tokens + data.usage.output_tokens
+    totalTokens: data.usage.input_tokens + data.usage.output_tokens,
+    cacheReadTokens: data.usage.cache_read_input_tokens ?? 0,
+    cacheWriteTokens: data.usage.cache_creation_input_tokens ?? 0
   };
   return {
     message,
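Note: the Messages-compatible endpoint is the only one of the three that reports both cache directions, using Anthropic-style cache_read_input_tokens and cache_creation_input_tokens; both are optional on the wire, hence the ?? 0 fallbacks. A summary of where each endpoint sources its cache counts (field paths collected from the hunks in this diff; null means the endpoint exposes no such figure and the package pins it to 0):

    const cacheTokenSources = {
      completions: { read: "usage.prompt_tokens_details.cached_tokens", write: null },
      responses: { read: "usage.input_tokens_details.cached_tokens", write: null },
      messages: {
        read: "usage.cache_read_input_tokens",
        write: "usage.cache_creation_input_tokens"
      }
    } as const;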
@@ -1368,6 +1430,8 @@ function createStreamState3() {
     stopReason: null,
     inputTokens: 0,
     outputTokens: 0,
+    cacheReadTokens: 0,
+    cacheWriteTokens: 0,
     currentIndex: 0
   };
 }
@@ -1377,6 +1441,8 @@ function transformStreamEvent3(event, state) {
       state.messageId = event.message.id;
       state.model = event.message.model;
       state.inputTokens = event.message.usage.input_tokens;
+      state.cacheReadTokens = event.message.usage.cache_read_input_tokens ?? 0;
+      state.cacheWriteTokens = event.message.usage.cache_creation_input_tokens ?? 0;
       return { type: "message_start", index: 0, delta: {} };
     case "content_block_start":
       state.currentIndex = event.index;
@@ -1485,7 +1551,9 @@ function buildResponseFromState3(state) {
   const usage = {
     inputTokens: state.inputTokens,
     outputTokens: state.outputTokens,
-    totalTokens: state.inputTokens + state.outputTokens
+    totalTokens: state.inputTokens + state.outputTokens,
+    cacheReadTokens: state.cacheReadTokens,
+    cacheWriteTokens: state.cacheWriteTokens
   };
   return {
     message,
@@ -1535,15 +1603,23 @@ function createMessagesLLMHandler() {
       );
       const baseUrl = request.config.baseUrl ?? XAI_MESSAGES_API_URL;
       const body = transformRequest3(request, modelId);
+      const headers = {
+        "Content-Type": "application/json",
+        "x-api-key": apiKey,
+        "anthropic-version": "2023-06-01"
+      };
+      if (request.config.headers) {
+        for (const [key, value] of Object.entries(request.config.headers)) {
+          if (value !== void 0) {
+            headers[key] = value;
+          }
+        }
+      }
       const response = await doFetch(
         baseUrl,
         {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            "x-api-key": apiKey,
-            "anthropic-version": "2023-06-01"
-          },
+          headers,
           body: JSON.stringify(body),
           signal: request.signal
         },
@@ -1573,15 +1649,23 @@ function createMessagesLLMHandler() {
       const baseUrl = request.config.baseUrl ?? XAI_MESSAGES_API_URL;
       const body = transformRequest3(request, modelId);
       body.stream = true;
+      const headers = {
+        "Content-Type": "application/json",
+        "x-api-key": apiKey,
+        "anthropic-version": "2023-06-01"
+      };
+      if (request.config.headers) {
+        for (const [key, value] of Object.entries(request.config.headers)) {
+          if (value !== void 0) {
+            headers[key] = value;
+          }
+        }
+      }
       const response = await doStreamFetch(
         baseUrl,
         {
           method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            "x-api-key": apiKey,
-            "anthropic-version": "2023-06-01"
-          },
+          headers,
           body: JSON.stringify(body),
           signal: request.signal
         },
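Note: unlike the completions and responses handlers, the Messages-compatible handlers authenticate Anthropic-style, with an x-api-key header and a pinned anthropic-version instead of a Bearer token. Since config.headers is overlaid last, a caller could in principle replace either default. A sketch of the resulting default set (apiKey stands in for the value resolved via resolveApiKey):

    declare const apiKey: string; // resolved by resolveApiKey in the dist code

    const defaultMessagesHeaders: Record<string, string> = {
      "Content-Type": "application/json",
      "x-api-key": apiKey, // Anthropic-style auth, not Authorization: Bearer
      "anthropic-version": "2023-06-01"
    };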