@yourgpt/llm-sdk 2.1.3 → 2.1.4-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/README.md +59 -0
  2. package/dist/adapters/index.d.mts +9 -2
  3. package/dist/adapters/index.d.ts +9 -2
  4. package/dist/adapters/index.js +421 -19
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +421 -19
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/index.d.mts +164 -11
  9. package/dist/index.d.ts +164 -11
  10. package/dist/index.js +638 -54
  11. package/dist/index.js.map +1 -1
  12. package/dist/index.mjs +635 -55
  13. package/dist/index.mjs.map +1 -1
  14. package/dist/providers/anthropic/index.d.mts +1 -1
  15. package/dist/providers/anthropic/index.d.ts +1 -1
  16. package/dist/providers/anthropic/index.js +95 -1
  17. package/dist/providers/anthropic/index.js.map +1 -1
  18. package/dist/providers/anthropic/index.mjs +95 -1
  19. package/dist/providers/anthropic/index.mjs.map +1 -1
  20. package/dist/providers/azure/index.d.mts +1 -1
  21. package/dist/providers/azure/index.d.ts +1 -1
  22. package/dist/providers/azure/index.js +51 -5
  23. package/dist/providers/azure/index.js.map +1 -1
  24. package/dist/providers/azure/index.mjs +51 -5
  25. package/dist/providers/azure/index.mjs.map +1 -1
  26. package/dist/providers/google/index.d.mts +1 -1
  27. package/dist/providers/google/index.d.ts +1 -1
  28. package/dist/providers/google/index.js +76 -0
  29. package/dist/providers/google/index.js.map +1 -1
  30. package/dist/providers/google/index.mjs +76 -0
  31. package/dist/providers/google/index.mjs.map +1 -1
  32. package/dist/providers/ollama/index.d.mts +2 -2
  33. package/dist/providers/ollama/index.d.ts +2 -2
  34. package/dist/providers/ollama/index.js +51 -8
  35. package/dist/providers/ollama/index.js.map +1 -1
  36. package/dist/providers/ollama/index.mjs +51 -8
  37. package/dist/providers/ollama/index.mjs.map +1 -1
  38. package/dist/providers/openai/index.d.mts +1 -1
  39. package/dist/providers/openai/index.d.ts +1 -1
  40. package/dist/providers/openai/index.js +301 -3
  41. package/dist/providers/openai/index.js.map +1 -1
  42. package/dist/providers/openai/index.mjs +301 -3
  43. package/dist/providers/openai/index.mjs.map +1 -1
  44. package/dist/providers/openrouter/index.d.mts +1 -1
  45. package/dist/providers/openrouter/index.d.ts +1 -1
  46. package/dist/providers/openrouter/index.js +301 -3
  47. package/dist/providers/openrouter/index.js.map +1 -1
  48. package/dist/providers/openrouter/index.mjs +301 -3
  49. package/dist/providers/openrouter/index.mjs.map +1 -1
  50. package/dist/providers/xai/index.d.mts +1 -1
  51. package/dist/providers/xai/index.d.ts +1 -1
  52. package/dist/providers/xai/index.js +51 -5
  53. package/dist/providers/xai/index.js.map +1 -1
  54. package/dist/providers/xai/index.mjs +51 -5
  55. package/dist/providers/xai/index.mjs.map +1 -1
  56. package/dist/{types-D20jKwJW.d.mts → types-COAOEe_y.d.mts} +68 -8
  57. package/dist/{types-D20jKwJW.d.ts → types-COAOEe_y.d.ts} +68 -8
  58. package/package.json +1 -1
@@ -1,6 +1,44 @@
1
1
  'use strict';
2
2
 
3
3
  // src/adapters/base.ts
4
/**
 * Serialize an arbitrary value to pretty-printed (2-space) JSON for debug
 * logging. BigInt values, which JSON.stringify cannot serialize, become
 * decimal strings; Error instances are expanded into plain
 * { name, message, stack } objects so they survive serialization.
 * May still throw (e.g. on circular references); callers guard for that.
 */
function stringifyForDebug(value) {
  const replacer = (_key, entry) => {
    if (typeof entry === "bigint") {
      return entry.toString();
    }
    if (entry instanceof Error) {
      const { name, message, stack } = entry;
      return { name, message, stack };
    }
    return entry;
  };
  return JSON.stringify(value, replacer, 2);
}
23
/**
 * Conditionally log a provider payload to the console for debugging.
 * No-ops unless `enabled` is truthy, and always suppresses labels containing
 * "stream " (per-chunk stream logs would be far too noisy even in debug
 * mode). Falls back to a plain message when the payload cannot be
 * stringified (e.g. circular references).
 */
function logProviderPayload(provider, label, payload, enabled) {
  if (!enabled || label.toLowerCase().includes("stream ")) {
    return;
  }
  const prefix = `[llm-sdk:${provider}] ${label}`;
  try {
    console.log(`${prefix}\n${stringifyForDebug(payload)}`);
  } catch (error) {
    console.log(`${prefix} (failed to stringify payload)`, error);
  }
}
4
42
  function formatMessages(messages, systemPrompt) {
5
43
  const formatted = [];
6
44
  if (systemPrompt) {
@@ -38,9 +76,44 @@ function parameterToJsonSchema(param) {
38
76
  )
39
77
  ])
40
78
  );
79
+ schema.additionalProperties = false;
41
80
  }
42
81
  return schema;
43
82
  }
83
/**
 * Recursively normalize a JSON-schema fragment for OpenAI "strict" function
 * calling, which requires every object schema to list all of its properties
 * as required and to set `additionalProperties: false`.
 *
 * - Falsy or non-object input yields a minimal empty object schema.
 * - For `type: "object"`, every property schema is normalized recursively,
 *   the `required` list is extended with every declared property key (strict
 *   mode rejects optional properties), and `additionalProperties` defaults
 *   to false (an explicit value is preserved).
 * - For `type: "array"`, `items` is normalized recursively; tuple-form
 *   `items` (an array of schemas) is normalized element-wise.
 *
 * The input is never mutated; a shallow-copied, normalized schema is
 * returned.
 */
function normalizeObjectJsonSchema(schema) {
  if (!schema || typeof schema !== "object") {
    return {
      type: "object",
      properties: {},
      required: [],
      additionalProperties: false
    };
  }
  const normalized = { ...schema };
  const type = normalized.type;
  if (type === "object") {
    const properties =
      normalized.properties &&
      typeof normalized.properties === "object" &&
      !Array.isArray(normalized.properties)
        ? normalized.properties
        : {};
    normalized.properties = Object.fromEntries(
      Object.entries(properties).map(([key, value]) => [
        key,
        normalizeObjectJsonSchema(value)
      ])
    );
    const declaredRequired = Array.isArray(normalized.required)
      ? normalized.required.filter((value) => typeof value === "string")
      : [];
    // Strict mode: union the declared required keys with every property key.
    normalized.required = Array.from(
      new Set([...declaredRequired, ...Object.keys(properties)])
    );
    if (normalized.additionalProperties === void 0) {
      normalized.additionalProperties = false;
    }
  } else if (type === "array" && normalized.items && typeof normalized.items === "object") {
    // Tuple-form `items` (an array of schemas) must be handled element-wise;
    // recursing with the array itself would spread it into `{ ... }` and
    // silently corrupt it into a plain object.
    normalized.items = Array.isArray(normalized.items)
      ? normalized.items.map((item) => normalizeObjectJsonSchema(item))
      : normalizeObjectJsonSchema(normalized.items);
  }
  return normalized;
}
44
117
  function formatTools(actions) {
45
118
  return actions.map((action) => ({
46
119
  type: "function",
@@ -55,7 +128,8 @@ function formatTools(actions) {
55
128
  parameterToJsonSchema(param)
56
129
  ])
57
130
  ) : {},
58
- required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
131
+ required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : [],
132
+ additionalProperties: false
59
133
  }
60
134
  }
61
135
  }));
@@ -311,7 +385,152 @@ var OpenAIAdapter = class {
311
385
  }
312
386
  return this.client;
313
387
  }
388
+ shouldUseResponsesApi(request) {
389
+ return request.providerToolOptions?.openai?.nativeToolSearch?.enabled === true && request.providerToolOptions.openai.nativeToolSearch.useResponsesApi !== false && Array.isArray(request.toolDefinitions) && request.toolDefinitions.length > 0;
390
+ }
391
/**
 * Convert the request's chat-completions-style message history into the
 * Responses API `input` array.
 *
 * Prefers `request.rawMessages` when non-empty; otherwise formats
 * `request.messages` via formatMessagesForOpenAI with no system prompt —
 * the system prompt travels separately (as `instructions`; see
 * completeWithResponses).
 *
 * Per-role mapping:
 * - "system": skipped entirely.
 * - "assistant": a "message" item when there is content (string and array
 *   content pass through as-is; other truthy content is JSON-stringified),
 *   plus one "function_call" item per recorded tool call.
 * - "tool": a "function_call_output" item keyed by `tool_call_id`.
 * - anything else: a "message" item with role "developer" when the source
 *   role is "developer", otherwise "user".
 */
buildResponsesInput(request) {
  const sourceMessages = request.rawMessages && request.rawMessages.length > 0 ? request.rawMessages : formatMessagesForOpenAI(request.messages, void 0);
  const input = [];
  for (const message of sourceMessages) {
    if (message.role === "system") {
      continue;
    }
    if (message.role === "assistant") {
      // String content passes through; array content (content parts) is kept
      // as-is; any other truthy content is JSON-stringified; falsy → "".
      const content = typeof message.content === "string" ? message.content : Array.isArray(message.content) ? message.content : message.content ? JSON.stringify(message.content) : "";
      if (content) {
        input.push({
          type: "message",
          role: "assistant",
          content
        });
      }
      const toolCalls = Array.isArray(message.tool_calls) ? message.tool_calls : [];
      for (const toolCall of toolCalls) {
        input.push({
          type: "function_call",
          call_id: toolCall.id,
          name: toolCall.function?.name,
          // Missing arguments default to an empty JSON object string.
          arguments: toolCall.function?.arguments ?? "{}"
        });
      }
      continue;
    }
    if (message.role === "tool") {
      input.push({
        type: "function_call_output",
        call_id: message.tool_call_id,
        // Non-string tool output is serialized; null/undefined → "null".
        output: typeof message.content === "string" ? message.content : JSON.stringify(message.content ?? null)
      });
      continue;
    }
    input.push({
      type: "message",
      role: message.role === "developer" ? "developer" : "user",
      content: typeof message.content === "string" ? message.content : Array.isArray(message.content) ? message.content : JSON.stringify(message.content ?? "")
    });
  }
  return input;
}
434
+ buildResponsesTools(tools) {
435
+ const nativeTools = tools.filter((tool) => tool.available !== false).map((tool) => ({
436
+ type: "function",
437
+ name: tool.name,
438
+ description: tool.description,
439
+ parameters: normalizeObjectJsonSchema(
440
+ tool.inputSchema ?? {
441
+ type: "object",
442
+ properties: {},
443
+ required: []
444
+ }
445
+ ),
446
+ strict: true,
447
+ defer_loading: tool.deferLoading === true
448
+ }));
449
+ return [{ type: "tool_search" }, ...nativeTools];
450
+ }
451
+ parseResponsesResult(response) {
452
+ const content = typeof response?.output_text === "string" ? response.output_text : "";
453
+ const toolCalls = Array.isArray(response?.output) ? response.output.filter((item) => item?.type === "function_call").map((item) => ({
454
+ id: item.call_id ?? item.id ?? generateToolCallId(),
455
+ name: item.name,
456
+ args: (() => {
457
+ try {
458
+ return JSON.parse(item.arguments ?? "{}");
459
+ } catch {
460
+ return {};
461
+ }
462
+ })()
463
+ })) : [];
464
+ return {
465
+ content,
466
+ toolCalls,
467
+ usage: response?.usage ? {
468
+ promptTokens: response.usage.input_tokens ?? 0,
469
+ completionTokens: response.usage.output_tokens ?? 0,
470
+ totalTokens: response.usage.total_tokens ?? (response.usage.input_tokens ?? 0) + (response.usage.output_tokens ?? 0)
471
+ } : void 0,
472
+ rawResponse: response
473
+ };
474
+ }
475
+ async completeWithResponses(request) {
476
+ const client = await this.getClient();
477
+ const openaiToolOptions = request.providerToolOptions?.openai;
478
+ const payload = {
479
+ model: request.config?.model || this.model,
480
+ instructions: request.systemPrompt,
481
+ input: this.buildResponsesInput(request),
482
+ tools: this.buildResponsesTools(request.toolDefinitions ?? []),
483
+ tool_choice: openaiToolOptions?.toolChoice === "required" ? "required" : openaiToolOptions?.toolChoice === "auto" ? "auto" : void 0,
484
+ parallel_tool_calls: openaiToolOptions?.parallelToolCalls,
485
+ temperature: request.config?.temperature ?? this.config.temperature,
486
+ max_output_tokens: request.config?.maxTokens ?? this.config.maxTokens,
487
+ stream: false
488
+ };
489
+ logProviderPayload("openai", "request payload", payload, request.debug);
490
+ const response = await client.responses.create(payload);
491
+ logProviderPayload("openai", "response payload", response, request.debug);
492
+ return this.parseResponsesResult(response);
493
+ }
314
494
  async *stream(request) {
495
+ if (this.shouldUseResponsesApi(request)) {
496
+ const messageId2 = generateMessageId();
497
+ yield { type: "message:start", id: messageId2 };
498
+ try {
499
+ const result = await this.completeWithResponses(request);
500
+ if (result.content) {
501
+ yield { type: "message:delta", content: result.content };
502
+ }
503
+ for (const toolCall of result.toolCalls) {
504
+ yield {
505
+ type: "action:start",
506
+ id: toolCall.id,
507
+ name: toolCall.name
508
+ };
509
+ yield {
510
+ type: "action:args",
511
+ id: toolCall.id,
512
+ args: JSON.stringify(toolCall.args)
513
+ };
514
+ }
515
+ yield { type: "message:end" };
516
+ yield {
517
+ type: "done",
518
+ usage: result.usage ? {
519
+ prompt_tokens: result.usage.promptTokens,
520
+ completion_tokens: result.usage.completionTokens,
521
+ total_tokens: result.usage.totalTokens
522
+ } : void 0
523
+ };
524
+ return;
525
+ } catch (error) {
526
+ yield {
527
+ type: "error",
528
+ message: error instanceof Error ? error.message : "Unknown error",
529
+ code: "OPENAI_RESPONSES_ERROR"
530
+ };
531
+ return;
532
+ }
533
+ }
315
534
  const client = await this.getClient();
316
535
  let messages;
317
536
  if (request.rawMessages && request.rawMessages.length > 0) {
@@ -376,20 +595,32 @@ var OpenAIAdapter = class {
376
595
  const messageId = generateMessageId();
377
596
  yield { type: "message:start", id: messageId };
378
597
  try {
379
- const stream = await client.chat.completions.create({
598
+ const openaiToolOptions = request.providerToolOptions?.openai;
599
+ const toolChoice = openaiToolOptions?.toolChoice && typeof openaiToolOptions.toolChoice === "object" ? {
600
+ type: "function",
601
+ function: {
602
+ name: openaiToolOptions.toolChoice.name
603
+ }
604
+ } : openaiToolOptions?.toolChoice;
605
+ const payload = {
380
606
  model: request.config?.model || this.model,
381
607
  messages,
382
608
  tools: tools.length > 0 ? tools : void 0,
609
+ tool_choice: tools.length > 0 ? toolChoice : void 0,
610
+ parallel_tool_calls: tools.length > 0 ? openaiToolOptions?.parallelToolCalls : void 0,
383
611
  temperature: request.config?.temperature ?? this.config.temperature,
384
612
  max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
385
613
  stream: true,
386
614
  stream_options: { include_usage: true }
387
- });
615
+ };
616
+ logProviderPayload("openai", "request payload", payload, request.debug);
617
+ const stream = await client.chat.completions.create(payload);
388
618
  let currentToolCall = null;
389
619
  const collectedCitations = [];
390
620
  let citationIndex = 0;
391
621
  let usage;
392
622
  for await (const chunk of stream) {
623
+ logProviderPayload("openai", "stream chunk", chunk, request.debug);
393
624
  if (request.signal?.aborted) {
394
625
  break;
395
626
  }
@@ -471,6 +702,70 @@ var OpenAIAdapter = class {
471
702
  };
472
703
  }
473
704
  }
705
+ async complete(request) {
706
+ if (this.shouldUseResponsesApi(request)) {
707
+ return this.completeWithResponses(request);
708
+ }
709
+ const client = await this.getClient();
710
+ let messages;
711
+ if (request.rawMessages && request.rawMessages.length > 0) {
712
+ messages = request.rawMessages;
713
+ if (request.systemPrompt && !messages.some((message2) => message2.role === "system")) {
714
+ messages = [
715
+ { role: "system", content: request.systemPrompt },
716
+ ...messages
717
+ ];
718
+ }
719
+ } else {
720
+ messages = formatMessagesForOpenAI(
721
+ request.messages,
722
+ request.systemPrompt
723
+ );
724
+ }
725
+ const tools = request.actions?.length ? formatTools(request.actions) : [];
726
+ const openaiToolOptions = request.providerToolOptions?.openai;
727
+ const toolChoice = openaiToolOptions?.toolChoice && typeof openaiToolOptions.toolChoice === "object" ? {
728
+ type: "function",
729
+ function: {
730
+ name: openaiToolOptions.toolChoice.name
731
+ }
732
+ } : openaiToolOptions?.toolChoice;
733
+ const payload = {
734
+ model: request.config?.model || this.model,
735
+ messages,
736
+ tools: tools.length > 0 ? tools : void 0,
737
+ tool_choice: tools.length > 0 ? toolChoice : void 0,
738
+ parallel_tool_calls: tools.length > 0 ? openaiToolOptions?.parallelToolCalls : void 0,
739
+ temperature: request.config?.temperature ?? this.config.temperature,
740
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
741
+ stream: false
742
+ };
743
+ logProviderPayload("openai", "request payload", payload, request.debug);
744
+ const response = await client.chat.completions.create(payload);
745
+ logProviderPayload("openai", "response payload", response, request.debug);
746
+ const choice = response.choices?.[0];
747
+ const message = choice?.message;
748
+ return {
749
+ content: message?.content ?? "",
750
+ toolCalls: message?.tool_calls?.map((toolCall) => ({
751
+ id: toolCall.id ?? generateToolCallId(),
752
+ name: toolCall.function?.name ?? "",
753
+ args: (() => {
754
+ try {
755
+ return JSON.parse(toolCall.function?.arguments ?? "{}");
756
+ } catch {
757
+ return {};
758
+ }
759
+ })()
760
+ })) ?? [],
761
+ usage: response.usage ? {
762
+ promptTokens: response.usage.prompt_tokens,
763
+ completionTokens: response.usage.completion_tokens,
764
+ totalTokens: response.usage.total_tokens
765
+ } : void 0,
766
+ rawResponse: response
767
+ };
768
+ }
474
769
  };
475
770
  function extractDomain(url) {
476
771
  try {
@@ -693,6 +988,26 @@ var AnthropicAdapter = class {
693
988
  }
694
989
  return messages;
695
990
  }
991
+ buildNativeSearchTools(tools, variant = "bm25") {
992
+ const nativeSearchTool = variant === "regex" ? {
993
+ type: "tool_search_tool_regex_20251119",
994
+ name: "tool_search_tool_regex"
995
+ } : {
996
+ type: "tool_search_tool_bm25_20251119",
997
+ name: "tool_search_tool_bm25"
998
+ };
999
+ const providerTools = tools.filter((tool) => tool.available !== false).map((tool) => ({
1000
+ name: tool.name,
1001
+ description: tool.description,
1002
+ input_schema: tool.inputSchema ?? {
1003
+ type: "object",
1004
+ properties: {},
1005
+ required: []
1006
+ },
1007
+ defer_loading: tool.deferLoading === true
1008
+ }));
1009
+ return [nativeSearchTool, ...providerTools];
1010
+ }
696
1011
  /**
697
1012
  * Build common request options for both streaming and non-streaming
698
1013
  */
@@ -705,7 +1020,11 @@ var AnthropicAdapter = class {
705
1020
  const formatted = formatMessagesForAnthropic(request.messages, void 0);
706
1021
  messages = formatted.messages;
707
1022
  }
708
- const tools = request.actions?.map((action) => ({
1023
+ const anthropicNativeSearch = request.providerToolOptions?.anthropic?.nativeToolSearch;
1024
+ const tools = anthropicNativeSearch?.enabled ? this.buildNativeSearchTools(
1025
+ request.toolDefinitions ?? [],
1026
+ anthropicNativeSearch.variant
1027
+ ) : request.actions?.map((action) => ({
709
1028
  name: action.name,
710
1029
  description: action.description,
711
1030
  input_schema: {
@@ -758,6 +1077,19 @@ var AnthropicAdapter = class {
758
1077
  messages,
759
1078
  tools: tools.length ? tools : void 0
760
1079
  };
1080
+ const anthropicToolOptions = request.providerToolOptions?.anthropic;
1081
+ if (tools.length > 0 && anthropicToolOptions) {
1082
+ if (anthropicToolOptions.toolChoice || anthropicToolOptions.disableParallelToolUse !== void 0) {
1083
+ const toolChoice = typeof anthropicToolOptions.toolChoice === "object" ? {
1084
+ type: "tool",
1085
+ name: anthropicToolOptions.toolChoice.name
1086
+ } : anthropicToolOptions.toolChoice ? { type: anthropicToolOptions.toolChoice } : { type: "auto" };
1087
+ if (anthropicToolOptions.disableParallelToolUse !== void 0) {
1088
+ toolChoice.disable_parallel_tool_use = anthropicToolOptions.disableParallelToolUse;
1089
+ }
1090
+ options.tool_choice = toolChoice;
1091
+ }
1092
+ }
761
1093
  if (serverToolConfiguration) {
762
1094
  options.server_tool_configuration = serverToolConfiguration;
763
1095
  }
@@ -780,7 +1112,19 @@ var AnthropicAdapter = class {
780
1112
  stream: false
781
1113
  };
782
1114
  try {
1115
+ logProviderPayload(
1116
+ "anthropic",
1117
+ "request payload",
1118
+ nonStreamingOptions,
1119
+ request.debug
1120
+ );
783
1121
  const response = await client.messages.create(nonStreamingOptions);
1122
+ logProviderPayload(
1123
+ "anthropic",
1124
+ "response payload",
1125
+ response,
1126
+ request.debug
1127
+ );
784
1128
  let content = "";
785
1129
  let thinking = "";
786
1130
  const toolCalls = [];
@@ -813,6 +1157,12 @@ var AnthropicAdapter = class {
813
1157
  const messageId = generateMessageId();
814
1158
  yield { type: "message:start", id: messageId };
815
1159
  try {
1160
+ logProviderPayload(
1161
+ "anthropic",
1162
+ "request payload",
1163
+ options,
1164
+ request.debug
1165
+ );
816
1166
  const stream = await client.messages.stream(options);
817
1167
  let currentToolUse = null;
818
1168
  let isInThinkingBlock = false;
@@ -820,6 +1170,7 @@ var AnthropicAdapter = class {
820
1170
  let citationIndex = 0;
821
1171
  let usage;
822
1172
  for await (const event of stream) {
1173
+ logProviderPayload("anthropic", "stream event", event, request.debug);
823
1174
  if (request.signal?.aborted) {
824
1175
  break;
825
1176
  }
@@ -1102,18 +1453,20 @@ var OllamaAdapter = class {
1102
1453
  if (this.config.options) {
1103
1454
  Object.assign(ollamaOptions, this.config.options);
1104
1455
  }
1456
+ const payload = {
1457
+ model: request.config?.model || this.model,
1458
+ messages,
1459
+ tools,
1460
+ stream: true,
1461
+ options: ollamaOptions
1462
+ };
1463
+ logProviderPayload("ollama", "request payload", payload, request.debug);
1105
1464
  const response = await fetch(`${this.baseUrl}/api/chat`, {
1106
1465
  method: "POST",
1107
1466
  headers: {
1108
1467
  "Content-Type": "application/json"
1109
1468
  },
1110
- body: JSON.stringify({
1111
- model: request.config?.model || this.model,
1112
- messages,
1113
- tools,
1114
- stream: true,
1115
- options: ollamaOptions
1116
- }),
1469
+ body: JSON.stringify(payload),
1117
1470
  signal: request.signal
1118
1471
  });
1119
1472
  if (!response.ok) {
@@ -1138,6 +1491,7 @@ var OllamaAdapter = class {
1138
1491
  if (!line.trim()) continue;
1139
1492
  try {
1140
1493
  const chunk = JSON.parse(line);
1494
+ logProviderPayload("ollama", "stream chunk", chunk, request.debug);
1141
1495
  if (chunk.message?.content) {
1142
1496
  yield { type: "message:delta", content: chunk.message.content };
1143
1497
  }
@@ -1391,6 +1745,22 @@ var GoogleAdapter = class {
1391
1745
  const messageId = generateMessageId();
1392
1746
  yield { type: "message:start", id: messageId };
1393
1747
  try {
1748
+ logProviderPayload(
1749
+ "google",
1750
+ "request payload",
1751
+ {
1752
+ model: modelId,
1753
+ history: mergedContents.slice(0, -1),
1754
+ systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
1755
+ tools: toolsArray.length > 0 ? toolsArray : void 0,
1756
+ generationConfig: {
1757
+ temperature: request.config?.temperature ?? this.config.temperature,
1758
+ maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
1759
+ },
1760
+ messageParts: mergedContents[mergedContents.length - 1]?.parts
1761
+ },
1762
+ request.debug
1763
+ );
1394
1764
  const chat = model.startChat({
1395
1765
  history: mergedContents.slice(0, -1),
1396
1766
  // All but the last message
@@ -1406,6 +1776,7 @@ var GoogleAdapter = class {
1406
1776
  let currentToolCall = null;
1407
1777
  const collectedCitations = [];
1408
1778
  for await (const chunk of result.stream) {
1779
+ logProviderPayload("google", "stream chunk", chunk, request.debug);
1409
1780
  if (request.signal?.aborted) {
1410
1781
  break;
1411
1782
  }
@@ -1468,6 +1839,12 @@ var GoogleAdapter = class {
1468
1839
  let usage;
1469
1840
  try {
1470
1841
  const response = await result.response;
1842
+ logProviderPayload(
1843
+ "google",
1844
+ "response payload",
1845
+ response,
1846
+ request.debug
1847
+ );
1471
1848
  if (response.usageMetadata) {
1472
1849
  usage = {
1473
1850
  prompt_tokens: response.usageMetadata.promptTokenCount || 0,
@@ -1547,6 +1924,18 @@ var GoogleAdapter = class {
1547
1924
  }
1548
1925
  }
1549
1926
  const tools = formatToolsForGemini(request.actions);
1927
+ const payload = {
1928
+ model: modelId,
1929
+ history: mergedContents.slice(0, -1),
1930
+ systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
1931
+ tools: tools ? [tools] : void 0,
1932
+ generationConfig: {
1933
+ temperature: request.config?.temperature ?? this.config.temperature,
1934
+ maxOutputTokens: request.config?.maxTokens ?? this.config.maxTokens
1935
+ },
1936
+ messageParts: mergedContents[mergedContents.length - 1]?.parts
1937
+ };
1938
+ logProviderPayload("google", "request payload", payload, request.debug);
1550
1939
  const chat = model.startChat({
1551
1940
  history: mergedContents.slice(0, -1),
1552
1941
  systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : void 0,
@@ -1559,6 +1948,7 @@ var GoogleAdapter = class {
1559
1948
  const lastMessage = mergedContents[mergedContents.length - 1];
1560
1949
  const result = await chat.sendMessage(lastMessage.parts);
1561
1950
  const response = result.response;
1951
+ logProviderPayload("google", "response payload", response, request.debug);
1562
1952
  let textContent = "";
1563
1953
  const toolCalls = [];
1564
1954
  const candidate = response.candidates?.[0];
@@ -1663,16 +2053,19 @@ var XAIAdapter = class {
1663
2053
  const messageId = generateMessageId();
1664
2054
  yield { type: "message:start", id: messageId };
1665
2055
  try {
1666
- const stream = await client.chat.completions.create({
2056
+ const payload = {
1667
2057
  model: request.config?.model || this.model,
1668
2058
  messages,
1669
2059
  tools,
1670
2060
  temperature: request.config?.temperature ?? this.config.temperature,
1671
2061
  max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
1672
2062
  stream: true
1673
- });
2063
+ };
2064
+ logProviderPayload("xai", "request payload", payload, request.debug);
2065
+ const stream = await client.chat.completions.create(payload);
1674
2066
  let currentToolCall = null;
1675
2067
  for await (const chunk of stream) {
2068
+ logProviderPayload("xai", "stream chunk", chunk, request.debug);
1676
2069
  if (request.signal?.aborted) {
1677
2070
  break;
1678
2071
  }
@@ -1749,13 +2142,16 @@ var XAIAdapter = class {
1749
2142
  );
1750
2143
  }
1751
2144
  const tools = request.actions?.length ? formatTools(request.actions) : void 0;
1752
- const response = await client.chat.completions.create({
2145
+ const payload = {
1753
2146
  model: request.config?.model || this.model,
1754
2147
  messages,
1755
2148
  tools,
1756
2149
  temperature: request.config?.temperature ?? this.config.temperature,
1757
2150
  max_tokens: request.config?.maxTokens ?? this.config.maxTokens
1758
- });
2151
+ };
2152
+ logProviderPayload("xai", "request payload", payload, request.debug);
2153
+ const response = await client.chat.completions.create(payload);
2154
+ logProviderPayload("xai", "response payload", response, request.debug);
1759
2155
  const choice = response.choices[0];
1760
2156
  const message = choice?.message;
1761
2157
  const toolCalls = (message?.tool_calls || []).map((tc) => ({
@@ -1851,7 +2247,7 @@ var AzureAdapter = class {
1851
2247
  const messageId = generateMessageId();
1852
2248
  yield { type: "message:start", id: messageId };
1853
2249
  try {
1854
- const stream = await client.chat.completions.create({
2250
+ const payload = {
1855
2251
  // Azure uses deployment name, not model name
1856
2252
  model: this.config.deploymentName,
1857
2253
  messages,
@@ -1859,9 +2255,12 @@ var AzureAdapter = class {
1859
2255
  temperature: request.config?.temperature ?? this.config.temperature,
1860
2256
  max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
1861
2257
  stream: true
1862
- });
2258
+ };
2259
+ logProviderPayload("azure", "request payload", payload, request.debug);
2260
+ const stream = await client.chat.completions.create(payload);
1863
2261
  let currentToolCall = null;
1864
2262
  for await (const chunk of stream) {
2263
+ logProviderPayload("azure", "stream chunk", chunk, request.debug);
1865
2264
  if (request.signal?.aborted) {
1866
2265
  break;
1867
2266
  }
@@ -1938,13 +2337,16 @@ var AzureAdapter = class {
1938
2337
  );
1939
2338
  }
1940
2339
  const tools = request.actions?.length ? formatTools(request.actions) : void 0;
1941
- const response = await client.chat.completions.create({
2340
+ const payload = {
1942
2341
  model: this.config.deploymentName,
1943
2342
  messages,
1944
2343
  tools,
1945
2344
  temperature: request.config?.temperature ?? this.config.temperature,
1946
2345
  max_tokens: request.config?.maxTokens ?? this.config.maxTokens
1947
- });
2346
+ };
2347
+ logProviderPayload("azure", "request payload", payload, request.debug);
2348
+ const response = await client.chat.completions.create(payload);
2349
+ logProviderPayload("azure", "response payload", response, request.debug);
1948
2350
  const choice = response.choices[0];
1949
2351
  const message = choice?.message;
1950
2352
  const toolCalls = (message?.tool_calls || []).map((tc) => ({