@yourgpt/llm-sdk 2.1.2 → 2.1.4-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/README.md +59 -0
  2. package/dist/adapters/index.d.mts +9 -2
  3. package/dist/adapters/index.d.ts +9 -2
  4. package/dist/adapters/index.js +421 -19
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +421 -19
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/index.d.mts +164 -11
  9. package/dist/index.d.ts +164 -11
  10. package/dist/index.js +639 -54
  11. package/dist/index.js.map +1 -1
  12. package/dist/index.mjs +636 -55
  13. package/dist/index.mjs.map +1 -1
  14. package/dist/providers/anthropic/index.d.mts +1 -1
  15. package/dist/providers/anthropic/index.d.ts +1 -1
  16. package/dist/providers/anthropic/index.js +95 -1
  17. package/dist/providers/anthropic/index.js.map +1 -1
  18. package/dist/providers/anthropic/index.mjs +95 -1
  19. package/dist/providers/anthropic/index.mjs.map +1 -1
  20. package/dist/providers/azure/index.d.mts +1 -1
  21. package/dist/providers/azure/index.d.ts +1 -1
  22. package/dist/providers/azure/index.js +51 -5
  23. package/dist/providers/azure/index.js.map +1 -1
  24. package/dist/providers/azure/index.mjs +51 -5
  25. package/dist/providers/azure/index.mjs.map +1 -1
  26. package/dist/providers/google/index.d.mts +1 -1
  27. package/dist/providers/google/index.d.ts +1 -1
  28. package/dist/providers/google/index.js +76 -0
  29. package/dist/providers/google/index.js.map +1 -1
  30. package/dist/providers/google/index.mjs +76 -0
  31. package/dist/providers/google/index.mjs.map +1 -1
  32. package/dist/providers/ollama/index.d.mts +2 -2
  33. package/dist/providers/ollama/index.d.ts +2 -2
  34. package/dist/providers/ollama/index.js +51 -8
  35. package/dist/providers/ollama/index.js.map +1 -1
  36. package/dist/providers/ollama/index.mjs +51 -8
  37. package/dist/providers/ollama/index.mjs.map +1 -1
  38. package/dist/providers/openai/index.d.mts +1 -1
  39. package/dist/providers/openai/index.d.ts +1 -1
  40. package/dist/providers/openai/index.js +301 -3
  41. package/dist/providers/openai/index.js.map +1 -1
  42. package/dist/providers/openai/index.mjs +301 -3
  43. package/dist/providers/openai/index.mjs.map +1 -1
  44. package/dist/providers/openrouter/index.d.mts +1 -1
  45. package/dist/providers/openrouter/index.d.ts +1 -1
  46. package/dist/providers/openrouter/index.js +301 -3
  47. package/dist/providers/openrouter/index.js.map +1 -1
  48. package/dist/providers/openrouter/index.mjs +301 -3
  49. package/dist/providers/openrouter/index.mjs.map +1 -1
  50. package/dist/providers/xai/index.d.mts +1 -1
  51. package/dist/providers/xai/index.d.ts +1 -1
  52. package/dist/providers/xai/index.js +51 -5
  53. package/dist/providers/xai/index.js.map +1 -1
  54. package/dist/providers/xai/index.mjs +51 -5
  55. package/dist/providers/xai/index.mjs.map +1 -1
  56. package/dist/{types-D20jKwJW.d.mts → types-COAOEe_y.d.mts} +68 -8
  57. package/dist/{types-D20jKwJW.d.ts → types-COAOEe_y.d.ts} +68 -8
  58. package/package.json +1 -1
@@ -271,8 +271,49 @@ function generateId(prefix = "id") {
271
271
  function generateMessageId() {
272
272
  return generateId("msg");
273
273
  }
274
// Creates a unique identifier for a tool/function call. Delegates to the
// shared generateId() helper with the "call" prefix so call IDs are easy to
// recognize in logs and provider payloads.
function generateToolCallId() {
  const prefix = "call";
  return generateId(prefix);
}
274
277
 
275
278
  // src/adapters/base.ts
279
/**
 * Serializes an arbitrary value to pretty-printed JSON for debug logging.
 * BigInt values are rendered as decimal strings (plain JSON.stringify would
 * throw a TypeError on them) and Error instances are expanded into
 * { name, message, stack } plain objects (they would otherwise serialize
 * as "{}"). Circular structures still throw; callers are expected to catch.
 */
function stringifyForDebug(value) {
  const replacer = (_key, entry) => {
    if (typeof entry === "bigint") {
      return entry.toString();
    }
    if (entry instanceof Error) {
      const { name, message, stack } = entry;
      return { name, message, stack };
    }
    return entry;
  };
  return JSON.stringify(value, replacer, 2);
}
298
// Logs a provider request/response payload to the console for debugging.
// No-op unless `enabled` is truthy; any label containing "stream " (e.g.
// per-chunk stream logs) is always suppressed, presumably to avoid flooding
// the console — confirm with call sites.
function logProviderPayload(provider, label, payload, enabled) {
  if (!enabled || label.toLowerCase().includes("stream ")) {
    return;
  }
  const header = `[llm-sdk:${provider}] ${label}`;
  try {
    console.log(`${header}
${stringifyForDebug(payload)}`);
  } catch (error) {
    // stringifyForDebug throws on circular payloads; emit a plain note instead.
    console.log(`${header} (failed to stringify payload)`, error);
  }
}
276
317
  function parameterToJsonSchema(param) {
277
318
  const schema = {
278
319
  type: param.type
@@ -297,9 +338,44 @@ function parameterToJsonSchema(param) {
297
338
  )
298
339
  ])
299
340
  );
341
+ schema.additionalProperties = false;
300
342
  }
301
343
  return schema;
302
344
  }
345
/**
 * Recursively normalizes a JSON Schema so it satisfies OpenAI strict mode:
 * object schemas always get a `properties` map, a `required` list covering
 * every property key, and `additionalProperties: false` (unless the schema
 * already set it). Array schemas have their `items` normalized in turn; any
 * other type passes through shallow-copied. A missing or non-object input
 * yields an empty strict object schema.
 */
function normalizeObjectJsonSchema(schema) {
  if (!schema || typeof schema !== "object") {
    return {
      type: "object",
      properties: {},
      required: [],
      additionalProperties: false
    };
  }
  const result = { ...schema };
  if (result.type === "object") {
    const hasProps = result.properties && typeof result.properties === "object" && !Array.isArray(result.properties);
    const props = hasProps ? result.properties : {};
    const normalizedProps = {};
    for (const [key, child] of Object.entries(props)) {
      normalizedProps[key] = normalizeObjectJsonSchema(child);
    }
    result.properties = normalizedProps;
    // Strict mode requires every property key to appear in `required`;
    // keep any valid declared entries and union in the property keys.
    const declared = Array.isArray(result.required)
      ? result.required.filter((entry) => typeof entry === "string")
      : [];
    result.required = [...new Set([...declared, ...Object.keys(props)])];
    if (result.additionalProperties === undefined) {
      result.additionalProperties = false;
    }
  } else if (result.type === "array" && result.items && typeof result.items === "object") {
    result.items = normalizeObjectJsonSchema(result.items);
  }
  return result;
}
303
379
  function formatTools(actions) {
304
380
  return actions.map((action) => ({
305
381
  type: "function",
@@ -314,7 +390,8 @@ function formatTools(actions) {
314
390
  parameterToJsonSchema(param)
315
391
  ])
316
392
  ) : {},
317
- required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : []
393
+ required: action.parameters ? Object.entries(action.parameters).filter(([, param]) => param.required).map(([key]) => key) : [],
394
+ additionalProperties: false
318
395
  }
319
396
  }
320
397
  }));
@@ -411,7 +488,152 @@ var OpenAIAdapter = class {
411
488
  }
412
489
  return this.client;
413
490
  }
491
+ shouldUseResponsesApi(request) {
492
+ return request.providerToolOptions?.openai?.nativeToolSearch?.enabled === true && request.providerToolOptions.openai.nativeToolSearch.useResponsesApi !== false && Array.isArray(request.toolDefinitions) && request.toolDefinitions.length > 0;
493
+ }
494
// Converts chat-completions-style messages into the Responses API `input`
// array. System messages are dropped here (the system prompt travels via
// `instructions` in completeWithResponses); assistant messages become
// message items plus one function_call item per tool call; tool messages
// become function_call_output items; everything else becomes a user (or
// developer) message item.
buildResponsesInput(request) {
  // Prefer caller-supplied raw messages; otherwise format the SDK messages
  // (no system prompt injected — hence the `void 0` second argument).
  const sourceMessages = request.rawMessages && request.rawMessages.length > 0 ? request.rawMessages : formatMessagesForOpenAI2(request.messages, void 0);
  const input = [];
  for (const message of sourceMessages) {
    if (message.role === "system") {
      continue;
    }
    if (message.role === "assistant") {
      // Coerce content: strings and arrays pass through; other truthy
      // values are JSON-stringified; null/undefined become "".
      const content = typeof message.content === "string" ? message.content : Array.isArray(message.content) ? message.content : message.content ? JSON.stringify(message.content) : "";
      // NOTE: an empty array is truthy, so it is still pushed; only "" is skipped.
      if (content) {
        input.push({
          type: "message",
          role: "assistant",
          content
        });
      }
      // Replay each prior tool call so the model sees its own call history.
      const toolCalls = Array.isArray(message.tool_calls) ? message.tool_calls : [];
      for (const toolCall of toolCalls) {
        input.push({
          type: "function_call",
          call_id: toolCall.id,
          name: toolCall.function?.name,
          arguments: toolCall.function?.arguments ?? "{}"
        });
      }
      continue;
    }
    if (message.role === "tool") {
      // Tool results are linked back to their call via tool_call_id.
      input.push({
        type: "function_call_output",
        call_id: message.tool_call_id,
        output: typeof message.content === "string" ? message.content : JSON.stringify(message.content ?? null)
      });
      continue;
    }
    // Remaining roles map to user, except "developer" which is preserved.
    input.push({
      type: "message",
      role: message.role === "developer" ? "developer" : "user",
      content: typeof message.content === "string" ? message.content : Array.isArray(message.content) ? message.content : JSON.stringify(message.content ?? "")
    });
  }
  return input;
}
537
+ buildResponsesTools(tools) {
538
+ const nativeTools = tools.filter((tool) => tool.available !== false).map((tool) => ({
539
+ type: "function",
540
+ name: tool.name,
541
+ description: tool.description,
542
+ parameters: normalizeObjectJsonSchema(
543
+ tool.inputSchema ?? {
544
+ type: "object",
545
+ properties: {},
546
+ required: []
547
+ }
548
+ ),
549
+ strict: true,
550
+ defer_loading: tool.deferLoading === true
551
+ }));
552
+ return [{ type: "tool_search" }, ...nativeTools];
553
+ }
554
+ parseResponsesResult(response) {
555
+ const content = typeof response?.output_text === "string" ? response.output_text : "";
556
+ const toolCalls = Array.isArray(response?.output) ? response.output.filter((item) => item?.type === "function_call").map((item) => ({
557
+ id: item.call_id ?? item.id ?? generateToolCallId(),
558
+ name: item.name,
559
+ args: (() => {
560
+ try {
561
+ return JSON.parse(item.arguments ?? "{}");
562
+ } catch {
563
+ return {};
564
+ }
565
+ })()
566
+ })) : [];
567
+ return {
568
+ content,
569
+ toolCalls,
570
+ usage: response?.usage ? {
571
+ promptTokens: response.usage.input_tokens ?? 0,
572
+ completionTokens: response.usage.output_tokens ?? 0,
573
+ totalTokens: response.usage.total_tokens ?? (response.usage.input_tokens ?? 0) + (response.usage.output_tokens ?? 0)
574
+ } : void 0,
575
+ rawResponse: response
576
+ };
577
+ }
578
+ async completeWithResponses(request) {
579
+ const client = await this.getClient();
580
+ const openaiToolOptions = request.providerToolOptions?.openai;
581
+ const payload = {
582
+ model: request.config?.model || this.model,
583
+ instructions: request.systemPrompt,
584
+ input: this.buildResponsesInput(request),
585
+ tools: this.buildResponsesTools(request.toolDefinitions ?? []),
586
+ tool_choice: openaiToolOptions?.toolChoice === "required" ? "required" : openaiToolOptions?.toolChoice === "auto" ? "auto" : void 0,
587
+ parallel_tool_calls: openaiToolOptions?.parallelToolCalls,
588
+ temperature: request.config?.temperature ?? this.config.temperature,
589
+ max_output_tokens: request.config?.maxTokens ?? this.config.maxTokens,
590
+ stream: false
591
+ };
592
+ logProviderPayload("openai", "request payload", payload, request.debug);
593
+ const response = await client.responses.create(payload);
594
+ logProviderPayload("openai", "response payload", response, request.debug);
595
+ return this.parseResponsesResult(response);
596
+ }
414
597
  async *stream(request) {
598
+ if (this.shouldUseResponsesApi(request)) {
599
+ const messageId2 = generateMessageId();
600
+ yield { type: "message:start", id: messageId2 };
601
+ try {
602
+ const result = await this.completeWithResponses(request);
603
+ if (result.content) {
604
+ yield { type: "message:delta", content: result.content };
605
+ }
606
+ for (const toolCall of result.toolCalls) {
607
+ yield {
608
+ type: "action:start",
609
+ id: toolCall.id,
610
+ name: toolCall.name
611
+ };
612
+ yield {
613
+ type: "action:args",
614
+ id: toolCall.id,
615
+ args: JSON.stringify(toolCall.args)
616
+ };
617
+ }
618
+ yield { type: "message:end" };
619
+ yield {
620
+ type: "done",
621
+ usage: result.usage ? {
622
+ prompt_tokens: result.usage.promptTokens,
623
+ completion_tokens: result.usage.completionTokens,
624
+ total_tokens: result.usage.totalTokens
625
+ } : void 0
626
+ };
627
+ return;
628
+ } catch (error) {
629
+ yield {
630
+ type: "error",
631
+ message: error instanceof Error ? error.message : "Unknown error",
632
+ code: "OPENAI_RESPONSES_ERROR"
633
+ };
634
+ return;
635
+ }
636
+ }
415
637
  const client = await this.getClient();
416
638
  let messages;
417
639
  if (request.rawMessages && request.rawMessages.length > 0) {
@@ -476,20 +698,32 @@ var OpenAIAdapter = class {
476
698
  const messageId = generateMessageId();
477
699
  yield { type: "message:start", id: messageId };
478
700
  try {
479
- const stream = await client.chat.completions.create({
701
+ const openaiToolOptions = request.providerToolOptions?.openai;
702
+ const toolChoice = openaiToolOptions?.toolChoice && typeof openaiToolOptions.toolChoice === "object" ? {
703
+ type: "function",
704
+ function: {
705
+ name: openaiToolOptions.toolChoice.name
706
+ }
707
+ } : openaiToolOptions?.toolChoice;
708
+ const payload = {
480
709
  model: request.config?.model || this.model,
481
710
  messages,
482
711
  tools: tools.length > 0 ? tools : void 0,
712
+ tool_choice: tools.length > 0 ? toolChoice : void 0,
713
+ parallel_tool_calls: tools.length > 0 ? openaiToolOptions?.parallelToolCalls : void 0,
483
714
  temperature: request.config?.temperature ?? this.config.temperature,
484
715
  max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
485
716
  stream: true,
486
717
  stream_options: { include_usage: true }
487
- });
718
+ };
719
+ logProviderPayload("openai", "request payload", payload, request.debug);
720
+ const stream = await client.chat.completions.create(payload);
488
721
  let currentToolCall = null;
489
722
  const collectedCitations = [];
490
723
  let citationIndex = 0;
491
724
  let usage;
492
725
  for await (const chunk of stream) {
726
+ logProviderPayload("openai", "stream chunk", chunk, request.debug);
493
727
  if (request.signal?.aborted) {
494
728
  break;
495
729
  }
@@ -571,6 +805,70 @@ var OpenAIAdapter = class {
571
805
  };
572
806
  }
573
807
  }
808
+ async complete(request) {
809
+ if (this.shouldUseResponsesApi(request)) {
810
+ return this.completeWithResponses(request);
811
+ }
812
+ const client = await this.getClient();
813
+ let messages;
814
+ if (request.rawMessages && request.rawMessages.length > 0) {
815
+ messages = request.rawMessages;
816
+ if (request.systemPrompt && !messages.some((message2) => message2.role === "system")) {
817
+ messages = [
818
+ { role: "system", content: request.systemPrompt },
819
+ ...messages
820
+ ];
821
+ }
822
+ } else {
823
+ messages = formatMessagesForOpenAI2(
824
+ request.messages,
825
+ request.systemPrompt
826
+ );
827
+ }
828
+ const tools = request.actions?.length ? formatTools(request.actions) : [];
829
+ const openaiToolOptions = request.providerToolOptions?.openai;
830
+ const toolChoice = openaiToolOptions?.toolChoice && typeof openaiToolOptions.toolChoice === "object" ? {
831
+ type: "function",
832
+ function: {
833
+ name: openaiToolOptions.toolChoice.name
834
+ }
835
+ } : openaiToolOptions?.toolChoice;
836
+ const payload = {
837
+ model: request.config?.model || this.model,
838
+ messages,
839
+ tools: tools.length > 0 ? tools : void 0,
840
+ tool_choice: tools.length > 0 ? toolChoice : void 0,
841
+ parallel_tool_calls: tools.length > 0 ? openaiToolOptions?.parallelToolCalls : void 0,
842
+ temperature: request.config?.temperature ?? this.config.temperature,
843
+ max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
844
+ stream: false
845
+ };
846
+ logProviderPayload("openai", "request payload", payload, request.debug);
847
+ const response = await client.chat.completions.create(payload);
848
+ logProviderPayload("openai", "response payload", response, request.debug);
849
+ const choice = response.choices?.[0];
850
+ const message = choice?.message;
851
+ return {
852
+ content: message?.content ?? "",
853
+ toolCalls: message?.tool_calls?.map((toolCall) => ({
854
+ id: toolCall.id ?? generateToolCallId(),
855
+ name: toolCall.function?.name ?? "",
856
+ args: (() => {
857
+ try {
858
+ return JSON.parse(toolCall.function?.arguments ?? "{}");
859
+ } catch {
860
+ return {};
861
+ }
862
+ })()
863
+ })) ?? [],
864
+ usage: response.usage ? {
865
+ promptTokens: response.usage.prompt_tokens,
866
+ completionTokens: response.usage.completion_tokens,
867
+ totalTokens: response.usage.total_tokens
868
+ } : void 0,
869
+ rawResponse: response
870
+ };
871
+ }
574
872
  };
575
873
  function extractDomain(url) {
576
874
  try {