@yourgpt/llm-sdk 2.1.9 → 2.1.10-alpha.0

Files changed (51)
  1. package/dist/adapters/index.d.mts +38 -4
  2. package/dist/adapters/index.d.ts +38 -4
  3. package/dist/adapters/index.js +318 -8
  4. package/dist/adapters/index.mjs +318 -8
  5. package/dist/{base-iGi9Va6Z.d.ts → base-DN1EfKnE.d.mts} +2 -1
  6. package/dist/{base-D-U61JaB.d.mts → base-DuUNxtVg.d.ts} +2 -1
  7. package/dist/fallback/index.d.mts +4 -4
  8. package/dist/fallback/index.d.ts +4 -4
  9. package/dist/index.d.mts +7 -7
  10. package/dist/index.d.ts +7 -7
  11. package/dist/index.js +43 -23
  12. package/dist/index.mjs +43 -23
  13. package/dist/providers/anthropic/index.d.mts +3 -3
  14. package/dist/providers/anthropic/index.d.ts +3 -3
  15. package/dist/providers/anthropic/index.js +17 -0
  16. package/dist/providers/anthropic/index.mjs +17 -0
  17. package/dist/providers/azure/index.d.mts +3 -3
  18. package/dist/providers/azure/index.d.ts +3 -3
  19. package/dist/providers/fireworks/index.d.mts +1 -1
  20. package/dist/providers/fireworks/index.d.ts +1 -1
  21. package/dist/providers/google/index.d.mts +3 -3
  22. package/dist/providers/google/index.d.ts +3 -3
  23. package/dist/providers/google/index.js +311 -8
  24. package/dist/providers/google/index.mjs +311 -8
  25. package/dist/providers/ollama/index.d.mts +4 -4
  26. package/dist/providers/ollama/index.d.ts +4 -4
  27. package/dist/providers/openai/index.d.mts +3 -3
  28. package/dist/providers/openai/index.d.ts +3 -3
  29. package/dist/providers/openai/index.js +321 -8
  30. package/dist/providers/openai/index.mjs +321 -8
  31. package/dist/providers/openrouter/index.d.mts +7 -3
  32. package/dist/providers/openrouter/index.d.ts +7 -3
  33. package/dist/providers/openrouter/index.js +601 -11
  34. package/dist/providers/openrouter/index.mjs +601 -11
  35. package/dist/providers/togetherai/index.d.mts +3 -3
  36. package/dist/providers/togetherai/index.d.ts +3 -3
  37. package/dist/providers/togetherai/index.js +311 -8
  38. package/dist/providers/togetherai/index.mjs +311 -8
  39. package/dist/providers/xai/index.d.mts +3 -3
  40. package/dist/providers/xai/index.d.ts +3 -3
  41. package/dist/providers/xai/index.js +311 -8
  42. package/dist/providers/xai/index.mjs +311 -8
  43. package/dist/{types-D4YfrQJR.d.mts → types-BNCmlJMs.d.mts} +1 -1
  44. package/dist/{types-DRqxMIjF.d.mts → types-CMMQ8s2O.d.mts} +1 -1
  45. package/dist/{types-CR8mi9I0.d.ts → types-CMvvDo-E.d.mts} +12 -1
  46. package/dist/{types-CR8mi9I0.d.mts → types-CMvvDo-E.d.ts} +12 -1
  47. package/dist/{types-BctsnC3g.d.ts → types-DhktekQ3.d.ts} +1 -1
  48. package/dist/{types-38yolWJn.d.ts → types-Pj-vpmoT.d.ts} +1 -1
  49. package/dist/yourgpt/index.d.mts +1 -1
  50. package/dist/yourgpt/index.d.ts +1 -1
  51. package/package.json +1 -1
@@ -453,6 +453,7 @@ var OpenAIAdapter = class _OpenAIAdapter {
  if (baseUrl.includes("generativelanguage.googleapis.com")) return "google";
  if (baseUrl.includes("x.ai")) return "xai";
  if (baseUrl.includes("azure")) return "azure";
+ if (baseUrl.includes("openrouter.ai")) return "openrouter";
  return "openai";
  }
  async getClient() {
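
The new branch routes any base URL containing "openrouter.ai" to the "openrouter" provider before the catch-all "openai" return. A minimal standalone sketch of the same check (a hypothetical helper; in the package this logic lives as a method on OpenAIAdapter):

    // Order matters: every specific host is tested before the "openai"
    // fallback, otherwise OpenRouter traffic would be mislabeled.
    type DetectedProvider = "google" | "xai" | "azure" | "openrouter" | "openai";

    function detectProvider(baseUrl: string): DetectedProvider {
      if (baseUrl.includes("generativelanguage.googleapis.com")) return "google";
      if (baseUrl.includes("x.ai")) return "xai";
      if (baseUrl.includes("azure")) return "azure";
      if (baseUrl.includes("openrouter.ai")) return "openrouter"; // added in this release
      return "openai";
    }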
@@ -552,6 +553,256 @@ var OpenAIAdapter = class _OpenAIAdapter {
  rawResponse: response
  };
  }
+ /**
+ * OpenAI reasoning models on OpenRouter (o1/o3/o4/gpt-5 family) hide their
+ * reasoning content on the chat-completions endpoint. To surface reasoning
+ * SUMMARIES (not raw CoT, which OpenAI never exposes) we have to use the
+ * Responses API, which streams `response.reasoning_summary_text.delta` events.
+ *
+ * Match by prefix on the OpenRouter model id. Excludes openai/gpt-4o,
+ * openai/gpt-4.1, openai/chatgpt-* — those continue on chat-completions.
+ */
+ isOpenAIReasoningModelOnOpenRouter(activeModel) {
+ if (this.provider !== "openrouter") return false;
+ return activeModel.startsWith("openai/o1") || activeModel.startsWith("openai/o3") || activeModel.startsWith("openai/o4") || activeModel.startsWith("openai/gpt-5");
+ }
+ /**
+ * Convert ActionDefinition[] (the chat-completions tool shape used by the
+ * adapter) to the Responses API tool shape.
+ */
+ buildResponsesToolsFromActions(actions) {
+ if (!actions || actions.length === 0) return void 0;
+ const formatted = formatTools(actions);
+ return formatted.map((t) => ({
+ type: "function",
+ name: t.function.name,
+ description: t.function.description,
+ parameters: t.function.parameters
+ }));
+ }
+ /**
+ * Streaming Responses API path for OpenAI reasoning models on OpenRouter.
+ *
+ * Maps Responses API SSE events back to the same StreamEvent shapes the
+ * chat-completions path emits, so downstream consumers (processChunk.ts,
+ * frontend tool handlers, plan approval, specialist delegations) see
+ * identical events regardless of which path produced them.
+ *
+ * response.reasoning_summary_text.delta → thinking:start (once) + thinking:delta
+ * response.output_text.delta → message:delta
+ * response.output_item.added (function_call) → action:start (queued buffer)
+ * response.function_call_arguments.delta → action:args (progressive)
+ * response.output_item.done (function_call) → final action:args + action:end
+ * response.completed → message:end + done(usage)
+ * response.error → error
+ */
+ async *streamWithResponsesAPI(request, activeModel, messageId) {
+ const client = await this.getClient();
+ const maxTokensValue = request.config?.maxTokens ?? this.config.maxTokens;
+ const payload = {
+ model: activeModel,
+ input: this.buildResponsesInput(request),
+ stream: true,
+ reasoning: {
+ effort: request.config?.reasoningEffort ?? "medium",
+ summary: "auto"
+ }
+ };
+ if (request.systemPrompt) payload.instructions = request.systemPrompt;
+ if (typeof maxTokensValue === "number")
+ payload.max_output_tokens = maxTokensValue;
+ const tools = this.buildResponsesToolsFromActions(request.actions);
+ if (tools && tools.length > 0) payload.tools = tools;
+ logProviderPayload(
+ "openai",
+ "responses-api request payload",
+ payload,
+ request.debug
+ );
+ let stream;
+ try {
+ stream = await client.responses.create(payload);
+ } catch (error) {
+ yield {
+ type: "error",
+ message: error instanceof Error ? error.message : "Unknown error",
+ code: "OPENAI_RESPONSES_ERROR"
+ };
+ return;
+ }
+ const toolBuffers = /* @__PURE__ */ new Map();
+ const itemIdToCallId = /* @__PURE__ */ new Map();
+ let usage;
+ let reasoningStarted = false;
+ let textStarted = false;
+ let finishEmitted = false;
+ const resolveCallId = (evt) => {
+ if (evt?.call_id) return evt.call_id;
+ if (evt?.item_id) return itemIdToCallId.get(evt.item_id) ?? evt.item_id;
+ if (evt?.item?.call_id) return evt.item.call_id;
+ if (evt?.item?.id) return evt.item.id;
+ return "";
+ };
+ try {
+ for await (const evt of stream) {
+ logProviderPayload(
+ "openai",
+ "responses-api stream chunk",
+ evt,
+ request.debug
+ );
+ if (request.signal?.aborted) break;
+ const t = evt?.type ?? "";
+ if (t === "response.reasoning_summary_text.delta") {
+ const delta = evt.delta ?? "";
+ if (!delta) continue;
+ if (!reasoningStarted) {
+ yield { type: "thinking:start" };
+ reasoningStarted = true;
+ }
+ yield { type: "thinking:delta", content: delta };
+ continue;
+ }
+ if (t === "response.reasoning_summary_text.done" || t === "response.reasoning.done") {
+ continue;
+ }
+ if (t === "response.output_text.delta") {
+ const text = evt.delta ?? "";
+ if (!text) continue;
+ if (reasoningStarted && !textStarted) {
+ yield { type: "thinking:end" };
+ textStarted = true;
+ }
+ yield { type: "message:delta", content: text };
+ continue;
+ }
+ if (t === "response.output_item.added") {
+ const item = evt.item;
+ if (item?.type === "function_call") {
+ const callId = item.call_id ?? item.id ?? "";
+ const itemId = item.id ?? callId;
+ if (callId) {
+ if (itemId && itemId !== callId) {
+ itemIdToCallId.set(itemId, callId);
+ }
+ if (!toolBuffers.has(callId)) {
+ toolBuffers.set(callId, {
+ id: callId,
+ name: item.name ?? "",
+ arguments: item.arguments ?? "",
+ emittedStart: false
+ });
+ }
+ const buf = toolBuffers.get(callId);
+ if (buf.name && !buf.emittedStart) {
+ yield { type: "action:start", id: buf.id, name: buf.name };
+ buf.emittedStart = true;
+ }
+ }
+ }
+ continue;
+ }
+ if (t === "response.function_call_arguments.delta") {
+ const callId = resolveCallId(evt);
+ const delta = evt.delta ?? "";
+ if (!callId || !delta) continue;
+ let buf = toolBuffers.get(callId);
+ if (!buf) {
+ buf = { id: callId, name: "", arguments: "", emittedStart: false };
+ toolBuffers.set(callId, buf);
+ }
+ buf.arguments += delta;
+ if (buf.emittedStart) {
+ yield {
+ type: "action:args",
+ id: buf.id,
+ args: buf.arguments
+ };
+ }
+ continue;
+ }
+ if (t === "response.output_item.done") {
+ const item = evt.item;
+ if (item?.type === "function_call") {
+ const callId = item.call_id ?? item.id ?? "";
+ const buf = toolBuffers.get(callId);
+ const name = buf?.name || item.name || "";
+ const argsStr = buf?.arguments || item.arguments || "{}";
+ if (callId && name) {
+ if (!buf?.emittedStart) {
+ yield { type: "action:start", id: callId, name };
+ }
+ yield {
+ type: "action:args",
+ id: callId,
+ args: argsStr
+ };
+ yield {
+ type: "action:end",
+ id: callId,
+ name
+ };
+ }
+ toolBuffers.delete(callId);
+ }
+ continue;
+ }
+ if (t === "response.completed") {
+ const u = evt.response?.usage;
+ if (u) {
+ usage = {
+ prompt_tokens: u.input_tokens ?? 0,
+ completion_tokens: u.output_tokens ?? 0,
+ total_tokens: u.total_tokens ?? (u.input_tokens ?? 0) + (u.output_tokens ?? 0)
+ };
+ }
+ for (const buf of toolBuffers.values()) {
+ if (!buf.id || !buf.name) continue;
+ if (!buf.emittedStart) {
+ yield { type: "action:start", id: buf.id, name: buf.name };
+ }
+ yield {
+ type: "action:args",
+ id: buf.id,
+ args: buf.arguments || "{}"
+ };
+ yield { type: "action:end", id: buf.id, name: buf.name };
+ }
+ toolBuffers.clear();
+ if (reasoningStarted && !textStarted) {
+ yield { type: "thinking:end" };
+ }
+ yield { type: "message:end" };
+ yield { type: "done", usage };
+ finishEmitted = true;
+ continue;
+ }
+ if (t === "response.error" || t === "error") {
+ const msg = evt.error?.message || evt.message || "Responses API error";
+ yield {
+ type: "error",
+ message: msg,
+ code: "OPENAI_RESPONSES_ERROR"
+ };
+ return;
+ }
+ }
+ } catch (error) {
+ yield {
+ type: "error",
+ message: error instanceof Error ? error.message : "Unknown error",
+ code: "OPENAI_RESPONSES_ERROR"
+ };
+ return;
+ }
+ if (!finishEmitted) {
+ if (reasoningStarted && !textStarted) {
+ yield { type: "thinking:end" };
+ }
+ yield { type: "message:end" };
+ yield { type: "done", usage };
+ }
+ }
  async completeWithResponses(request) {
  const client = await this.getClient();
  const openaiToolOptions = request.providerToolOptions?.openai;
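
The point of this mapping is that downstream code never needs to know whether the Responses API or the chat-completions path produced the stream. A sketch of a consumer, with the StreamEvent union reconstructed from the events this diff emits (the type definition and `stream` binding here are assumptions for illustration, not the SDK's exported API):

    type StreamEvent =
      | { type: "thinking:start" | "thinking:end" | "message:end" }
      | { type: "thinking:delta" | "message:delta"; content: string }
      | { type: "action:start" | "action:end"; id: string; name: string }
      | { type: "action:args"; id: string; args: string }
      | { type: "done"; usage?: unknown }
      | { type: "error"; message: string; code?: string };

    declare const stream: AsyncIterable<StreamEvent>; // produced by the adapter

    const pendingArgs = new Map<string, string>();
    for await (const evt of stream) {
      switch (evt.type) {
        case "thinking:delta":
          process.stdout.write(evt.content);   // reasoning summary text
          break;
        case "message:delta":
          process.stdout.write(evt.content);   // visible assistant text
          break;
        case "action:args":
          pendingArgs.set(evt.id, evt.args);   // full accumulated JSON so far
          break;
        case "action:end":
          console.log(`run ${evt.name} with`, pendingArgs.get(evt.id) ?? "{}");
          break;
        case "done":
          console.log("usage:", evt.usage);
          break;
      }
    }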
@@ -685,16 +936,37 @@ var OpenAIAdapter = class _OpenAIAdapter {
  name: openaiToolOptions.toolChoice.name
  }
  } : openaiToolOptions?.toolChoice;
+ const isOpenRouter = this.provider === "openrouter";
+ const activeModel = request.config?.model || this.model;
+ const modelSlug = activeModel.replace("openai/", "");
+ const isOSeries = /^o[1-9]/.test(modelSlug);
+ const isOpenAIOnOpenRouter = isOpenRouter && activeModel.startsWith("openai/");
+ if (!this.config.disableThinking && this.isOpenAIReasoningModelOnOpenRouter(activeModel)) {
+ yield* this.streamWithResponsesAPI(request, activeModel, messageId);
+ return;
+ }
+ const maxTokensValue = request.config?.maxTokens ?? this.config.maxTokens;
  const payload = {
- model: request.config?.model || this.model,
+ model: activeModel,
  messages,
  tools: tools.length > 0 ? tools : void 0,
  tool_choice: tools.length > 0 ? toolChoice : void 0,
  parallel_tool_calls: tools.length > 0 ? openaiToolOptions?.parallelToolCalls : void 0,
- temperature: request.config?.temperature ?? this.config.temperature,
- max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
  stream: true,
- stream_options: { include_usage: true }
+ stream_options: { include_usage: true },
+ // o-series: use max_completion_tokens + reasoning_effort, no temperature
+ // regular models: use max_tokens + temperature
+ ...isOSeries ? {
+ max_completion_tokens: maxTokensValue,
+ reasoning_effort: request.config?.reasoningEffort ?? "medium"
+ } : {
+ temperature: request.config?.temperature ?? this.config.temperature,
+ max_tokens: maxTokensValue
+ },
+ // Non-OpenAI OpenRouter models support OR's reasoning/include_reasoning params.
+ // When disableThinking=true we must explicitly send include_reasoning:false because
+ // models like Qwen3 and DeepSeek-R1 reason by default even without the reasoning param.
+ ...isOpenRouter && !isOpenAIOnOpenRouter ? this.config.disableThinking ? { include_reasoning: false } : { reasoning: { max_tokens: 8e3 }, include_reasoning: true } : {}
  };
  logProviderPayload("openai", "request payload", payload, request.debug);
  const stream = await client.chat.completions.create(payload);
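
The conditional spread is the crux of this hunk: o-series models reject `temperature` and `max_tokens`, taking `max_completion_tokens` and `reasoning_effort` instead. A reduced sketch of the same branching, with field names taken from the diff (the standalone function itself is hypothetical):

    // o-series (o1, o3, ...) vs. classic sampling parameters.
    function samplingParams(
      model: string,
      maxTokens?: number,
      temperature?: number,
      effort: "low" | "medium" | "high" = "medium"
    ) {
      const isOSeries = /^o[1-9]/.test(model.replace("openai/", ""));
      return isOSeries
        ? { max_completion_tokens: maxTokens, reasoning_effort: effort }
        : { temperature, max_tokens: maxTokens };
    }

    // Spread into the chat-completions payload the same way the adapter does:
    // const payload = { model, messages, stream: true, ...samplingParams(model, 4096) };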
@@ -702,6 +974,7 @@ var OpenAIAdapter = class _OpenAIAdapter {
  const collectedCitations = [];
  let citationIndex = 0;
  let usage;
+ let adapterReasoningStarted = false;
  for await (const chunk of stream) {
  logProviderPayload("openai", "stream chunk", chunk, request.debug);
  if (request.signal?.aborted) {
@@ -712,6 +985,22 @@ var OpenAIAdapter = class _OpenAIAdapter {
  if (delta?.content) {
  yield { type: "message:delta", content: delta.content };
  }
+ if (isOpenRouter) {
+ const rc = delta?.reasoning_content ?? delta?.reasoning ?? null;
+ if (rc) {
+ const rcText = typeof rc === "string" ? rc : Array.isArray(rc) && rc[0]?.text ? rc[0].text : "";
+ if (rcText) {
+ if (!adapterReasoningStarted) {
+ yield { type: "thinking:start" };
+ adapterReasoningStarted = true;
+ }
+ yield { type: "thinking:delta", content: rcText };
+ }
+ } else if (adapterReasoningStarted && (delta?.content || choice?.finish_reason)) {
+ yield { type: "thinking:end" };
+ adapterReasoningStarted = false;
+ }
+ }
  const annotations = delta?.annotations;
  if (annotations && annotations.length > 0) {
  for (const annotation of annotations) {
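
OpenRouter surfaces reasoning inconsistently across upstream models: sometimes as `delta.reasoning_content` (a string), sometimes as `delta.reasoning` (a string, or an array of `{ text }` parts). A sketch of the normalization this branch performs; the types are assumptions, since the dist code is untyped JS:

    function reasoningText(delta: {
      reasoning_content?: unknown;
      reasoning?: unknown;
    }): string {
      const rc = delta.reasoning_content ?? delta.reasoning ?? null;
      if (typeof rc === "string") return rc;
      if (Array.isArray(rc) && typeof rc[0]?.text === "string") return rc[0].text;
      return ""; // no reasoning in this chunk
    }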
@@ -759,6 +1048,11 @@ var OpenAIAdapter = class _OpenAIAdapter {
  };
  } else if (currentToolCall && toolCall.function?.arguments) {
  currentToolCall.arguments += toolCall.function.arguments;
+ yield {
+ type: "action:args",
+ id: currentToolCall.id,
+ args: currentToolCall.arguments
+ };
  }
  }
  }
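
Previously `action:args` fired once per tool call; now every argument fragment re-emits the full accumulated string. A consumer can attempt a parse on each snapshot and fall back to raw text until the JSON closes. A sketch (this tolerant-preview approach is an assumption about usage, not SDK behavior):

    // Returns parsed args once the accumulated string is valid JSON,
    // undefined while fragments are still arriving.
    function previewArgs(accumulated: string): unknown | undefined {
      try {
        return JSON.parse(accumulated);
      } catch {
        return undefined; // incomplete JSON: keep showing the raw text
      }
    }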
@@ -834,15 +1128,24 @@ var OpenAIAdapter = class _OpenAIAdapter {
  name: openaiToolOptions.toolChoice.name
  }
  } : openaiToolOptions?.toolChoice;
+ const activeModel2 = request.config?.model || this.model;
+ const modelSlug2 = activeModel2.replace("openai/", "");
+ const isOSeries2 = /^o[1-9]/.test(modelSlug2);
+ const maxTokensValue2 = request.config?.maxTokens ?? this.config.maxTokens;
  const payload = {
- model: request.config?.model || this.model,
+ model: activeModel2,
  messages,
  tools: tools.length > 0 ? tools : void 0,
  tool_choice: tools.length > 0 ? toolChoice : void 0,
  parallel_tool_calls: tools.length > 0 ? openaiToolOptions?.parallelToolCalls : void 0,
- temperature: request.config?.temperature ?? this.config.temperature,
- max_tokens: request.config?.maxTokens ?? this.config.maxTokens,
- stream: false
+ stream: false,
+ ...isOSeries2 ? {
+ max_completion_tokens: maxTokensValue2,
+ reasoning_effort: request.config?.reasoningEffort ?? "medium"
+ } : {
+ temperature: request.config?.temperature ?? this.config.temperature,
+ max_tokens: maxTokensValue2
+ }
  };
  logProviderPayload("openai", "request payload", payload, request.debug);
  const response = await client.chat.completions.create(payload);
@@ -1,4 +1,4 @@
- import { L as LLMAdapter } from './base-D-U61JaB.mjs';
+ import { L as LLMAdapter } from './base-DN1EfKnE.mjs';

  /**
  * Fallback Chain & Routing Strategy Types
@@ -1,4 +1,4 @@
- import { L as LLMAdapter, T as ToolDefinition, U as UnifiedToolCall, h as UnifiedToolResult } from './base-D-U61JaB.mjs';
+ import { L as LLMAdapter, T as ToolDefinition, U as UnifiedToolCall, h as UnifiedToolResult } from './base-DN1EfKnE.mjs';

  /**
  * Provider Types
@@ -191,11 +191,22 @@ interface TokenUsage {
  /**
  * Stream chunk from model.doStream()
  */
- type StreamChunk = TextDeltaChunk | ToolCallChunk | ToolResultChunk | FinishChunk | ErrorChunk;
+ type StreamChunk = TextDeltaChunk | ToolCallStartChunk | ToolCallDeltaChunk | ToolCallChunk | ToolResultChunk | FinishChunk | ErrorChunk;
  interface TextDeltaChunk {
  type: "text-delta";
  text: string;
  }
+ interface ToolCallStartChunk {
+ type: "tool-call-start";
+ toolCallId: string;
+ toolName: string;
+ }
+ interface ToolCallDeltaChunk {
+ type: "tool-call-delta";
+ toolCallId: string;
+ /** Accumulated args text so far (not a delta — full accumulated string) */
+ argsText: string;
+ }
  interface ToolCallChunk {
  type: "tool-call";
  toolCall: ToolCall;
@@ -191,11 +191,22 @@ interface TokenUsage {
  /**
  * Stream chunk from model.doStream()
  */
- type StreamChunk = TextDeltaChunk | ToolCallChunk | ToolResultChunk | FinishChunk | ErrorChunk;
+ type StreamChunk = TextDeltaChunk | ToolCallStartChunk | ToolCallDeltaChunk | ToolCallChunk | ToolResultChunk | FinishChunk | ErrorChunk;
  interface TextDeltaChunk {
  type: "text-delta";
  text: string;
  }
+ interface ToolCallStartChunk {
+ type: "tool-call-start";
+ toolCallId: string;
+ toolName: string;
+ }
+ interface ToolCallDeltaChunk {
+ type: "tool-call-delta";
+ toolCallId: string;
+ /** Accumulated args text so far (not a delta — full accumulated string) */
+ argsText: string;
+ }
  interface ToolCallChunk {
  type: "tool-call";
  toolCall: ToolCall;
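
The widened union (repeated identically in the .d.ts and .d.mts builds) lets typed consumers discriminate on `chunk.type`. A sketch of a handler, assuming `StreamChunk` and `ToolCall` are reachable from the package's type exports; the console sinks are placeholders for real UI wiring:

    function onChunk(chunk: StreamChunk): void {
      switch (chunk.type) {
        case "text-delta":
          process.stdout.write(chunk.text);
          break;
        case "tool-call-start":
          console.log(`tool ${chunk.toolName} started (${chunk.toolCallId})`);
          break;
        case "tool-call-delta":
          // argsText is the full accumulated string, per the doc comment above
          console.log(`args so far: ${chunk.argsText}`);
          break;
        case "tool-call":
          console.log("final call:", chunk.toolCall);
          break;
        default:
          break; // tool-result / finish / error chunks handled elsewhere
      }
    }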
@@ -1,4 +1,4 @@
- import { L as LLMAdapter, T as ToolDefinition, U as UnifiedToolCall, h as UnifiedToolResult } from './base-iGi9Va6Z.js';
+ import { L as LLMAdapter, T as ToolDefinition, U as UnifiedToolCall, h as UnifiedToolResult } from './base-DuUNxtVg.js';

  /**
  * Provider Types
@@ -1,4 +1,4 @@
- import { L as LLMAdapter } from './base-iGi9Va6Z.js';
+ import { L as LLMAdapter } from './base-DuUNxtVg.js';

  /**
  * Fallback Chain & Routing Strategy Types
@@ -1,4 +1,4 @@
- import { e as StorageMessage, d as StorageAdapter, v as StorageFile } from '../types-CR8mi9I0.mjs';
+ import { e as StorageMessage, d as StorageAdapter, v as StorageFile } from '../types-CMvvDo-E.mjs';
  import 'zod';

  /**
@@ -1,4 +1,4 @@
- import { e as StorageMessage, d as StorageAdapter, v as StorageFile } from '../types-CR8mi9I0.js';
+ import { e as StorageMessage, d as StorageAdapter, v as StorageFile } from '../types-CMvvDo-E.js';
  import 'zod';

  /**
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@yourgpt/llm-sdk",
- "version": "2.1.9",
+ "version": "2.1.10-alpha.0",
  "description": "AI SDK for building AI Agents with any LLM",
  "main": "./dist/index.js",
  "module": "./dist/index.mjs",