@reverbia/sdk 1.0.0-next.20251202095402 → 1.0.0-next.20251202225102

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -834,12 +834,32 @@ var postApiV1Embeddings = (options) => {
  }
  });
  };
+ var postApiV1ImagesGenerations = (options) => {
+ return (options.client ?? client).post({
+ url: "/api/v1/images/generations",
+ ...options,
+ headers: {
+ "Content-Type": "application/json",
+ ...options.headers
+ }
+ });
+ };
  var getApiV1Models = (options) => {
  return (options?.client ?? client).get({
  url: "/api/v1/models",
  ...options
  });
  };
+ var postApiV1Search = (options) => {
+ return (options.client ?? client).post({
+ url: "/api/v1/search",
+ ...options,
+ headers: {
+ "Content-Type": "application/json",
+ ...options.headers
+ }
+ });
+ };
  var getHealth = (options) => {
  return (options?.client ?? client).get({
  url: "/health",
@@ -850,5 +870,7 @@ export {
  getApiV1Models,
  getHealth,
  postApiV1ChatCompletions,
- postApiV1Embeddings
+ postApiV1Embeddings,
+ postApiV1ImagesGenerations,
+ postApiV1Search
  };
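
The two new client wrappers, postApiV1ImagesGenerations and postApiV1Search, follow the shape of the existing generated functions: a JSON POST that spreads caller options and merges headers. A minimal usage sketch under stated assumptions — the baseUrl, token source, and the search body are illustrative, and the search request schema is not part of this diff:

import { postApiV1ImagesGenerations, postApiV1Search } from "@reverbia/sdk";

const token = process.env.REVERBIA_TOKEN ?? ""; // assumption: token comes from the environment

// Generate an image; the body fields mirror LlmapiImageGenerationRequest (declared below).
const image = await postApiV1ImagesGenerations({
  baseUrl: "https://api.example.com", // assumption: your gateway URL
  headers: { Authorization: `Bearer ${token}` },
  body: {
    model: "gpt-image-1",
    prompt: "A watercolor lighthouse at dusk",
    size: "1024x1024",
    response_format: "url"
  }
});

// The search wrapper takes the same transport options; its body schema is not
// shown in this diff, so the query field here is hypothetical.
const results = await postApiV1Search({
  baseUrl: "https://api.example.com",
  headers: { Authorization: `Bearer ${token}` },
  body: { query: "lighthouse" }
});

The image-generation wrapper reappears below in the package's CommonJS bundle, which also carries the React hooks.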
@@ -46680,6 +46680,7 @@ __export(index_exports, {
  selectTool: () => selectTool,
  useChat: () => useChat,
  useEncryption: () => useEncryption,
+ useImageGeneration: () => useImageGeneration,
  useMemory: () => useMemory,
  useModels: () => useModels
  });
@@ -47775,15 +47776,12 @@ function useChat(options) {
  const lastUserMessage = [...messages].reverse().find((m) => m.role === "user");
  if (lastUserMessage?.content) {
  setIsSelectingTool(true);
+ const contentString = lastUserMessage.content?.map((part) => part.text || "").join("") || "";
  try {
- const selectionResult = await selectTool(
- lastUserMessage.content,
- tools,
- {
- model: toolSelectorModel,
- signal: abortController.signal
- }
- );
+ const selectionResult = await selectTool(contentString, tools, {
+ model: toolSelectorModel,
+ signal: abortController.signal
+ });
  if (selectionResult.toolSelected && selectionResult.toolName) {
  const selectedTool = tools.find(
  (t) => t.name === selectionResult.toolName
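
selectTool still expects a plain string, so useChat now flattens the structured content parts into text before running tool selection. The same .map((part) => part.text || "").join("") pattern recurs throughout this release; a small helper capturing it might look like this (the helper is illustrative, not part of the SDK):

type ContentPart = { type?: string; text?: string };

// Flatten message content into plain text, ignoring non-text parts such as
// image_url blocks. Accepts both the old string shape and the new
// Array<LlmapiMessageContentPart> shape.
function contentToText(content?: string | ContentPart[]): string {
  if (typeof content === "string") return content;
  return content?.map((part) => part.text || "").join("") || "";
}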
@@ -47805,22 +47803,32 @@ function useChat(options) {
  if (toolExecutionResult.success && toolExecutionResult.result !== void 0) {
  const toolResultContext = {
  role: "system",
- content: `Tool "${toolExecutionResult.toolName}" was executed with the following result:
+ content: [
+ {
+ type: "text",
+ text: `Tool "${toolExecutionResult.toolName}" was executed with the following result:
  ${JSON.stringify(
- toolExecutionResult.result,
- null,
- 2
- )}
+ toolExecutionResult.result,
+ null,
+ 2
+ )}

  Use this information to respond to the user's request.`
+ }
+ ]
  };
  messagesWithToolContext = [...messages, toolResultContext];
  } else if (toolExecutionResult.error) {
  const toolErrorContext = {
  role: "system",
- content: `Tool "${toolExecutionResult.toolName}" was executed but encountered an error: ${toolExecutionResult.error}
+ content: [
+ {
+ type: "text",
+ text: `Tool "${toolExecutionResult.toolName}" was executed but encountered an error: ${toolExecutionResult.error}

  Please inform the user about this issue and try to help them alternatively.`
+ }
+ ]
  };
  messagesWithToolContext = [...messages, toolErrorContext];
  }
@@ -47839,7 +47847,7 @@ Please inform the user about this issue and try to help them alternatively.`
  const usedModel = localModel;
  const formattedMessages = messagesWithToolContext.map((m) => ({
  role: m.role || "user",
- content: m.content || ""
+ content: m.content?.map((p) => p.text || "").join("") || ""
  }));
  await generateLocalChatCompletion(formattedMessages, {
  model: usedModel,
@@ -47858,7 +47866,7 @@ Please inform the user about this issue and try to help them alternatively.`
  index: 0,
  message: {
  role: "assistant",
- content: accumulatedContent
+ content: [{ type: "text", text: accumulatedContent }]
  },
  finish_reason: "stop"
  }
@@ -47972,7 +47980,7 @@ Please inform the user about this issue and try to help them alternatively.`
  index: 0,
  message: {
  role: "assistant",
- content: accumulatedContent
+ content: [{ type: "text", text: accumulatedContent }]
  },
  finish_reason: finishReason
  }
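
Taken together, these hunks are a breaking change to the message shape: content moves from a plain string to an array of typed parts, and assistant output is wrapped in a single text part. Callers that built messages as strings would migrate roughly like this (a sketch; the surrounding state is hypothetical):

// Before this release: content was a string.
const oldMessage = { role: "user", content: "Summarize the report" };

// After: content is Array<LlmapiMessageContentPart>.
const newMessage = {
  role: "user",
  content: [{ type: "text", text: "Summarize the report" }]
};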
@@ -48423,6 +48431,16 @@ var postApiV1Embeddings = (options) => {
  }
  });
  };
+ var postApiV1ImagesGenerations = (options) => {
+ return (options.client ?? client).post({
+ url: "/api/v1/images/generations",
+ ...options,
+ headers: {
+ "Content-Type": "application/json",
+ ...options.headers
+ }
+ });
+ };
  var getApiV1Models = (options) => {
  return (options?.client ?? client).get({
  url: "/api/v1/models",
@@ -48593,9 +48611,12 @@ function useMemory(options = {}) {
  messages: [
  {
  role: "system",
- content: FACT_EXTRACTION_PROMPT
+ content: [{ type: "text", text: FACT_EXTRACTION_PROMPT }]
  },
- ...messages
+ ...messages.map((m) => ({
+ role: m.role,
+ content: [{ type: "text", text: m.content }]
+ }))
  ],
  model: model || completionsModel
  },
@@ -48616,7 +48637,13 @@ function useMemory(options = {}) {
  );
  return null;
  }
- const content = completion.data.choices?.[0]?.message?.content?.trim() || "";
+ const messageContent = completion.data.choices?.[0]?.message?.content;
+ let content = "";
+ if (Array.isArray(messageContent)) {
+ content = messageContent.map((p) => p.text || "").join("").trim();
+ } else if (typeof messageContent === "string") {
+ content = messageContent.trim();
+ }
  if (!content) {
  console.error("No content in memory extraction response");
  return null;
@@ -48897,6 +48924,88 @@ function useModels(options = {}) {
  };
  }

+ // src/react/useImageGeneration.ts
+ var import_react5 = require("react");
+ function useImageGeneration(options = {}) {
+ const { getToken, baseUrl = BASE_URL, onFinish, onError } = options;
+ const [isLoading, setIsLoading] = (0, import_react5.useState)(false);
+ const abortControllerRef = (0, import_react5.useRef)(null);
+ (0, import_react5.useEffect)(() => {
+ return () => {
+ if (abortControllerRef.current) {
+ abortControllerRef.current.abort();
+ abortControllerRef.current = null;
+ }
+ };
+ }, []);
+ const stop = (0, import_react5.useCallback)(() => {
+ if (abortControllerRef.current) {
+ abortControllerRef.current.abort();
+ abortControllerRef.current = null;
+ }
+ }, []);
+ const generateImage = (0, import_react5.useCallback)(
+ async (args) => {
+ if (abortControllerRef.current) {
+ abortControllerRef.current.abort();
+ }
+ const abortController = new AbortController();
+ abortControllerRef.current = abortController;
+ setIsLoading(true);
+ try {
+ if (!getToken) {
+ throw new Error("Token getter function is required.");
+ }
+ const token = await getToken();
+ if (!token) {
+ throw new Error("No access token available.");
+ }
+ const response = await postApiV1ImagesGenerations({
+ baseUrl,
+ body: args,
+ headers: {
+ Authorization: `Bearer ${token}`
+ },
+ signal: abortController.signal
+ });
+ if (response.error) {
+ const errorMsg = response.error.error || "Failed to generate image";
+ throw new Error(errorMsg);
+ }
+ if (!response.data) {
+ throw new Error("No data received from image generation API");
+ }
+ const result = response.data;
+ if (onFinish) {
+ onFinish(result);
+ }
+ return { data: result, error: null };
+ } catch (err) {
+ if (err instanceof Error && err.name === "AbortError") {
+ return { data: null, error: "Request aborted" };
+ }
+ const errorMsg = err instanceof Error ? err.message : "Failed to generate image.";
+ const errorObj = err instanceof Error ? err : new Error(errorMsg);
+ if (onError) {
+ onError(errorObj);
+ }
+ return { data: null, error: errorMsg };
+ } finally {
+ if (abortControllerRef.current === abortController) {
+ setIsLoading(false);
+ abortControllerRef.current = null;
+ }
+ }
+ },
+ [getToken, baseUrl, onFinish, onError]
+ );
+ return {
+ isLoading,
+ generateImage,
+ stop
+ };
+ }
+
  // src/lib/memory/chat.ts
  var formatMemoriesForChat = (memories, format = "compact") => {
  if (memories.length === 0) {
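
useImageGeneration mirrors the package's other hooks: it requires a getToken callback, aborts any in-flight request before starting a new one, and resolves to a { data, error } pair instead of throwing. A minimal component sketch, assuming a token source and UI of your own:

import { useImageGeneration } from "@reverbia/sdk";

function ImagePanel({ getToken }: { getToken: () => Promise<string | null> }) {
  const { generateImage, isLoading, stop } = useImageGeneration({
    getToken,
    onError: (err) => console.error("image generation failed:", err)
  });

  const handleGenerate = async () => {
    const { data, error } = await generateImage({
      model: "gpt-image-1",
      prompt: "A watercolor lighthouse at dusk",
      size: "1024x1024",
      response_format: "url"
    });
    if (error) return; // "Request aborted" or the API error message
    console.log(data.images?.[0]?.url);
  };

  return isLoading
    ? <button onClick={stop}>Stop</button>
    : <button onClick={handleGenerate}>Generate</button>;
}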
@@ -48964,6 +49073,7 @@ var extractConversationContext = (messages, maxMessages = 3) => {
  selectTool,
  useChat,
  useEncryption,
+ useImageGeneration,
  useMemory,
  useModels
  });
@@ -67,6 +67,96 @@ type LlmapiChoice = {
  index?: number;
  message?: LlmapiMessage;
  };
+ /**
+ * ExtraFields contains additional metadata such as provider/model information.
+ */
+ type LlmapiImageGenerationExtraFields = {
+ /**
+ * ModelRequested is the model identifier that the client asked for.
+ */
+ model_requested?: string;
+ /**
+ * Provider is the gateway that serviced this request.
+ */
+ provider?: string;
+ /**
+ * RequestType is always "image_generation".
+ */
+ request_type?: string;
+ };
+ type LlmapiImageGenerationImage = {
+ /**
+ * B64JSON is the base64 payload for models that can only return binary.
+ */
+ b64_json?: string;
+ /**
+ * URL is the signed URL to download the image.
+ */
+ url?: string;
+ };
+ type LlmapiImageGenerationRequest = {
+ /**
+ * Model is the model identifier to use for generation (e.g., "gpt-image-1").
+ */
+ model?: string;
+ /**
+ * Prompt is the text description of the desired image.
+ */
+ prompt?: string;
+ /**
+ * Quality targets a quality preset (e.g., "auto", "high").
+ */
+ quality?: string;
+ /**
+ * ResponseFormat controls how the generated image is returned (e.g., "url" or "b64_json").
+ */
+ response_format?: string;
+ /**
+ * Size controls the dimensions of the generated image (e.g., "1024x1024").
+ */
+ size?: string;
+ };
+ type LlmapiImageGenerationResponse = {
+ /**
+ * Created is the Unix timestamp when the image was generated.
+ */
+ created?: number;
+ extra_fields?: LlmapiImageGenerationExtraFields;
+ /**
+ * Images contains the generated images.
+ */
+ images?: Array<LlmapiImageGenerationImage>;
+ /**
+ * Model is the model identifier that generated the image.
+ */
+ model?: string;
+ /**
+ * Provider is the gateway that produced the image.
+ */
+ provider?: string;
+ usage?: LlmapiImageGenerationUsage;
+ };
+ /**
+ * Usage documents token usage (when available).
+ */
+ type LlmapiImageGenerationUsage = {
+ /**
+ * CostMicroUSD is the inference cost for this image generation request
+ */
+ cost_micro_usd?: number;
+ /**
+ * InputTokens is the number of tokens sent in the prompt.
+ */
+ input_tokens?: number;
+ /**
+ * OutputTokens is the number of tokens returned by the model.
+ */
+ output_tokens?: number;
+ /**
+ * TotalTokens is the total number of tokens consumed.
+ */
+ total_tokens?: number;
+ };
  /**
  * Message is the generated message
  */
@@ -74,9 +164,33 @@ type LlmapiMessage = {
  /**
  * Content is the message content
  */
- content?: string;
+ content?: Array<LlmapiMessageContentPart>;
  role?: LlmapiRole;
  };
+ /**
+ * ImageURL is used when Type=image_url
+ */
+ type LlmapiMessageContentImage = {
+ /**
+ * Detail is the OpenAI detail hint (auto|low|high)
+ */
+ detail?: string;
+ /**
+ * URL is the image URL or data URI
+ */
+ url?: string;
+ };
+ type LlmapiMessageContentPart = {
+ image_url?: LlmapiMessageContentImage;
+ /**
+ * Text holds the text content when Type=text
+ */
+ text?: string;
+ /**
+ * Type is the block type (`text` or `image_url`)
+ */
+ type?: string;
+ };
  type LlmapiModel = {
  architecture?: LlmapiModelArchitecture;
  /**
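
With content now an array of parts, a single message can mix text and image_url blocks. For example (values are illustrative; role values come from LlmapiRole):

const message: LlmapiMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this picture?" },
    {
      type: "image_url",
      image_url: { url: "https://example.com/photo.jpg", detail: "auto" }
    }
  ]
};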
@@ -530,6 +644,42 @@ type UseModelsResult = {
  */
  declare function useModels(options?: UseModelsOptions): UseModelsResult;

+ type UseImageGenerationOptions = {
+ /**
+ * Custom function to get auth token for API calls
+ */
+ getToken?: () => Promise<string | null>;
+ /**
+ * Optional base URL for the API requests.
+ */
+ baseUrl?: string;
+ /**
+ * Callback function to be called when the generation finishes successfully.
+ */
+ onFinish?: (response: LlmapiImageGenerationResponse) => void;
+ /**
+ * Callback function to be called when an unexpected error is encountered.
+ */
+ onError?: (error: Error) => void;
+ };
+ type GenerateImageArgs = LlmapiImageGenerationRequest;
+ type GenerateImageResult = {
+ data: LlmapiImageGenerationResponse;
+ error: null;
+ } | {
+ data: null;
+ error: string;
+ };
+ type UseImageGenerationResult = {
+ isLoading: boolean;
+ generateImage: (args: GenerateImageArgs) => Promise<GenerateImageResult>;
+ stop: () => void;
+ };
+ /**
+ * React hook for generating images using the LLM API.
+ */
+ declare function useImageGeneration(options?: UseImageGenerationOptions): UseImageGenerationResult;
+
  /**
  * Format memories into a context string that can be included in chat messages
  * @param memories Array of memories with similarity scores
@@ -581,4 +731,4 @@ declare function executeTool(tool: ClientTool, params: Record<string, unknown>):
  error?: string;
  }>;

- export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useMemory, useModels };
+ export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useImageGeneration, useMemory, useModels };
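
GenerateImageResult is a discriminated union on error, so a null check narrows the result to a fully typed response. A sketch of consuming it inside an async handler (generateImage comes from useImageGeneration):

async function handleResult(generateImage: UseImageGenerationResult["generateImage"]) {
  const result = await generateImage({ model: "gpt-image-1", prompt: "A red bicycle" });
  if (result.error !== null) {
    // In this branch data is null and error is a string.
    console.error(result.error);
    return;
  }
  // Here data is LlmapiImageGenerationResponse.
  for (const img of result.data.images ?? []) {
    console.log(img.url ?? "(b64_json payload)");
  }
  console.log("cost (micro-USD):", result.data.usage?.cost_micro_usd);
}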