@reverbia/sdk 1.0.0-next.20251208162906 → 1.0.0-next.20251209143100

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -67,6 +67,96 @@ type LlmapiChoice = {
   index?: number;
   message?: LlmapiMessage;
 };
+/**
+ * ExtraFields contains additional metadata such as provider/model information.
+ */
+type LlmapiImageGenerationExtraFields = {
+  /**
+   * ModelRequested is the model identifier that the client asked for.
+   */
+  model_requested?: string;
+  /**
+   * Provider is the gateway that serviced this request.
+   */
+  provider?: string;
+  /**
+   * RequestType is always "image_generation".
+   */
+  request_type?: string;
+};
+type LlmapiImageGenerationImage = {
+  /**
+   * B64JSON is the base64 payload for models that can only return binary.
+   */
+  b64_json?: string;
+  /**
+   * URL is the signed URL to download the image.
+   */
+  url?: string;
+};
+type LlmapiImageGenerationRequest = {
+  /**
+   * Model is the model identifier to use for generation (e.g., "gpt-image-1").
+   */
+  model: string;
+  /**
+   * Prompt is the text description of the desired image.
+   */
+  prompt: string;
+  /**
+   * Quality targets a quality preset (e.g., "auto", "high").
+   */
+  quality?: string;
+  /**
+   * ResponseFormat controls how the generated image is returned (e.g., "url" or "b64_json").
+   */
+  response_format?: string;
+  /**
+   * Size controls the dimensions of the generated image (e.g., "1024x1024").
+   */
+  size?: string;
+};
+type LlmapiImageGenerationResponse = {
+  /**
+   * Created is the Unix timestamp when the image was generated.
+   */
+  created?: number;
+  extra_fields?: LlmapiImageGenerationExtraFields;
+  /**
+   * Images contains the generated images.
+   */
+  images?: Array<LlmapiImageGenerationImage>;
+  /**
+   * Model is the model identifier that generated the image.
+   */
+  model?: string;
+  /**
+   * Provider is the gateway that produced the image.
+   */
+  provider?: string;
+  usage?: LlmapiImageGenerationUsage;
+};
+/**
+ * Usage documents token usage (when available).
+ */
+type LlmapiImageGenerationUsage = {
+  /**
+   * CostMicroUSD is the inference cost for this image generation request
+   */
+  cost_micro_usd?: number;
+  /**
+   * InputTokens is the number of tokens sent in the prompt.
+   */
+  input_tokens?: number;
+  /**
+   * OutputTokens is the number of tokens returned by the model.
+   */
+  output_tokens?: number;
+  /**
+   * TotalTokens is the total number of tokens consumed.
+   */
+  total_tokens?: number;
+};
   /**
    * Message is the generated message
    */
@@ -101,6 +191,99 @@ type LlmapiMessageContentPart = {
    */
   type?: string;
 };
+type LlmapiModel = {
+  architecture?: LlmapiModelArchitecture;
+  /**
+   * CanonicalSlug is the canonical slug for the model
+   */
+  canonical_slug?: string;
+  /**
+   * ContextLength is the maximum context length in tokens
+   */
+  context_length?: number;
+  /**
+   * Created is the Unix timestamp of when the model was created
+   */
+  created?: number;
+  /**
+   * DefaultParameters contains default parameter values
+   */
+  default_parameters?: {
+    [key: string]: unknown;
+  };
+  /**
+   * Description describes the model and its capabilities
+   */
+  description?: string;
+  /**
+   * HuggingFaceID is the Hugging Face model identifier
+   */
+  hugging_face_id?: string;
+  /**
+   * ID is the model identifier (e.g., "openai/gpt-4")
+   */
+  id?: string;
+  /**
+   * MaxInputTokens is the maximum input tokens
+   */
+  max_input_tokens?: number;
+  /**
+   * MaxOutputTokens is the maximum output tokens
+   */
+  max_output_tokens?: number;
+  /**
+   * Name is the human-readable model name (optional)
+   */
+  name?: string;
+  /**
+   * OwnedBy is the organization that owns the model
+   */
+  owned_by?: string;
+  per_request_limits?: LlmapiModelPerRequestLimits;
+  pricing?: LlmapiModelPricing;
+  /**
+   * SupportedMethods is a list of supported API methods
+   */
+  supported_methods?: Array<string>;
+  /**
+   * SupportedParameters is a list of supported parameter names
+   */
+  supported_parameters?: Array<string>;
+  top_provider?: LlmapiModelTopProvider;
+};
+/**
+ * Architecture describes the model's technical capabilities
+ */
+type LlmapiModelArchitecture = {
+  instruct_type?: string;
+  modality?: string;
+  prompt_formatting?: string;
+  tokenizer?: string;
+};
+/**
+ * PerRequestLimits contains rate limiting information
+ */
+type LlmapiModelPerRequestLimits = {
+  completion_tokens?: number;
+  prompt_tokens?: number;
+};
+/**
+ * Pricing contains the pricing structure for using this model
+ */
+type LlmapiModelPricing = {
+  completion?: string;
+  image?: string;
+  prompt?: string;
+  request?: string;
+};
+/**
+ * TopProvider contains configuration details for the primary provider
+ */
+type LlmapiModelTopProvider = {
+  context_length?: number;
+  is_moderated?: boolean;
+  max_completion_tokens?: number;
+};
   /**
    * Role is the message role (system, user, assistant)
    */
@@ -214,4 +397,70 @@ type UseChatResult = BaseUseChatResult & {
  */
 declare function useChat(options?: UseChatOptions): UseChatResult;
 
-export { useChat };
+type UseImageGenerationOptions = {
+  /**
+   * Custom function to get auth token for API calls
+   */
+  getToken?: () => Promise<string | null>;
+  /**
+   * Optional base URL for the API requests.
+   */
+  baseUrl?: string;
+  /**
+   * Callback function to be called when the generation finishes successfully.
+   */
+  onFinish?: (response: LlmapiImageGenerationResponse) => void;
+  /**
+   * Callback function to be called when an unexpected error is encountered.
+   */
+  onError?: (error: Error) => void;
+};
+type GenerateImageArgs = LlmapiImageGenerationRequest;
+type GenerateImageResult = {
+  data: LlmapiImageGenerationResponse;
+  error: null;
+} | {
+  data: null;
+  error: string;
+};
+type UseImageGenerationResult = {
+  isLoading: boolean;
+  generateImage: (args: GenerateImageArgs) => Promise<GenerateImageResult>;
+  stop: () => void;
+};
+/**
+ * React hook for generating images using the LLM API.
+ */
+declare function useImageGeneration(options?: UseImageGenerationOptions): UseImageGenerationResult;
+
+type UseModelsOptions = {
+  /**
+   * Custom function to get auth token for API calls
+   */
+  getToken?: () => Promise<string | null>;
+  /**
+   * Optional base URL for the API requests.
+   */
+  baseUrl?: string;
+  /**
+   * Optional filter for specific provider (e.g. "openai")
+   */
+  provider?: string;
+  /**
+   * Whether to fetch models automatically on mount (default: true)
+   */
+  autoFetch?: boolean;
+};
+type UseModelsResult = {
+  models: LlmapiModel[];
+  isLoading: boolean;
+  error: Error | null;
+  refetch: () => Promise<void>;
+};
+/**
+ * React hook for fetching available LLM models.
+ * Automatically fetches all available models.
+ */
+declare function useModels(options?: UseModelsOptions): UseModelsResult;
+
+export { type UseModelsOptions, type UseModelsResult, useChat, useImageGeneration, useModels };
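For orientation, here is a minimal sketch of how the new useImageGeneration hook might be called, based only on the type declarations above. The import path, the /api/token route, and the component itself are illustrative assumptions, not part of the published package.

import { useImageGeneration } from "@reverbia/sdk"; // entry point assumed

function AvatarGenerator() {
  const { generateImage, isLoading, stop } = useImageGeneration({
    // getToken is optional per the declaration; this endpoint is hypothetical.
    getToken: async () => {
      const res = await fetch("/api/token");
      return res.ok ? (await res.json()).token : null;
    },
    onError: (err) => console.error("image generation failed", err),
  });

  const handleClick = async () => {
    // generateImage resolves to a { data, error } union instead of throwing.
    const result = await generateImage({
      model: "gpt-image-1",
      prompt: "A watercolor fox in a forest clearing",
      size: "1024x1024",
      response_format: "url",
    });
    if (result.error !== null) {
      console.error(result.error);
      return;
    }
    // Each image carries a signed URL or a base64 payload, per the types.
    for (const image of result.data.images ?? []) {
      console.log(image.url ?? "(b64_json payload)");
    }
  };

  return (
    <button onClick={isLoading ? stop : handleClick}>
      {isLoading ? "Stop" : "Generate"}
    </button>
  );
}

The { data, error } result shape means callers branch on error rather than wrapping every call in try/catch; narrowing on error !== null is enough for TypeScript to type result.data on the success path.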
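Likewise, a sketch of the useModels hook under the same assumptions (package-root import, React environment); the provider filter value and the rendering are illustrative.

import { useModels } from "@reverbia/sdk"; // entry point assumed

function ModelPicker() {
  const { models, isLoading, error, refetch } = useModels({
    provider: "openai", // optional filter, per UseModelsOptions
    autoFetch: true,    // the declared default: fetch on mount
  });

  if (isLoading) return <p>Loading models...</p>;
  if (error) return <button onClick={() => void refetch()}>Retry</button>;

  return (
    <ul>
      {models.map((m) => (
        <li key={m.id ?? m.canonical_slug}>
          {m.name ?? m.id} ({m.context_length ?? "?"} tokens of context)
        </li>
      ))}
    </ul>
  );
}

Note that nearly every field on LlmapiModel is optional, so consumers should fall back the way the sketch does (name to id, unknown context length) rather than assuming the metadata is populated for every provider.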