@lastbrain/ai-ui-react 1.0.36 → 1.0.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/dist/components/AiContextButton.d.ts.map +1 -1
  2. package/dist/components/AiContextButton.js +1 -9
  3. package/dist/components/AiImageButton.d.ts.map +1 -1
  4. package/dist/components/AiImageButton.js +1 -16
  5. package/dist/components/AiInput.d.ts.map +1 -1
  6. package/dist/components/AiInput.js +2 -2
  7. package/dist/components/AiPromptPanel.d.ts +1 -0
  8. package/dist/components/AiPromptPanel.d.ts.map +1 -1
  9. package/dist/components/AiPromptPanel.js +278 -169
  10. package/dist/components/AiTextarea.d.ts.map +1 -1
  11. package/dist/components/AiTextarea.js +1 -1
  12. package/dist/components/ErrorToast.js +14 -14
  13. package/dist/context/AiProvider.d.ts.map +1 -1
  14. package/dist/context/AiProvider.js +87 -26
  15. package/dist/hooks/useAiClient.d.ts +1 -0
  16. package/dist/hooks/useAiClient.d.ts.map +1 -1
  17. package/dist/hooks/useAiModels.d.ts.map +1 -1
  18. package/dist/hooks/useAiModels.js +1 -2
  19. package/dist/hooks/useModelManagement.d.ts.map +1 -1
  20. package/dist/hooks/useModelManagement.js +24 -3
  21. package/dist/hooks/usePrompts.d.ts.map +1 -1
  22. package/dist/hooks/usePrompts.js +19 -3
  23. package/package.json +2 -2
  24. package/src/components/AiContextButton.tsx +2 -9
  25. package/src/components/AiImageButton.tsx +5 -17
  26. package/src/components/AiInput.tsx +3 -2
  27. package/src/components/AiPromptPanel.tsx +332 -192
  28. package/src/components/AiTextarea.tsx +2 -1
  29. package/src/components/ErrorToast.tsx +16 -16
  30. package/src/context/AiProvider.tsx +116 -31
  31. package/src/hooks/useAiModels.ts +1 -2
  32. package/src/hooks/useModelManagement.ts +33 -3
  33. package/src/hooks/usePrompts.ts +21 -3
@@ -215,7 +215,8 @@ export function AiTextarea({
215
215
  onClose={handleClosePanel}
216
216
  onSubmit={handleSubmit}
217
217
  uiMode={uiMode}
218
- models={models || []}
218
+ models={[]}
219
+ modelCategory="text"
219
220
  sourceText={textareaValue || undefined}
220
221
  baseUrl={baseUrl}
221
222
  apiKey={apiKeyId}
@@ -24,6 +24,21 @@ export function ErrorToast({
24
24
  const fadeTimeoutRef = useRef<number | null>(null);
25
25
  const autoCloseTimeoutRef = useRef<number | null>(null);
26
26
 
27
+ const handleClose = () => {
28
+ if (isClosing) return;
29
+
30
+ // Clear auto-close timeout if user closes manually
31
+ if (autoCloseTimeoutRef.current) {
32
+ window.clearTimeout(autoCloseTimeoutRef.current);
33
+ }
34
+
35
+ setIsClosing(true);
36
+ fadeTimeoutRef.current = window.setTimeout(() => {
37
+ setIsVisible(false);
38
+ onComplete?.();
39
+ }, 200);
40
+ };
41
+
27
42
  useEffect(() => {
28
43
  if (error) {
29
44
  // Show toast immediately
@@ -46,22 +61,7 @@ export function ErrorToast({
46
61
  window.clearTimeout(autoCloseTimeoutRef.current);
47
62
  }
48
63
  };
49
- }, [error]);
50
-
51
- const handleClose = () => {
52
- if (isClosing) return;
53
-
54
- // Clear auto-close timeout if user closes manually
55
- if (autoCloseTimeoutRef.current) {
56
- window.clearTimeout(autoCloseTimeoutRef.current);
57
- }
58
-
59
- setIsClosing(true);
60
- fadeTimeoutRef.current = window.setTimeout(() => {
61
- setIsVisible(false);
62
- onComplete?.();
63
- }, 200);
64
- };
64
+ }, [error, handleClose]);
65
65
 
66
66
  if (!error) return null;
67
67
 
@@ -12,7 +12,7 @@ import {
12
12
  import type { UiMode } from "../types";
13
13
  import type { ModelRef } from "@lastbrain/ai-ui-core";
14
14
  import { createClient } from "@lastbrain/ai-ui-core";
15
- import { getAvailableModels, getUserModels } from "../utils/modelManagement";
15
+ import { getUserModels } from "../utils/modelManagement";
16
16
 
17
17
  export interface AIModel {
18
18
  id: string;
@@ -108,46 +108,131 @@ export function AiProvider({
108
108
  // Utiliser createClient pour avoir buildUrl qui gère les routes correctement
109
109
  const client = createClient({ baseUrl, apiKeyId });
110
110
 
111
- // Fetch providers et available models en parallèle
112
- const [providersData, availableModelsData] = await Promise.all([
113
- client
114
- .getModels()
115
- .then((models) => {
116
- // getModels retourne directement les modèles, pas les providers
117
- // On doit reconstruire la structure providers
118
- return { providers: [{ id: "default", name: "Default", models }] };
119
- })
120
- .catch((error) => {
121
- console.error("[AiProvider] Error fetching models:", error);
122
- if (error.message?.includes("404")) {
123
- providersAvailable.current = false;
124
- }
125
- return { providers: [] };
126
- }),
127
- getAvailableModels({ baseUrl, apiKey: apiKeyId }).catch((error) => {
128
- console.warn("[AiProvider] Could not fetch available models:", error);
111
+ // Fetch ALL available models from Vercel AI Gateway + user enabled models
112
+ const [gatewayResponse, userEnabledModels] = await Promise.all([
113
+ client.getAllAvailableModels().catch((error) => {
114
+ console.error("[AiProvider] Error fetching gateway models:", error);
115
+ if (error.message?.includes("404")) {
116
+ providersAvailable.current = false;
117
+ }
118
+ return { providers: [] };
119
+ }),
120
+ client.getModels().catch((error) => {
121
+ console.warn(
122
+ "[AiProvider] Could not fetch user enabled models:",
123
+ error
124
+ );
129
125
  return [];
130
126
  }),
131
127
  ]);
132
- console.log("[AiProvider] Providers data received:", providersData);
133
128
 
134
- if (providersData.providers && Array.isArray(providersData.providers)) {
135
- setProviders(providersData.providers);
129
+ console.log("[AiProvider] Gateway response:", gatewayResponse);
130
+ console.log("[AiProvider] User enabled models:", userEnabledModels);
136
131
 
137
- // Extraire tous les modèles
138
- const models: ModelRef[] = [];
139
- for (const provider of providersData.providers) {
132
+ // Gateway providers contain ALL models from Vercel AI
133
+ if (
134
+ gatewayResponse.providers &&
135
+ Array.isArray(gatewayResponse.providers)
136
+ ) {
137
+ // Extract all models from all providers
138
+ const allGatewayModels: ModelRef[] = [];
139
+ for (const provider of gatewayResponse.providers) {
140
140
  if (provider.models && Array.isArray(provider.models)) {
141
- models.push(...provider.models);
141
+ // Transform GatewayModel to ModelRef
142
+ for (const model of provider.models) {
143
+ allGatewayModels.push({
144
+ id: model.id,
145
+ name: model.name,
146
+ type: model.type === "language" ? "text" : (model.type as any), // Map types
147
+ provider: model.provider,
148
+ tags: model.tags,
149
+ pricing: model.pricing
150
+ ? {
151
+ inputTokens: model.pricing.input,
152
+ outputTokens: model.pricing.output,
153
+ imageTokens: model.pricing.image,
154
+ }
155
+ : undefined,
156
+ });
157
+ }
142
158
  }
143
159
  }
144
- console.log("[AiProvider] Extracted models:", models.length, models);
145
- setAllModels(models);
160
+
161
+ console.log(
162
+ "[AiProvider] All gateway models extracted:",
163
+ allGatewayModels.length,
164
+ allGatewayModels
165
+ );
166
+
167
+ // Helper pour déterminer les capacités d'un modèle
168
+ const getModelCategories = (model: ModelRef): ("text" | "image")[] => {
169
+ const categories: ("text" | "image")[] = [];
170
+ const tags = model.tags || [];
171
+ const tagsLower = tags.map((t) => t.toLowerCase());
172
+
173
+ // Détecter capacité text
174
+ if (
175
+ model.type === "text" ||
176
+ model.type === "language" ||
177
+ tagsLower.includes("text") ||
178
+ tagsLower.includes("language") ||
179
+ tagsLower.includes("chat")
180
+ ) {
181
+ categories.push("text");
182
+ }
183
+
184
+ // Détecter capacité image
185
+ if (
186
+ model.type === "image" ||
187
+ tagsLower.includes("image") ||
188
+ tagsLower.includes("vision") ||
189
+ tagsLower.includes("multimodal") ||
190
+ tagsLower.includes("image-generation") ||
191
+ model.pricing?.imageTokens !== undefined
192
+ ) {
193
+ categories.push("image");
194
+ }
195
+
196
+ // Par défaut, si pas de catégorie détectée, considérer comme text
197
+ if (categories.length === 0) {
198
+ categories.push("text");
199
+ }
200
+
201
+ return categories;
202
+ };
203
+
204
+ // Transformer ModelRef en AIModel pour availableModels
205
+ // Un modèle multi-modal apparaîtra avec toutes ses catégories
206
+ const aiModels: AIModel[] = [];
207
+ for (const model of allGatewayModels) {
208
+ const categories = getModelCategories(model);
209
+
210
+ // Pour chaque catégorie supportée, créer une entrée AIModel
211
+ // mais stocker toutes les catégories dans une propriété
212
+ const primaryCategory = categories[0];
213
+ aiModels.push({
214
+ id: model.id,
215
+ name: model.name,
216
+ category: primaryCategory,
217
+ provider: model.provider,
218
+ description: `${model.provider} - ${model.name}${categories.length > 1 ? ` (${categories.join(", ")})` : ""}`,
219
+ isPro: false,
220
+ isActive: false,
221
+ tags: model.tags, // Garder les tags originaux
222
+ } as any); // Cast temporaire car AIModel n'a pas tags dans le type
223
+ }
224
+
225
+ setAllModels(allGatewayModels);
226
+ setAvailableModels(aiModels);
227
+ setProviders(
228
+ gatewayResponse.providers.map((p) => ({
229
+ id: p.name,
230
+ name: p.display_name,
231
+ models: allGatewayModels.filter((m) => m.provider === p.name),
232
+ }))
233
+ );
146
234
  }
147
235
 
148
- // Stocker available models
149
- console.log("[AiProvider] Available models:", availableModelsData.length);
150
- setAvailableModels(availableModelsData);
151
236
  hasFetchedProviders.current = true;
152
237
  } catch (error) {
153
238
  console.error("[AiProvider] Error fetching providers:", error);
@@ -87,8 +87,7 @@ export function useAiModels(options?: UseAiModelsOptions): UseAiModelsResult {
87
87
  filtered.length
88
88
  );
89
89
  return filtered;
90
- return filtered;
91
- }, [useContextData, context, options?.modelType]);
90
+ }, [useContextData, context, options]);
92
91
 
93
92
  const refetch = useCallback(() => {
94
93
  if (useContextData) {
@@ -49,9 +49,39 @@ export function useModelManagement(
49
49
  // Filtrer par catégorie si nécessaire
50
50
  const filteredModels = useMemo(() => {
51
51
  if (!useContextData) return [];
52
- return category
53
- ? context.availableModels.filter((m) => m.category === category)
54
- : context.availableModels;
52
+
53
+ if (!category) {
54
+ return context.availableModels;
55
+ }
56
+
57
+ // Filtrer les modèles qui supportent la catégorie demandée
58
+ return context.availableModels.filter((m) => {
59
+ // Vérifier la catégorie principale
60
+ if (m.category === category) return true;
61
+
62
+ // Vérifier les tags pour modèles multi-modaux
63
+ const tags = (m as any).tags || [];
64
+ const tagsLower = tags.map((t: string) => t.toLowerCase());
65
+
66
+ if (category === "text") {
67
+ return (
68
+ tagsLower.includes("text") ||
69
+ tagsLower.includes("language") ||
70
+ tagsLower.includes("chat")
71
+ );
72
+ }
73
+
74
+ if (category === "image") {
75
+ return (
76
+ tagsLower.includes("image") ||
77
+ tagsLower.includes("vision") ||
78
+ tagsLower.includes("multimodal") ||
79
+ tagsLower.includes("image-generation")
80
+ );
81
+ }
82
+
83
+ return false;
84
+ });
55
85
  }, [useContextData, category, context.availableModels]);
56
86
 
57
87
  const refreshModels = useCallback(async () => {
@@ -65,13 +65,24 @@ export function usePrompts(): UsePromptsReturn {
65
65
  setLoading(true);
66
66
  setError(null);
67
67
 
68
+ console.log("[usePrompts] fetchPrompts called:", {
69
+ options,
70
+ baseUrl,
71
+ apiKeyId,
72
+ hasApiKey: !!apiKeyId,
73
+ hasBaseUrl: !!baseUrl,
74
+ });
75
+
68
76
  // Generate cache key based on options
69
77
  const cacheKey = `prompts_${JSON.stringify(options || {})}`;
70
78
 
71
79
  // Check cache first (60 seconds TTL)
72
80
  const cached = getCached<Prompt[] | PublicPrompt[]>(cacheKey, 60000);
73
81
  if (cached) {
74
- console.log("[usePrompts] Using cached data");
82
+ console.log("[usePrompts] Using cached data:", {
83
+ count: cached.length,
84
+ prompts: cached,
85
+ });
75
86
  setPrompts(cached);
76
87
  setLoading(false);
77
88
  return;
@@ -133,6 +144,13 @@ export function usePrompts(): UsePromptsReturn {
133
144
 
134
145
  const data = await response.json();
135
146
 
147
+ console.log("[usePrompts] Server response:", {
148
+ ok: response.ok,
149
+ status: response.status,
150
+ promptsCount: data.prompts?.length || 0,
151
+ data,
152
+ });
153
+
136
154
  if (response.ok) {
137
155
  const promptsData = data.prompts || [];
138
156
  setPrompts(promptsData);
@@ -147,7 +165,7 @@ export function usePrompts(): UsePromptsReturn {
147
165
  setLoading(false);
148
166
  }
149
167
  },
150
- [baseUrl]
168
+ [baseUrl, apiKeyId]
151
169
  );
152
170
 
153
171
  const createPrompt = useCallback(
@@ -306,7 +324,7 @@ export function usePrompts(): UsePromptsReturn {
306
324
  body: JSON.stringify({ prompt_id: promptId, stat_type: statType }),
307
325
  credentials: isExternalProxy ? "include" : "same-origin",
308
326
  });
309
- } catch (err) {
327
+ } catch {
310
328
  // Silent fail for stats
311
329
  }
312
330
  },