needware-cli 1.5.15 → 1.5.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73) hide show
  1. package/README.md +49 -1
  2. package/dist/commands/agent.d.ts +12 -0
  3. package/dist/commands/agent.d.ts.map +1 -1
  4. package/dist/commands/agent.js +133 -7
  5. package/dist/commands/agent.js.map +1 -1
  6. package/dist/commands/index.d.ts +1 -1
  7. package/dist/commands/index.d.ts.map +1 -1
  8. package/dist/commands/index.js +1 -1
  9. package/dist/commands/index.js.map +1 -1
  10. package/dist/commands/skill.d.ts +34 -0
  11. package/dist/commands/skill.d.ts.map +1 -0
  12. package/dist/commands/skill.js +307 -0
  13. package/dist/commands/skill.js.map +1 -0
  14. package/dist/commands/system-prompt.d.ts +2 -1
  15. package/dist/commands/system-prompt.d.ts.map +1 -1
  16. package/dist/commands/system-prompt.js +12 -9
  17. package/dist/commands/system-prompt.js.map +1 -1
  18. package/dist/core/cli.d.ts.map +1 -1
  19. package/dist/core/cli.js +8 -1
  20. package/dist/core/cli.js.map +1 -1
  21. package/dist/index.js +4 -4
  22. package/dist/index.js.map +1 -1
  23. package/dist/sdk/firecrawl-sdk.d.ts +85 -0
  24. package/dist/sdk/firecrawl-sdk.d.ts.map +1 -0
  25. package/dist/sdk/firecrawl-sdk.js +207 -0
  26. package/dist/sdk/firecrawl-sdk.js.map +1 -0
  27. package/dist/tools/ai-gateway-enable-tool.d.ts +12 -0
  28. package/dist/tools/ai-gateway-enable-tool.d.ts.map +1 -0
  29. package/dist/tools/ai-gateway-enable-tool.js +33 -0
  30. package/dist/tools/ai-gateway-enable-tool.js.map +1 -0
  31. package/dist/tools/base-tool.d.ts +76 -0
  32. package/dist/tools/base-tool.d.ts.map +1 -0
  33. package/dist/tools/base-tool.js +119 -0
  34. package/dist/tools/base-tool.js.map +1 -0
  35. package/dist/tools/example-tool.d.ts +16 -0
  36. package/dist/tools/example-tool.d.ts.map +1 -0
  37. package/dist/tools/example-tool.js +67 -0
  38. package/dist/tools/example-tool.js.map +1 -0
  39. package/dist/tools/index.d.ts +9 -0
  40. package/dist/tools/index.d.ts.map +1 -0
  41. package/dist/tools/index.js +9 -0
  42. package/dist/tools/index.js.map +1 -0
  43. package/dist/tools/supabase-deploy-functions-tool.d.ts +18 -0
  44. package/dist/tools/supabase-deploy-functions-tool.d.ts.map +1 -0
  45. package/dist/tools/supabase-deploy-functions-tool.js +127 -0
  46. package/dist/tools/supabase-deploy-functions-tool.js.map +1 -0
  47. package/dist/tools/supabase-enable-tool.d.ts +12 -0
  48. package/dist/tools/supabase-enable-tool.d.ts.map +1 -0
  49. package/dist/tools/supabase-enable-tool.js +89 -0
  50. package/dist/tools/supabase-enable-tool.js.map +1 -0
  51. package/dist/tools/tool-registry.d.ts +51 -0
  52. package/dist/tools/tool-registry.d.ts.map +1 -0
  53. package/dist/tools/tool-registry.js +81 -0
  54. package/dist/tools/tool-registry.js.map +1 -0
  55. package/dist/tools/user-registration-tool.d.ts +17 -0
  56. package/dist/tools/user-registration-tool.d.ts.map +1 -0
  57. package/dist/tools/user-registration-tool.js +63 -0
  58. package/dist/tools/user-registration-tool.js.map +1 -0
  59. package/dist/types/index.d.ts +1 -0
  60. package/dist/types/index.d.ts.map +1 -1
  61. package/dist/types/index.js +1 -0
  62. package/dist/types/index.js.map +1 -1
  63. package/dist/types/tool.d.ts +60 -0
  64. package/dist/types/tool.d.ts.map +1 -0
  65. package/dist/types/tool.js +5 -0
  66. package/dist/types/tool.js.map +1 -0
  67. package/examples/gateway-config-usage.sh +71 -0
  68. package/examples/tool-usage-example.sh +44 -0
  69. package/package.json +2 -2
  70. package/skills/ai-integration/SKILL.md +793 -0
  71. package/skills/supabase-integration/SKILL.md +556 -0
  72. package/CHANGELOG_DB_CONFIG.md +0 -142
  73. package/IMPLEMENTATION_SUMMARY.md +0 -303
@@ -0,0 +1,793 @@
1
+ ---
2
+ name: ai-integration
3
+ description: Use this skill when the conversation requires integrating AI services (such as image analysis, text generation, intelligent recognition, etc.) to generate integration code based on AI gateway or API
4
+ ---
5
+
6
+ # AI Integration
7
+
8
+ ## Before Starting - Supabase Integration Check
9
+
10
+ **CRITICAL**: AI Integration requires Supabase as the backend (uses Supabase Edge Functions). Always check Supabase integration status before proceeding:
11
+
12
+ **Step 1: Check Existing Supabase Integration**
13
+
14
+ Check if Supabase is already integrated:
15
+ - Look for `src/lib/supabase.ts` file
16
+ - Check `.env` file for Supabase environment variables:
17
+ - `VITE_SUPABASE_PROJECT_ID`
18
+ - `VITE_SUPABASE_PUBLISHABLE_KEY` or `VITE_SUPABASE_ANON_KEY`
19
+ - `VITE_SUPABASE_URL`
20
+
21
+ **Step 2: Handle Based on Status**
22
+
23
+ **If Supabase is already integrated** (supabase.ts exists with valid configuration):
24
+ - ✓ Inform user: "Supabase is already integrated. Proceeding with AI integration..."
25
+ - Continue with AI integration workflow below
26
+
27
+ **If Supabase is NOT integrated** (no supabase.ts):
28
+ - ⚠️ Stop immediately and inform user: "AI integration requires Supabase backend. Setting up Supabase first..."
29
+ - Call `supabase-enable` tool or use the `supabase-integration` skill
30
+ - Follow the complete Supabase setup workflow:
31
+ 1. Install `@supabase/supabase-js` dependency
32
+ 2. Create `.env` file with Supabase credentials
33
+ 3. Create `src/lib/supabase.ts` client configuration
34
+ 4. Create `supabase/migrations` directory
35
+ - After Supabase setup is complete, proceed with AI integration
36
+
37
+ ---
38
+
39
+ ## Overview
40
+
41
+ When users need to integrate AI functionality into their projects, use this skill to generate standardized AI service integration code. Supports multiple AI capabilities: vision recognition, text analysis, content generation, etc.
42
+
43
+ **Tech Stack:** Supabase Functions (Deno + TypeScript) + Supabase Client
44
+
45
+ **Generated Code Structure:**
46
+ - Backend: `supabase/functions/<function-name>/index.ts` (Supabase Edge Function)
47
+ - Frontend: Direct invocation using `supabase.functions.invoke()` or API (supports both standard and streaming responses)
48
+
49
+ **Core Principles:** Generate reliable, scalable, and maintainable AI integration code
50
+
51
+ **Announce at the start:** "I'm using the ai-integration skill to generate AI integration code for you."
52
+
53
+ ## When to Use This Skill
54
+
55
+ ```
56
+ Trigger Conditions (use if any are met):
57
+ - User explicitly mentions "AI analysis", "intelligent recognition", "image recognition"
58
+ - Need to call large language model APIs (GPT, Claude, Gemini, etc.)
59
+ - Need visual AI capabilities (OCR, object recognition, image analysis)
60
+ - Need text AI capabilities (translation, summarization, sentiment analysis)
61
+ - User asks "how to integrate AI"
62
+ ```
63
+
64
+ ## Supabase Project Structure
65
+
66
+ After generating AI integration code, the project structure looks like this:
67
+
68
+ ```
69
+ project-root/
70
+ ├── supabase/
71
+ │ ├── functions/
72
+ │ │ ├── <function-name-1>/
73
+ │ │ │ └── index.ts # AI Feature 1
74
+ │ │ ├── <function-name-2>/
75
+ │ │ │ └── index.ts # AI Feature 2
76
+ │ │ └── <function-name-3>/
77
+ │ │ └── index.ts # AI Feature 3
78
+ │ ├── .env.local # Functions environment variables
79
+ │ └── config.toml # Supabase configuration
80
+ ├── src/
81
+ │ ├── lib/
82
+ │ │ └── supabase.ts # Supabase Client configuration
83
+ │ └── ...
84
+ └── .env # Frontend environment variables (VITE_SUPABASE_URL, etc.)
85
+ ```
86
+
87
+ ## AI Integration Architecture Patterns
88
+
89
+ ### 1. Backend API Pattern (Recommended for Production)
90
+
91
+ **Use Cases:**
92
+ - Need to protect API keys
93
+ - Need rate limiting and usage control
94
+ - Need data preprocessing or postprocessing
95
+ - Need caching or logging
96
+
97
+ **Architecture:**
98
+ ```
99
+ Frontend → Backend API → AI Gateway/Service → AI Provider
100
+ ```
101
+
102
+ ### 2. Client Direct Connection Pattern
103
+
104
+ **Use Cases:**
105
+ - Prototyping or demos
106
+ - Using user's own API keys
107
+ - Scenarios requiring extremely high real-time performance
108
+
109
+ **Architecture:**
110
+ ```
111
+ Frontend → AI Gateway/Service → AI Provider
112
+ ```
113
+
114
+ ## Standard Code Templates
115
+
116
+ ### Backend API Endpoint Template (Supabase Functions)
117
+
118
+ **File Location:** `supabase/functions/<function-name>/index.ts`
119
+
120
+ Examples:
121
+ - `supabase/functions/ai-analysis/index.ts`
122
+ - `supabase/functions/ai-process/index.ts`
123
+ - `supabase/functions/ai-service/index.ts`
124
+
125
+ ```typescript
126
+ import { serve } from "https://deno.land/std@0.168.0/http/server.ts";
127
+
128
+ const corsHeaders = {
129
+ "Access-Control-Allow-Origin": "*",
130
+ "Access-Control-Allow-Headers": "authorization, x-client-info, apikey, content-type",
131
+ };
132
+
133
+ interface AIRequest {
134
+ // Define request parameters based on specific AI functionality
135
+ input: string | object;
136
+ options?: {
137
+ model?: string;
138
+ temperature?: number;
139
+ maxTokens?: number;
140
+ };
141
+ }
142
+
143
+ serve(async (req) => {
144
+ // Handle CORS preflight request
145
+ if (req.method === "OPTIONS") {
146
+ return new Response(null, { headers: corsHeaders });
147
+ }
148
+
149
+ try {
150
+ const requestData: AIRequest = await req.json();
151
+
152
+ // Validate input
153
+ if (!requestData.input) {
154
+ return new Response(
155
+ JSON.stringify({ error: "Missing required input parameter" }),
156
+ { status: 400, headers: { ...corsHeaders, "Content-Type": "application/json" } }
157
+ );
158
+ }
159
+
160
+ // Get API key (from environment variables)
161
+ const AI_API_KEY = Deno.env.get("AI_API_KEY");
162
+ if (!AI_API_KEY) {
163
+ console.error("AI_API_KEY not configured");
164
+ return new Response(
165
+ JSON.stringify({ error: "AI service not configured" }),
166
+ { status: 500, headers: { ...corsHeaders, "Content-Type": "application/json" } }
167
+ );
168
+ }
169
+
170
+ console.log("Processing AI request...");
171
+
172
+ // Call AI service (using Needware AI gateway as example)
173
+ const response = await fetch("https://ai.gateway.needware.dev/v1/chat/completions", {
174
+ method: "POST",
175
+ headers: {
176
+ Authorization: `Bearer ${AI_API_KEY}`,
177
+ "Content-Type": "application/json",
178
+ },
179
+ body: JSON.stringify({
180
+ model: requestData.options?.model || "google/gemini-2.5-flash",
181
+ messages: [
182
+ {
183
+ role: "system",
184
+ content: "You are a professional AI assistant. Provide accurate and helpful responses based on user needs."
185
+ },
186
+ {
187
+ role: "user",
188
+ content: typeof requestData.input === 'string'
189
+ ? requestData.input
190
+ : JSON.stringify(requestData.input)
191
+ }
192
+ ],
193
+ temperature: requestData.options?.temperature || 0.7,
194
+ max_tokens: requestData.options?.maxTokens || 1000,
195
+ }),
196
+ });
197
+
198
+ // Handle error response
199
+ if (!response.ok) {
200
+ const errorText = await response.text();
201
+ console.error("AI service error:", response.status, errorText);
202
+
203
+ if (response.status === 429) {
204
+ return new Response(
205
+ JSON.stringify({ error: "Request rate too high, please try again later" }),
206
+ { status: 429, headers: { ...corsHeaders, "Content-Type": "application/json" } }
207
+ );
208
+ }
209
+ if (response.status === 402) {
210
+ return new Response(
211
+ JSON.stringify({ error: "AI service quota exhausted" }),
212
+ { status: 402, headers: { ...corsHeaders, "Content-Type": "application/json" } }
213
+ );
214
+ }
215
+
216
+ return new Response(
217
+ JSON.stringify({ error: "AI processing failed" }),
218
+ { status: 500, headers: { ...corsHeaders, "Content-Type": "application/json" } }
219
+ );
220
+ }
221
+
222
+ // Parse AI response
223
+ const data = await response.json();
224
+ const content = data.choices?.[0]?.message?.content;
225
+
226
+ if (!content) {
227
+ console.error("No content in AI response");
228
+ return new Response(
229
+ JSON.stringify({ error: "No AI response generated" }),
230
+ { status: 500, headers: { ...corsHeaders, "Content-Type": "application/json" } }
231
+ );
232
+ }
233
+
234
+ console.log("AI processing completed");
235
+
236
+ // Return processing result
237
+ return new Response(
238
+ JSON.stringify({
239
+ result: content,
240
+ model: requestData.options?.model || "google/gemini-2.5-flash"
241
+ }),
242
+ { headers: { ...corsHeaders, "Content-Type": "application/json" } }
243
+ );
244
+
245
+ } catch (error) {
246
+ console.error("Function execution error:", error);
247
+ return new Response(
248
+ JSON.stringify({ error: error instanceof Error ? error.message : "Unknown error" }),
249
+ { status: 500, headers: { ...corsHeaders, "Content-Type": "application/json" } }
250
+ );
251
+ }
252
+ });
253
+ ```
254
+
255
+ ### Frontend Usage Template (TypeScript)
256
+
257
+ **Prerequisites:** Supabase Client configured in `src/lib/supabase.ts`
258
+
259
+ ```typescript
260
+ // src/lib/supabase.ts (if not already present)
261
+ import { createClient } from '@supabase/supabase-js';
262
+
263
+ const supabaseUrl = import.meta.env.VITE_SUPABASE_URL;
264
+ const supabaseAnonKey = import.meta.env.VITE_SUPABASE_ANON_KEY;
265
+
266
+ export const supabase = createClient(supabaseUrl, supabaseAnonKey);
267
+ ```
268
+
269
+ **Direct Usage in React Component:**
270
+
271
+ ```typescript
272
+ import { useState } from 'react';
273
+ import { supabase } from '@/lib/supabase';
274
+ import { toast } from 'sonner';
275
+
276
+ export function ImageAnalyzer() {
277
+ const [isAnalyzing, setIsAnalyzing] = useState(false);
278
+ const [analysis, setAnalysis] = useState(null);
279
+
280
+ const handleAnalyze = async (imageData: string) => {
281
+ setIsAnalyzing(true);
282
+
283
+ try {
284
+ // Directly invoke Supabase Function
285
+ const { data, error } = await supabase.functions.invoke('ai-analysis', {
286
+ body: {
287
+ image: imageData,
288
+ prompt: "Please analyze this image in detail"
289
+ }
290
+ });
291
+
292
+ // Check invocation error
293
+ if (error) {
294
+ throw error;
295
+ }
296
+
297
+ // Check business error in response
298
+ if (data?.error) {
299
+ throw new Error(data.error);
300
+ }
301
+
302
+ setAnalysis(data.result);
303
+ toast.success("Analysis completed");
304
+ } catch (error) {
305
+ console.error("Analysis error:", error);
306
+ toast.error(error instanceof Error ? error.message : "Analysis failed");
307
+ } finally {
308
+ setIsAnalyzing(false);
309
+ }
310
+ };
311
+
312
+ // ... component render
313
+ }
314
+ ```
315
+
316
+ **Text Analysis Example:**
317
+
318
+ ```typescript
319
+ const handleTextAnalysis = async (text: string) => {
320
+ try {
321
+ const { data, error } = await supabase.functions.invoke('ai-text-analysis', {
322
+ body: {
323
+ text,
324
+ analysisType: 'summary'
325
+ }
326
+ });
327
+
328
+ if (error) throw error;
329
+ if (data?.error) throw new Error(data.error);
330
+
331
+ // Use data.result
332
+ console.log(data.result);
333
+ } catch (error) {
334
+ console.error("Text analysis error:", error);
335
+ }
336
+ };
337
+ ```
338
+
339
+ ### Streaming Chat Template (Frontend)
340
+
341
+ **Use Case:** Real-time streaming responses for chat applications
342
+
343
+ **Type Definitions and Streaming Function:**
344
+
345
+ ```typescript
346
+ // Define message type
347
+ export type Message = {
348
+ role: "user" | "assistant";
349
+ content: string;
350
+ id: string;
351
+ };
352
+
353
+ const CHAT_URL = `${import.meta.env.VITE_SUPABASE_URL}/functions/v1/chat`;
354
+
355
+ export async function streamChat({
356
+ messages,
357
+ onDelta,
358
+ onDone,
359
+ onError,
360
+ }: {
361
+ messages: Message[];
362
+ onDelta: (deltaText: string) => void;
363
+ onDone: () => void;
364
+ onError: (error: string) => void;
365
+ }) {
366
+ try {
367
+ const resp = await fetch(CHAT_URL, {
368
+ method: "POST",
369
+ headers: {
370
+ "Content-Type": "application/json",
371
+ Authorization: `Bearer ${import.meta.env.VITE_SUPABASE_ANON_KEY}`,
372
+ },
373
+ body: JSON.stringify({
374
+ messages: messages.map(m => ({ role: m.role, content: m.content }))
375
+ }),
376
+ });
377
+
378
+ if (!resp.ok) {
379
+ const errorData = await resp.json().catch(() => ({}));
380
+ if (resp.status === 429) {
381
+ onError(errorData.error || "Request rate too high, please try again later");
382
+ return;
383
+ }
384
+ if (resp.status === 402) {
385
+ onError(errorData.error || "Insufficient quota, please recharge to continue");
386
+ return;
387
+ }
388
+ onError(errorData.error || "Connection failed, please retry");
389
+ return;
390
+ }
391
+
392
+ if (!resp.body) {
393
+ onError("Unable to get response stream");
394
+ return;
395
+ }
396
+
397
+ const reader = resp.body.getReader();
398
+ const decoder = new TextDecoder();
399
+ let textBuffer = "";
400
+ let streamDone = false;
401
+
402
+ while (!streamDone) {
403
+ const { done, value } = await reader.read();
404
+ if (done) break;
405
+ textBuffer += decoder.decode(value, { stream: true });
406
+
407
+ let newlineIndex: number;
408
+ while ((newlineIndex = textBuffer.indexOf("\n")) !== -1) {
409
+ let line = textBuffer.slice(0, newlineIndex);
410
+ textBuffer = textBuffer.slice(newlineIndex + 1);
411
+
412
+ if (line.endsWith("\r")) line = line.slice(0, -1);
413
+ if (line.startsWith(":") || line.trim() === "") continue;
414
+ if (!line.startsWith("data: ")) continue;
415
+
416
+ const jsonStr = line.slice(6).trim();
417
+ if (jsonStr === "[DONE]") {
418
+ streamDone = true;
419
+ break;
420
+ }
421
+
422
+ try {
423
+ const parsed = JSON.parse(jsonStr);
424
+ const content = parsed.choices?.[0]?.delta?.content as string | undefined;
425
+ if (content) onDelta(content);
426
+ } catch {
427
+ textBuffer = line + "\n" + textBuffer;
428
+ break;
429
+ }
430
+ }
431
+ }
432
+
433
+ // Final flush
434
+ if (textBuffer.trim()) {
435
+ for (let raw of textBuffer.split("\n")) {
436
+ if (!raw) continue;
437
+ if (raw.endsWith("\r")) raw = raw.slice(0, -1);
438
+ if (raw.startsWith(":") || raw.trim() === "") continue;
439
+ if (!raw.startsWith("data: ")) continue;
440
+ const jsonStr = raw.slice(6).trim();
441
+ if (jsonStr === "[DONE]") continue;
442
+ try {
443
+ const parsed = JSON.parse(jsonStr);
444
+ const content = parsed.choices?.[0]?.delta?.content as string | undefined;
445
+ if (content) onDelta(content);
446
+ } catch { /* ignore */ }
447
+ }
448
+ }
449
+
450
+ onDone();
451
+ } catch (e) {
452
+ console.error("Stream chat error:", e);
453
+ onError("Network connection failed, please check your network and retry");
454
+ }
455
+ }
456
+ ```
457
+
458
+ **React Component Usage Example:**
459
+
460
+ ```typescript
461
+ import { useState } from 'react';
462
+ import { streamChat, Message } from '@/lib/streamChat';
463
+ import { toast } from 'sonner';
464
+
465
+ export function ChatInterface() {
466
+ const [messages, setMessages] = useState<Message[]>([]);
467
+ const [isStreaming, setIsStreaming] = useState(false);
468
+ const [currentResponse, setCurrentResponse] = useState("");
469
+
470
+ const handleSendMessage = async (userMessage: string) => {
471
+ // Add user message
472
+ const newMessage: Message = {
473
+ id: crypto.randomUUID(),
474
+ role: "user",
475
+ content: userMessage,
476
+ };
477
+ setMessages(prev => [...prev, newMessage]);
478
+
479
+ // Prepare assistant message placeholder
480
+ const assistantMessageId = crypto.randomUUID();
481
+ setCurrentResponse("");
482
+ setIsStreaming(true);
483
+ let fullResponse = ""; // local accumulator — state read in onDone would be stale
484
+ await streamChat({
485
+ messages: [...messages, newMessage],
486
+ onDelta: (deltaText) => {
487
+ fullResponse += deltaText; setCurrentResponse(fullResponse);
488
+ },
489
+ onDone: () => {
490
+ setMessages(prev => [...prev, {
491
+ id: assistantMessageId,
492
+ role: "assistant",
493
+ content: fullResponse,
494
+ }]);
495
+ setCurrentResponse("");
496
+ setIsStreaming(false);
497
+ },
498
+ onError: (error) => {
499
+ toast.error(error);
500
+ setIsStreaming(false);
501
+ setCurrentResponse("");
502
+ },
503
+ });
504
+ };
505
+
506
+ // ... component render
507
+ }
508
+ ```
509
+
510
+ ### Image Analysis Specialized Template
511
+
512
+ Use this template when users need image analysis functionality.
513
+
514
+ **File Location:** `supabase/functions/analyze-image/index.ts`
515
+
516
+ ```typescript
517
+ // supabase/functions/analyze-image/index.ts
518
+ import { serve } from "https://deno.land/std@0.168.0/http/server.ts";
519
+
520
+ const corsHeaders = {
521
+ "Access-Control-Allow-Origin": "*",
522
+ "Access-Control-Allow-Headers": "authorization, x-client-info, apikey, content-type",
523
+ };
524
+
525
+ serve(async (req) => {
526
+ if (req.method === "OPTIONS") {
527
+ return new Response(null, { headers: corsHeaders });
528
+ }
529
+
530
+ try {
531
+ const { image, prompt } = await req.json();
532
+
533
+ if (!image) {
534
+ return new Response(
535
+ JSON.stringify({ error: "No image provided" }),
536
+ { status: 400, headers: { ...corsHeaders, "Content-Type": "application/json" } }
537
+ );
538
+ }
539
+
540
+ const AI_API_KEY = Deno.env.get("AI_API_KEY");
541
+ if (!AI_API_KEY) {
542
+ return new Response(
543
+ JSON.stringify({ error: "AI service not configured" }),
544
+ { status: 500, headers: { ...corsHeaders, "Content-Type": "application/json" } }
545
+ );
546
+ }
547
+
548
+ console.log("Analyzing image...");
549
+
550
+ const response = await fetch("https://ai.gateway.needware.dev/v1/chat/completions", {
551
+ method: "POST",
552
+ headers: {
553
+ Authorization: `Bearer ${AI_API_KEY}`,
554
+ "Content-Type": "application/json",
555
+ },
556
+ body: JSON.stringify({
557
+ model: "google/gemini-2.5-flash", // Model with vision support
558
+ messages: [
559
+ {
560
+ role: "system",
561
+ content: "You are a professional image analysis expert. Please analyze image content in detail and provide accurate descriptions."
562
+ },
563
+ {
564
+ role: "user",
565
+ content: [
566
+ {
567
+ type: "text",
568
+ text: prompt || "Please analyze the content of this image in detail"
569
+ },
570
+ {
571
+ type: "image_url",
572
+ image_url: {
573
+ url: image // Supports data:image/... or https://...
574
+ }
575
+ }
576
+ ]
577
+ }
578
+ ],
579
+ }),
580
+ });
581
+
582
+ if (!response.ok) {
583
+ const errorText = await response.text();
584
+ console.error("AI service error:", response.status, errorText);
585
+ return new Response(
586
+ JSON.stringify({ error: "Image analysis failed" }),
587
+ { status: response.status, headers: { ...corsHeaders, "Content-Type": "application/json" } }
588
+ );
589
+ }
590
+
591
+ const data = await response.json();
592
+ const analysis = data.choices?.[0]?.message?.content;
593
+
594
+ // If expecting structured JSON, try to parse
595
+ let structuredResult;
596
+ try {
597
+ const jsonMatch = analysis.match(/```json\n?([\s\S]*?)\n?```/) || analysis.match(/\{[\s\S]*\}/);
598
+ if (jsonMatch) {
599
+ const jsonStr = jsonMatch[1] || jsonMatch[0];
600
+ structuredResult = JSON.parse(jsonStr);
601
+ }
602
+ } catch (parseError) {
603
+ console.log("Response is not JSON format, returning raw text");
604
+ structuredResult = { analysis };
605
+ }
606
+
607
+ console.log("Image analysis completed");
608
+
609
+ return new Response(
610
+ JSON.stringify({
611
+ result: structuredResult || analysis,
612
+ model: "google/gemini-2.5-flash"
613
+ }),
614
+ { headers: { ...corsHeaders, "Content-Type": "application/json" } }
615
+ );
616
+
617
+ } catch (error) {
618
+ console.error("Image analysis error:", error);
619
+ return new Response(
620
+ JSON.stringify({ error: error instanceof Error ? error.message : "Unknown error" }),
621
+ { status: 500, headers: { ...corsHeaders, "Content-Type": "application/json" } }
622
+ );
623
+ }
624
+ });
625
+ ```
626
+
627
+ ## Implementation Checklist
628
+
629
+ When users request AI integration, follow these steps:
630
+
631
+ ### Step 1: Requirements Analysis
632
+ - [ ] Confirm AI functionality type (image/text/speech/other)
633
+ - [ ] Confirm input/output formats
634
+ - [ ] Confirm if structured response is needed
635
+ - [ ] Confirm performance requirements (response time, concurrency)
636
+
637
+ ### Step 2: Choose Architecture
638
+ - [ ] Backend API pattern ✅ Recommended (more secure)
639
+ - [ ] Client direct connection pattern
640
+
641
+ ### Step 3: Choose AI Provider
642
+ - [ ] Needware AI Gateway (out-of-the-box, multi-model support)
643
+ - [ ] OpenAI (GPT series)
644
+ - [ ] Anthropic (Claude series)
645
+ - [ ] Google (Gemini series)
646
+ - [ ] Other custom APIs
647
+
648
+ ### Step 4: Install Dependencies
649
+ If `@supabase/supabase-js` is not present, install it.
650
+
651
+ **Install Supabase Client for Frontend:**
652
+ ```bash
653
+ pnpm add @supabase/supabase-js
654
+ ```
655
+
656
+ ### Step 5: Implement Code
657
+ - [ ] Create Supabase Function (path: `supabase/functions/<function-name>/index.ts`)
658
+ - [ ] Create Supabase Client configuration (path: `src/lib/supabase.ts`)
659
+ - [ ] Use `supabase.functions.invoke()` directly in components
660
+ - [ ] Add error handling (check both `error` and `data.error`)
661
+ - [ ] Add request logging and monitoring
662
+
663
+ **Supabase Function Naming Convention:**
664
+ - Use kebab-case naming: `ai-service`, `ai-process`, `ai-handler`, etc.
665
+ - Function names should clearly express functionality
666
+ - Each feature gets its own independent function directory
667
+
668
+ ### Step 6: Configure Environment Variables
669
+
670
+ **Configure in Supabase Dashboard:**
671
+ 1. Go to Project Settings → Edge Functions → Secrets
672
+ 2. Add environment variables:
673
+ - `AI_API_KEY`: Your AI service API key
674
+ - `AI_MODEL`: (Optional) Default model name
675
+ - `AI_MAX_TOKENS`: (Optional) Maximum token count
676
+
677
+ **Or use Supabase CLI:**
678
+ ```bash
679
+ # Set secrets
680
+ supabase secrets set AI_API_KEY=your_api_key_here
681
+ supabase secrets set AI_MODEL=google/gemini-2.5-flash
682
+ supabase secrets set AI_MAX_TOKENS=2000
683
+ ```
684
+
685
+ **Local Development Environment Variables:**
686
+ Create `supabase/.env.local` file (for Supabase Functions):
687
+ ```bash
688
+ AI_API_KEY=your_api_key_here
689
+ AI_MODEL=google/gemini-2.5-flash
690
+ AI_MAX_TOKENS=2000
691
+ ```
692
+
693
+ **Frontend Environment Variables:**
694
+ Configure in `.env` or `.env.local` file:
695
+ ```bash
696
+ VITE_SUPABASE_URL=your_supabase_project_url
697
+ VITE_SUPABASE_ANON_KEY=your_supabase_anon_key
698
+ ```
699
+
700
+ ### Step 7: Testing
701
+
702
+ **Test Supabase Function Locally:**
703
+ ```bash
704
+ # Start local Supabase
705
+ supabase start
706
+
707
+ # Deploy function locally
708
+ supabase functions serve <function-name>
709
+
710
+ # Test invocation (text example)
711
+ curl -i --location --request POST 'http://localhost:54321/functions/v1/<function-name>' \
712
+ --header 'Authorization: Bearer YOUR_ANON_KEY' \
713
+ --header 'Content-Type: application/json' \
714
+ --data '{"input":"test text","options":{"model":"google/gemini-2.5-flash"}}'
715
+
716
+ # Test invocation (image example)
717
+ curl -i --location --request POST 'http://localhost:54321/functions/v1/<function-name>' \
718
+ --header 'Authorization: Bearer YOUR_ANON_KEY' \
719
+ --header 'Content-Type: application/json' \
720
+ --data '{"image":"data:image/png;base64,...","prompt":"Please analyze this image"}'
721
+ ```
722
+
723
+ **Deploy to Production:**
724
+ ```bash
725
+ # Deploy function
726
+ supabase functions deploy <function-name>
727
+
728
+ # View logs
729
+ supabase functions logs <function-name>
730
+ ```
731
+
732
+
733
+ ## Best Practices
734
+
735
+ ### Security
736
+ - ✅ **Never** expose API keys in the frontend
737
+ - ✅ Use environment variables to store sensitive information
738
+ - ✅ Implement request rate limiting
739
+ - ✅ Validate and sanitize user input
740
+ - ✅ Add request size limits
741
+
742
+ ### Cost Control
743
+ - ✅ Choose cost-effective models
744
+ - ✅ Optimize prompt length
745
+ - ✅ Limit max_tokens parameter
746
+ - ✅ Implement usage monitoring and alerts
747
+ - ✅ Consider using batch processing to reduce costs
748
+
749
+ ## Error Handling Checklist
750
+
751
+ ```typescript
752
+ // Standard error handling pattern
753
+ const handleAIError = (error: any, statusCode: number) => {
754
+ const errorMap: Record<number, string> = {
755
+ 400: "Invalid request parameters",
756
+ 401: "API key invalid or expired",
757
+ 402: "Insufficient account balance or quota exhausted",
758
+ 403: "No permission to access this API",
759
+ 404: "API endpoint does not exist",
760
+ 429: "Too many requests, please try again later",
761
+ 500: "AI service internal error",
762
+ 503: "AI service temporarily unavailable",
763
+ };
764
+
765
+ return {
766
+ error: errorMap[statusCode] || "Unknown error",
767
+ statusCode,
768
+ originalError: error?.message,
769
+ timestamp: new Date().toISOString(),
770
+ };
771
+ };
772
+ ```
773
+
774
+ ## When NOT to Use This Skill
775
+
776
+ ❌ **Do not use in the following situations:**
777
+ - Simple frontend UI components (not involving AI)
778
+ - Pure data processing logic (no AI inference needed)
779
+ - Static content display
780
+ - User is just asking about AI concepts (just provide explanation)
781
+
782
+ ## Final Reminders
783
+
784
+ **After completing AI integration, you must:**
785
+ 1. ✅ Test all error scenarios
786
+ 2. ✅ Verify API key configuration
787
+ 3. ✅ Check response format correctness
788
+ 4. ✅ Test edge cases (extra-long input, special characters, etc.)
789
+ 5. ✅ Confirm smooth user experience
790
+ 6. ✅ Add usage documentation and examples
791
+
792
+ **Remember: AI integration is not "set it and forget it"; it requires continuous monitoring and optimization.**
793
+