needware-cli 1.6.10 → 1.6.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "needware-cli",
3
- "version": "1.6.10",
3
+ "version": "1.6.12",
4
4
  "description": "一个功能强大的 Node.js 命令行工具",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -0,0 +1,56 @@
1
+ ---
2
+ name: add-skill-integration
3
+ description: Create new skills to integrate third-party services or features into user projects. Use this skill when adding new service integrations.
4
+ ---
5
+
6
+ # Add Skill Integration
7
+
8
+ ## Overview
9
+
10
+ This skill guides you on how to install skills into user projects. Skills are modular integration guides that help integrate third-party services and features.
11
+
12
+ ## When to Use This Skill
13
+
14
+ Use this skill when users:
15
+ - Need to add new service integrations (databases, APIs, etc.)
16
+ - Want to create custom skill documentation
17
+
18
+ ## Quick Start
19
+
20
+ ### Install Skills
21
+
22
+ Use `add-skill` to install skills from a Git repository to your agent:
23
+
24
+ ```bash
25
+
26
+ # Install to Claude Code
27
+ npx add-skill https://github.com/your-org/your-skills -a claude-code -y
28
+
29
+ # GitHub shorthand
30
+ npx add-skill vercel-labs/agent-skills -a claude-code -y
31
+
32
+ # Full GitHub URL
33
+ npx add-skill https://github.com/vercel-labs/agent-skills -a claude-code -y
34
+
35
+ # Direct path to a skill in a repo
36
+ npx add-skill https://github.com/vercel-labs/agent-skills/tree/main/skills/frontend-design -a claude-code -y
37
+
38
+ # GitLab URL
39
+ npx add-skill https://gitlab.com/org/repo -a claude-code -y
40
+
41
+ # Any git URL
42
+ npx add-skill git@github.com:vercel-labs/agent-skills.git -a claude-code -y
43
+ ```
44
+ ### Supported Agents
45
+
46
+ - More agents see [add-skill documentation](https://github.com/vercel-labs/add-skill)
47
+
48
+
49
+ ## Example References
50
+
51
+ Reference existing skill implementations:
52
+
53
+ - [Vercel Agent Skills](https://github.com/vercel-labs/agent-skills)
54
+ - [Remotion Skills](https://github.com/remotion-dev/skills)
55
+
56
+
@@ -9,24 +9,26 @@ description: Use this skill when the conversation requires integrating AI servic
9
9
 
10
10
  **CRITICAL**: AI Integration requires Supabase as the backend (uses Supabase Edge Functions). Always check Supabase integration status before proceeding:
11
11
 
12
- **Step 1: Check Existing Supabase Integration**
12
+
13
+ **Step 1: Check Supabase Integration**
13
14
 
14
15
  Check if Supabase is already integrated:
15
16
  - Look for `src/lib/supabase.ts` file
16
17
  - Check `.env` file for Supabase environment variables:
17
18
  - `VITE_SUPABASE_PROJECT_ID`
18
- - `VITE_SUPABASE_PUBLISHABLE_KEY` or `VITE_SUPABASE_ANON_KEY`
19
+ - `VITE_SUPABASE_PUBLISHABLE_KEY`
19
20
  - `VITE_SUPABASE_URL`
20
21
 
21
22
  **Step 2: Handle Based on Status**
22
23
 
23
24
  **If Supabase is already integrated** (supabase.ts exists with valid configuration):
24
- - ✓ Inform user: "Supabase is already integrated. Proceeding with AI integration..."
25
- - Continue with AI integration workflow below
25
+ - ✓ Proceed with Resend integration
26
+ - Inform user: "✓ Supabase is integrated. Proceeding with Resend setup."
26
27
 
27
- **If Supabase is NOT integrated** (no supabase.ts):
28
- - ⚠️ Stop immediately and inform user: "AI integration requires Supabase backend. Setting up Supabase first..."
29
- - use the `supabase-integration` skill
28
+ **If Supabase is NOT integrated** (no supabase.ts or missing environment variables):
29
+ - Stop immediately
30
+ - Inform user: "⚠️ Supabase integration is required before setting up Resend. Resend email functions run on Supabase Edge Functions."
31
+ - Suggest: "Please enable Supabase first by saying 'Enable Cloud' or use the supabase-integration skill."
30
32
 
31
33
  ---
32
34
 
@@ -52,31 +54,14 @@ Trigger Conditions (use if any are met):
52
54
  - Need to call large language model APIs (GPT, Claude, Gemini, etc.)
53
55
  - Need visual AI capabilities (OCR, object recognition, image analysis)
54
56
  - Need text AI capabilities (translation, summarization, sentiment analysis)
57
+ - Need image generation capabilities (text-to-image, AI art generation) 🎨
55
58
  - User asks "how to integrate AI"
56
59
  ```
57
60
 
58
- ## Supabase Project Structure
59
61
 
60
- After generating AI integration code, the project structure looks like this:
62
+ ## Model Selection Guide
61
63
 
62
- ```
63
- project-root/
64
- ├── supabase/
65
- │ ├── functions/
66
- │ │ ├── <function-name-1>/
67
- │ │ │ └── index.ts # AI Feature 1
68
- │ │ ├── <function-name-2>/
69
- │ │ │ └── index.ts # AI Feature 2
70
- │ │ └── <function-name-3>/
71
- │ │ └── index.ts # AI Feature 3
72
- │ ├── .env.local # Functions environment variables
73
- │ └── config.toml # Supabase configuration
74
- ├── src/
75
- │ ├── lib/
76
- │ │ └── supabase.ts # Supabase Client configuration
77
- │ └── ...
78
- └── .env # Frontend environment variables (VITE_SUPABASE_URL, etc.)
79
- ```
64
+ **IMPORTANT:** Use `gemini-3-pro-image-preview` model for image generation, and `google/gemini-2.5-flash` model for text/image analysis.
80
65
 
81
66
  ## AI Integration Architecture Patterns
82
67
 
@@ -515,6 +500,182 @@ export function ChatInterface() {
515
500
  }
516
501
  ```
517
502
 
503
+ ### Image Generation Specialized Template
504
+
505
+ **CRITICAL: Use `gemini-3-pro-image-preview` model for image generation tasks.**
506
+
507
+ Use this template when users need to generate images from text descriptions.
508
+
509
+ **File Location:** `supabase/functions/generate-image/index.ts`
510
+
511
+ ```typescript
512
+ // supabase/functions/generate-image/index.ts
513
+ import { serve } from "https://deno.land/std@0.190.0/http/server.ts";
514
+
515
+ // CORS headers for cross-origin requests
516
+ const corsHeaders = {
517
+ "Access-Control-Allow-Origin": "*",
518
+ "Access-Control-Allow-Headers":
519
+ "authorization, x-client-info, apikey, content-type",
520
+ "Access-Control-Allow-Methods": "POST, OPTIONS",
521
+ "Access-Control-Max-Age": "86400",
522
+ };
523
+
524
+ // Request interface
525
+ interface ImageGenerationRequest {
526
+ prompt: string;
527
+ options?: {
528
+ size?: string;
529
+ quality?: string;
530
+ style?: string;
531
+ };
532
+ }
533
+
534
+ const handler = async (req: Request): Promise<Response> => {
535
+ // Handle CORS preflight requests
536
+ if (req.method === "OPTIONS") {
537
+ return new Response(null, {
538
+ status: 200,
539
+ headers: corsHeaders
540
+ });
541
+ }
542
+
543
+ try {
544
+ const { prompt, options }: ImageGenerationRequest = await req.json();
545
+
546
+ // Validate input
547
+ if (!prompt) {
548
+ return new Response(
549
+ JSON.stringify({ error: "No prompt provided" }),
550
+ {
551
+ status: 400,
552
+ headers: { "Content-Type": "application/json", ...corsHeaders }
553
+ }
554
+ );
555
+ }
556
+
557
+ // Get AI API key from environment
558
+ const AI_API_KEY = Deno.env.get("AI_API_KEY");
559
+ if (!AI_API_KEY) {
560
+ throw new Error("AI service not configured");
561
+ }
562
+
563
+ console.log("Generating image with prompt:", prompt);
564
+
565
+ // Call AI Gateway API with image generation model
566
+ const response = await fetch("https://ai.gateway.needware.dev/v1/chat/completions", {
567
+ method: "POST",
568
+ headers: {
569
+ "Authorization": `Bearer ${AI_API_KEY}`,
570
+ "Content-Type": "application/json",
571
+ },
572
+ body: JSON.stringify({
573
+ model: "gemini-3-pro-image-preview", // 🎨 Use image generation model
574
+ messages: [
575
+ {
576
+ role: "system",
577
+ content: "You are a professional image generation assistant. Generate high-quality images based on user descriptions."
578
+ },
579
+ {
580
+ role: "user",
581
+ content: prompt
582
+ }
583
+ ],
584
+ temperature: 0.8,
585
+ max_tokens: 4000,
586
+ }),
587
+ });
588
+
589
+ if (!response.ok) {
590
+ const errorText = await response.text();
591
+ console.error("AI service error:", response.status, errorText);
592
+ throw new Error(`Image generation failed: ${errorText}`);
593
+ }
594
+
595
+ const data = await response.json();
596
+ const result = data.choices?.[0]?.message?.content;
597
+
598
+ if (!result) {
599
+ throw new Error("No image generated");
600
+ }
601
+
602
+ console.log("Image generation completed");
603
+
604
+ return new Response(
605
+ JSON.stringify({
606
+ success: true,
607
+ imageUrl: result, // Image URL or base64 data
608
+ prompt: prompt,
609
+ model: "gemini-3-pro-image-preview"
610
+ }),
611
+ {
612
+ status: 200,
613
+ headers: { "Content-Type": "application/json", ...corsHeaders }
614
+ }
615
+ );
616
+
617
+ } catch (error: any) {
618
+ console.error("Image generation error:", error);
619
+ return new Response(
620
+ JSON.stringify({ error: error.message || "Unknown error" }),
621
+ {
622
+ status: 500,
623
+ headers: { "Content-Type": "application/json", ...corsHeaders }
624
+ }
625
+ );
626
+ }
627
+ };
628
+
629
+ serve(handler);
630
+ ```
631
+
632
+ **Frontend Usage Example:**
633
+
634
+ ```typescript
635
+ import { useState } from 'react';
636
+ import { supabase } from '@/lib/supabase';
637
+ import { toast } from 'sonner';
638
+
639
+ export function ImageGenerator() {
640
+ const [isGenerating, setIsGenerating] = useState(false);
641
+ const [generatedImage, setGeneratedImage] = useState<string | null>(null);
642
+
643
+ const handleGenerate = async (prompt: string) => {
644
+ setIsGenerating(true);
645
+
646
+ try {
647
+ const { data, error } = await supabase.functions.invoke('generate-image', {
648
+ body: {
649
+ prompt,
650
+ options: {
651
+ size: '1024x1024',
652
+ quality: 'high'
653
+ }
654
+ }
655
+ });
656
+
657
+ if (error) throw error;
658
+ if (data?.error) throw new Error(data.error);
659
+
660
+ setGeneratedImage(data.imageUrl);
661
+ toast.success("图片生成成功!");
662
+ } catch (error) {
663
+ console.error("Image generation error:", error);
664
+ toast.error(error instanceof Error ? error.message : "图片生成失败");
665
+ } finally {
666
+ setIsGenerating(false);
667
+ }
668
+ };
669
+
670
+ return (
671
+ <div>
672
+ {generatedImage && <img src={generatedImage} alt="Generated" />}
673
+ {/* UI components */}
674
+ </div>
675
+ );
676
+ }
677
+ ```
678
+
518
679
  ### Image Analysis Specialized Template
519
680
 
520
681
  Use this template when users need image analysis functionality.
@@ -660,7 +821,8 @@ serve(handler);
660
821
  When users request AI integration, follow these steps:
661
822
 
662
823
  ### Step 1: Requirements Analysis
663
- - [ ] Confirm AI functionality type (image/text/speech/other)
824
+ - [ ] Confirm AI functionality type (image analysis/image generation/text/speech/other)
825
+ - [ ] If image generation: Use `gemini-3-pro-image-preview` model 🎨
664
826
  - [ ] Confirm input/output formats
665
827
  - [ ] Confirm if structured response is needed
666
828
  - [ ] Confirm performance requirements (response time, concurrency)