create-web-ai-service 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/docs/configuration.md +96 -0
- package/docs/creating-endpoints.md +202 -0
- package/docs/getting-started.md +78 -0
- package/docs/using-plugins.md +127 -0
- package/package.json +3 -2
package/docs/configuration.md
ADDED
@@ -0,0 +1,96 @@
# Configuration Reference

All configuration options for web-ai-service projects.

## Environment Variables

### Server

| Variable | Default | Description |
|----------|---------|-------------|
| `PORT` | `3000` | HTTP server port |
| `LOG_LEVEL` | `info` | Logging level: `debug`, `info`, `warn`, `error` |

### LLM Providers

Configure API keys for the providers you want to use. You need at least one.

| Variable | Description |
|----------|-------------|
| `GEMINI_API_KEY` | Google Gemini API key |
| `OPENAI_API_KEY` | OpenAI API key |
| `ANTHROPIC_API_KEY` | Anthropic Claude API key |
| `GROK_API_KEY` | xAI Grok API key |
| `LLM_TIMEOUT_MS` | Request timeout in milliseconds (default: `30000`) |
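For example, a minimal `.env` combining these (values are placeholders; any one provider key is enough):

```bash
PORT=3000
LOG_LEVEL=info
GEMINI_API_KEY=your-gemini-api-key-here
LLM_TIMEOUT_MS=30000
```
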
### Supabase (Optional)

| Variable | Description |
|----------|-------------|
| `SUPABASE_URL` | Your Supabase project URL |
| `SUPABASE_ANON_KEY` | Public/anonymous key |
| `SUPABASE_SERVICE_KEY` | Service role key (optional) |

---

## LLM Provider Options

### Supported Providers

| Provider | Models |
|----------|--------|
| `gemini` | `gemini-2.0-flash-lite`, `gemini-2.0-flash`, `gemini-1.5-pro` |
| `openai` | `gpt-4o`, `gpt-4o-mini`, `gpt-4-turbo` |
| `anthropic` | `claude-3-5-sonnet-latest`, `claude-3-haiku-20240307` |
| `grok` | `grok-2`, `grok-2-mini` |

### Node Configuration

```yaml
nodes:
  my_llm_node:
    type: llm
    input: $input
    provider: gemini              # Required
    model: gemini-2.0-flash-lite  # Required
    temperature: 0.7              # Optional (0.0-1.0)
    maxTokens: 1024               # Optional
    systemMessages:               # Optional
      - file: prompt.txt
    cache: true                   # Cache for performance
```

---

## Workflow Structure

```yaml
version: "1.0"

stages:
  - name: stage_name
    nodes:
      node_name:
        type: llm | code | reduce | split | passthrough
        input: $input | stageName.nodeName
        # ... type-specific options
```

### Input References

| Pattern | Description |
|---------|-------------|
| `$input` | Full request body |
| `$input.field` | Specific field from body |
| `stage.node` | Output from previous node |
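As a sketch, a hypothetical two-stage workflow wiring these references together (`clean.ts` and the stage/node names are illustrative):

```yaml
stages:
  - name: prepare
    nodes:
      clean:
        type: code
        input: $input.text    # specific field from the request body
        file: clean.ts

  - name: respond
    nodes:
      answer:
        type: llm
        input: prepare.clean  # output of the node above
        provider: gemini
        model: gemini-2.0-flash-lite
```
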
---

## File Paths

| Directory | Purpose |
|-----------|---------|
| `src/endpoints/` | API endpoint definitions |
| `src/endpoints/*/codes/` | Code node TypeScript files |
| `src/endpoints/*/prompts/` | LLM system prompt files |
| `src/plugins/` | Shared plugin modules |

package/docs/creating-endpoints.md
ADDED
@@ -0,0 +1,202 @@
# Creating Endpoints

Endpoints are the API routes of your web-ai-service project. Each endpoint is a folder containing a YAML workflow definition and optional code/prompt files.

## Quick Example

Create a POST endpoint at `/summarize`:

```bash
mkdir -p src/endpoints/summarize/prompts
```

### Step 1: Create the Workflow

**`src/endpoints/summarize/POST.yaml`**:

```yaml
version: "1.0"

stages:
  - name: main
    nodes:
      summarize:
        type: llm
        input: $input.text
        provider: gemini
        model: gemini-2.0-flash-lite
        temperature: 0.3
        maxTokens: 1024
        systemMessages:
          - file: system.txt
```

### Step 2: Create the System Prompt

**`src/endpoints/summarize/prompts/system.txt`**:

```text
You are a concise summarization assistant. Summarize the provided text clearly in 2-3 paragraphs.
```

### Step 3: Test

```bash
curl -X POST http://localhost:3000/summarize \
  -H "Content-Type: application/json" \
  -d '{"text": "Long text to summarize..."}'
```

---

## Adding Code Nodes

Use code nodes for custom logic like validation, data transformation, or external API calls.

### Example: Input Validation

**`src/endpoints/summarize/codes/validate.ts`**:

```typescript
import type { NodeOutput } from '@workflow/types';

export default async function(input: unknown): Promise<NodeOutput> {
  const body = input as { text?: string };

  if (!body.text || body.text.length < 10) {
    throw new Error('Text must be at least 10 characters');
  }

  return { type: 'string', value: body.text };
}
```

**Updated `POST.yaml`**:

```yaml
version: "1.0"

stages:
  - name: validate
    nodes:
      check_input:
        type: code
        input: $input
        file: validate.ts

  - name: process
    nodes:
      summarize:
        type: llm
        input: validate.check_input
        provider: gemini
        model: gemini-2.0-flash-lite
        systemMessages:
          - file: system.txt
```

---

## Multi-Stage Workflows

Chain multiple stages together. Each stage's output can be referenced by the next.

### Example: Extract → Analyze → Format

```yaml
version: "1.0"

stages:
  - name: extract
    nodes:
      get_data:
        type: code
        input: $input
        file: extract-data.ts

  - name: analyze
    nodes:
      analyze_content:
        type: llm
        input: extract.get_data
        provider: gemini
        model: gemini-2.0-flash-lite
        systemMessages:
          - file: analyzer-prompt.txt

  - name: format
    nodes:
      format_output:
        type: code
        input: analyze.analyze_content
        file: format-response.ts
```

---

## Parallel Execution

Run multiple nodes in parallel within a single stage:

```yaml
stages:
  - name: parallel_analysis
    nodes:
      sentiment:
        type: llm
        input: $input.text
        provider: gemini
        model: gemini-2.0-flash-lite
        systemMessages:
          - file: sentiment-prompt.txt

      keywords:
        type: llm
        input: $input.text
        provider: gemini
        model: gemini-2.0-flash-lite
        systemMessages:
          - file: keywords-prompt.txt

  - name: combine
    nodes:
      merge_results:
        type: reduce
        inputs:
          - parallel_analysis.sentiment
          - parallel_analysis.keywords
        mapping:
          sentiment: $.0
          keywords: $.1
```

---

## Node Types Reference

| Type | Purpose | Key Properties |
|------|---------|----------------|
| `llm` | Call LLM provider | `provider`, `model`, `systemMessages`, `temperature`, `maxTokens` |
| `code` | Execute TypeScript | `file` |
| `reduce` | Combine outputs | `inputs`, `mapping` |
| `split` | Divide output | `mapping` |
| `passthrough` | Pass input unchanged | – |
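`reduce` is demonstrated in the parallel example above. For `split` and `passthrough`, here is a hedged sketch; it assumes `split`'s `mapping` uses the same `$.`-path syntax as `reduce`, and the stage, node, and field names are illustrative:

```yaml
stages:
  - name: fan_out
    nodes:
      split_fields:
        type: split
        input: $input
        mapping:            # assumed: same $.path syntax as reduce
          title: $.title
          body: $.body

      forward:
        type: passthrough   # passes its input through unchanged
        input: $input
```
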
---

## Input References

| Reference | Description |
|-----------|-------------|
| `$input` | Raw request body |
| `$input.fieldName` | Specific field from request |
| `stageName.nodeName` | Output from a previous node |

---

## Best Practices

1. **Validate early** – Add a code node at the start to validate inputs
2. **Keep prompts focused** – One task per LLM node
3. **Use stages logically** – Group related operations
4. **Error handling** – Throw descriptive errors in code nodes (see the sketch below)
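For point 4, a short sketch of a code node that fails with a descriptive, actionable message; the `url` field and its check are illustrative:

```typescript
import type { NodeOutput } from '@workflow/types';

export default async function(input: unknown): Promise<NodeOutput> {
  const body = input as { url?: string };

  // Name the field, the constraint, and what was actually received
  if (!body.url || !body.url.startsWith('https://')) {
    throw new Error(
      `Expected "url" to be an https:// URL, got: ${JSON.stringify(body.url)}`
    );
  }

  return { type: 'string', value: body.url };
}
```
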
package/docs/getting-started.md
ADDED
@@ -0,0 +1,78 @@
# Getting Started with Web AI Service

This guide walks you through creating your first AI-powered API with web-ai-service.

## Prerequisites

- Node.js 18+
- An LLM API key (Gemini, OpenAI, Anthropic, or Grok)

## Installation

### Create a New Project

```bash
npx create-web-ai-service my-api
cd my-api
```

The CLI will prompt you to:

1. Enter your project name
2. Select plugins to install (e.g., Supabase)

### Configure Environment

```bash
cp .env.example .env
```

Edit `.env` and add your API keys:

```bash
# At minimum, add one LLM provider key
GEMINI_API_KEY=your-gemini-api-key-here
# or
OPENAI_API_KEY=your-openai-api-key-here
```

### Start Development

```bash
npm run dev
```

Your API is now running at `http://localhost:3000`.

## Test the Example Endpoint

The scaffolder creates a `/hello` example endpoint. Test it:

```bash
# Basic request
curl http://localhost:3000/hello

# With a name parameter
curl "http://localhost:3000/hello?name=Alice"
```

## Project Structure

```
my-api/
├── src/
│   ├── endpoints/        # API endpoint definitions
│   │   └── hello/        # Example: GET /hello
│   │       ├── GET.yaml  # Workflow definition
│   │       ├── codes/    # TypeScript code nodes
│   │       └── prompts/  # LLM system prompts
│   └── plugins/          # Shared code modules
├── .env                  # Environment config (gitignored)
├── .env.example          # Template for .env
└── package.json
```
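Endpoint folders map to routes, with the HTTP method taken from the workflow filename. For instance, alongside the scaffolded example (the `summarize` path is hypothetical until you create it):

```
src/endpoints/hello/GET.yaml       → GET  /hello
src/endpoints/summarize/POST.yaml  → POST /summarize
```
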
## Next Steps

- [Creating Endpoints](./creating-endpoints.md) - Learn to build custom endpoints
- [Using Plugins](./using-plugins.md) - Configure database and auth plugins
- [Configuration Reference](./configuration.md) - All environment options

package/docs/using-plugins.md
ADDED
@@ -0,0 +1,127 @@
# Using Plugins

Plugins extend your web-ai-service project with additional capabilities like database access, authentication, and external services.

## Available Plugins

| Plugin | Description |
|--------|-------------|
| `supabase` | Database & Auth integration with Supabase |

---

## Supabase Plugin

### Installation

If you didn't select Supabase during project creation, add it manually:

```bash
npm install @supabase/supabase-js
```

Copy the plugin file from [the template](https://github.com/yourrepo/web-ai-service) or create your own.

### Configuration

Add these variables to your `.env` file:

```bash
SUPABASE_URL=https://your-project.supabase.co
SUPABASE_ANON_KEY=your-supabase-anon-key
SUPABASE_SERVICE_KEY=your-supabase-service-key # Optional, for admin operations
```

### Usage in Code Nodes

```typescript
import { supabase } from '@code-plugins/supabase.js';
import type { NodeOutput } from '@workflow/types';

export default async function(input: unknown): Promise<NodeOutput> {
  const { data, error } = await supabase
    .from('articles')
    .select('*')
    .limit(10);

  if (error) {
    throw new Error(`Database error: ${error.message}`);
  }

  return { type: 'json', value: data };
}
```

### Custom Clients

Create a client with different credentials:

```typescript
import { getSupabaseClient } from '@code-plugins/supabase.js';

// Use custom URL and key
const customClient = getSupabaseClient(
  'https://other-project.supabase.co',
  'other-anon-key'
);
```

---

## Creating Custom Plugins

Plugins are TypeScript modules in `src/plugins/`. Create your own:

### Example: Redis Cache Plugin

**`src/plugins/redis.ts`**:

```typescript
import Redis from 'ioredis';

let client: Redis | null = null;

function getRedisUrl(): string {
  const url = process.env.REDIS_URL;
  if (!url) {
    throw new Error('REDIS_URL environment variable is required');
  }
  return url;
}

export function getRedisClient(): Redis {
  if (!client) {
    client = new Redis(getRedisUrl());
  }
  return client;
}

// The Proxy defers connection until a property is first accessed,
// so importing the plugin never connects eagerly.
export const redis = new Proxy({} as Redis, {
  get(_target, prop) {
    return Reflect.get(getRedisClient(), prop);
  },
});
```

Add to `.env`:

```bash
REDIS_URL=redis://localhost:6379
```

Use in code nodes:

```typescript
import { redis } from '@code-plugins/redis.js';

const cached = await redis.get('my-key');
```

---

## Plugin Best Practices

1. **Lazy initialization** – Don't connect until first use
2. **Environment validation** – Throw clear errors for missing config
3. **Singleton pattern** – Reuse connections across requests
4. **Type exports** – Re-export useful types for consumers (see the sketch below)
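For point 4, a hedged sketch of re-exporting types from the Redis plugin above, so code nodes don't import from `ioredis` directly (`RedisOptions` is one example of a useful type):

```typescript
// src/plugins/redis.ts (continued)
export type { Redis, RedisOptions } from 'ioredis';
```
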
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "create-web-ai-service",
-  "version": "1.0.2",
+  "version": "1.0.4",
   "description": "CLI scaffolder for creating new web-ai-service projects",
   "type": "module",
   "main": "dist/index.js",
@@ -10,7 +10,8 @@
   "files": [
     "dist",
     "bin",
-    "templates"
+    "templates",
+    "docs"
   ],
   "scripts": {
     "build": "tsc",