@aigne/example-workflow-sequential 1.13.6 → 1.14.0
- package/.env.local.example +41 -2
- package/README.md +22 -9
- package/package.json +6 -6
package/.env.local.example
CHANGED

@@ -1,5 +1,44 @@
 # Change the name of this file to .env.local and fill in the following values
 
-
+# Uncomment the lines below to enable debug logging
+# DEBUG="aigne:*"
 
-
+# Use different Models
+
+# OpenAI
+MODEL="openai:gpt-4.1"
+OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
+
+# Anthropic claude
+# MODEL="anthropic:claude-3-7-sonnet-latest"
+# ANTHROPIC_API_KEY=""
+
+# Gemini
+# MODEL="gemini:gemini-2.0-flash"
+# GEMINI_API_KEY=""
+
+# Bedrock nova
+# MODEL=bedrock:us.amazon.nova-premier-v1:0
+# AWS_ACCESS_KEY_ID=""
+# AWS_SECRET_ACCESS_KEY=""
+# AWS_REGION=us-west-2
+
+# DeepSeek
+# MODEL="deepseek:deepseek-chat"
+# DEEPSEEK_API_KEY=""
+
+# OpenRouter
+# MODEL="openrouter:openai/gpt-4o"
+# OPEN_ROUTER_API_KEY=""
+
+# xAI
+# MODEL="xai:grok-2-latest"
+# XAI_API_KEY=""
+
+# Ollama
+# MODEL="ollama:llama3.2"
+# OLLAMA_DEFAULT_BASE_URL="http://localhost:11434/v1";
+
+
+# Setup proxy if needed
+# HTTPS_PROXY=http://localhost:7890
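
The `MODEL` values above follow a `provider:model` convention: everything before the first `:` names the provider, and the rest names the model. Below is a minimal sketch of that convention in TypeScript, wiring up only the OpenAI branch; the parsing and fallbacks are illustrative, not the package's actual loader, and it assumes `OpenAIChatModel` (imported in the README example below) accepts a `model` option:

```typescript
import { OpenAIChatModel } from "@aigne/core/models/openai-chat-model.js";

// Split "openai:gpt-4.1" into provider ("openai") and model name ("gpt-4.1").
const spec = process.env.MODEL ?? "openai:gpt-4.1";
const [provider, ...rest] = spec.split(":");
// Re-join the tail so Bedrock ids like "us.amazon.nova-premier-v1:0" keep their ":".
const modelName = rest.join(":");

if (provider !== "openai") {
  throw new Error(`This sketch only handles the "openai" provider, got "${provider}"`);
}

const model = new OpenAIChatModel({
  apiKey: process.env.OPENAI_API_KEY,
  model: modelName || undefined, // empty tail -> provider's default model
});
```
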
package/README.md
CHANGED

@@ -24,11 +24,11 @@ class formatProof processing
 
 ## Prerequisites
 
-
-
-
-
-
+* [Node.js](https://nodejs.org) and npm installed on your machine
+* An [OpenAI API key](https://platform.openai.com/api-keys) for interacting with OpenAI's services
+* Optional dependencies (if running the example from source code):
+  * [Bun](https://bun.sh) for running unit tests & examples
+  * [Pnpm](https://pnpm.io) for package management
 
 ## Quick Start (No Installation Required)
 
@@ -69,6 +69,21 @@ Setup your OpenAI API key in the `.env.local` file:
 OPENAI_API_KEY="" # Set your OpenAI API key here
 ```
 
+#### Using Different Models
+
+You can use different AI models by setting the `MODEL` environment variable along with the corresponding API key. The framework supports multiple providers:
+
+* **OpenAI**: `MODEL="openai:gpt-4.1"` with `OPENAI_API_KEY`
+* **Anthropic**: `MODEL="anthropic:claude-3-7-sonnet-latest"` with `ANTHROPIC_API_KEY`
+* **Google Gemini**: `MODEL="gemini:gemini-2.0-flash"` with `GEMINI_API_KEY`
+* **AWS Bedrock**: `MODEL="bedrock:us.amazon.nova-premier-v1:0"` with AWS credentials
+* **DeepSeek**: `MODEL="deepseek:deepseek-chat"` with `DEEPSEEK_API_KEY`
+* **OpenRouter**: `MODEL="openrouter:openai/gpt-4o"` with `OPEN_ROUTER_API_KEY`
+* **xAI**: `MODEL="xai:grok-2-latest"` with `XAI_API_KEY`
+* **Ollama**: `MODEL="ollama:llama3.2"` with `OLLAMA_DEFAULT_BASE_URL`
+
+For detailed configuration examples, please refer to the `.env.local.example` file in this directory.
+
 ### Run the Example
 
 ```bash
@@ -88,7 +103,7 @@ The example supports the following command-line parameters:
 | Parameter | Description | Default |
 |-----------|-------------|---------|
 | `--chat` | Run in interactive chat mode | Disabled (one-shot mode) |
-| `--model <provider[:model]>` | AI model to use in format 'provider[:model]' where model is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
+| `--model <provider[:model]>` | AI model to use in format 'provider\[:model]' where model is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
 | `--temperature <value>` | Temperature for model generation | Provider default |
 | `--top-p <value>` | Top-p sampling value | Provider default |
 | `--presence-penalty <value>` | Presence penalty value | Provider default |
@@ -114,12 +129,10 @@ echo "Create marketing content for our new AI-powered fitness app" | pnpm start
 The following example demonstrates how to build a sequential workflow:
 
 ```typescript
-import assert from "node:assert";
 import { AIAgent, AIGNE, TeamAgent, ProcessMode } from "@aigne/core";
 import { OpenAIChatModel } from "@aigne/core/models/openai-chat-model.js";
 
 const { OPENAI_API_KEY } = process.env;
-assert(OPENAI_API_KEY, "Please set the OPENAI_API_KEY environment variable");
 
 const model = new OpenAIChatModel({
   apiKey: OPENAI_API_KEY,
@@ -172,7 +185,7 @@ const aigne = new AIGNE({ model });
 // Create a TeamAgent to handle the sequential workflow
 const teamAgent = TeamAgent.from({
   skills: [conceptExtractor, writer, formatProof],
-  mode: ProcessMode.sequential // default value, can be omitted
+  mode: ProcessMode.sequential, // default value, can be omitted
 });
 
 const result = await aigne.invoke(teamAgent, {
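
The diff elides the three agent definitions (README lines 126–171), so the excerpt above is not runnable by itself. Below is a self-contained sketch of the same pattern with placeholder prompts; it assumes, based on other AIGNE examples, that `AIAgent.from` accepts `instructions` with `{{variable}}` templating and an `outputKey` — the real prompts live in the package's source:

```typescript
import { AIAgent, AIGNE, ProcessMode, TeamAgent } from "@aigne/core";
import { OpenAIChatModel } from "@aigne/core/models/openai-chat-model.js";

const model = new OpenAIChatModel({ apiKey: process.env.OPENAI_API_KEY });
const aigne = new AIGNE({ model });

// Placeholder prompts, not the package's actual instructions.
const conceptExtractor = AIAgent.from({
  instructions: "List the key selling points of this product: {{product}}",
  outputKey: "concept",
});
const writer = AIAgent.from({
  instructions: "Write short marketing copy from these points: {{concept}}",
  outputKey: "draft",
});
const formatProof = AIAgent.from({
  instructions: "Proofread and tighten this draft: {{draft}}",
  outputKey: "content",
});

// Sequential mode runs the skills in order; each agent's outputKey is merged
// into the shared state, so writer can read `concept` and formatProof `draft`.
const teamAgent = TeamAgent.from({
  skills: [conceptExtractor, writer, formatProof],
  mode: ProcessMode.sequential, // the default, shown for clarity
});

const result = await aigne.invoke(teamAgent, {
  product: "An AI-powered fitness app",
});
console.log(result); // expected shape under these assumptions: { concept, draft, content }
```
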
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@aigne/example-workflow-sequential",
-  "version": "1.13.6",
+  "version": "1.14.0",
   "description": "A demonstration of using AIGNE Framework to build a sequential workflow",
   "author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
   "homepage": "https://github.com/AIGNE-io/aigne-framework/tree/main/examples/workflow-sequential",
@@ -16,14 +16,14 @@
     "README.md"
   ],
   "dependencies": {
-    "@aigne/agent-library": "^1.17.
-    "@aigne/
-    "@aigne/
-    "@aigne/
+    "@aigne/agent-library": "^1.17.5",
+    "@aigne/core": "^1.28.0",
+    "@aigne/openai": "^0.6.0",
+    "@aigne/cli": "^1.18.0"
   },
   "devDependencies": {
     "@types/bun": "^1.2.9",
-    "@aigne/test-utils": "^0.4.
+    "@aigne/test-utils": "^0.4.12"
   },
   "scripts": {
     "start": "bun run index.ts",