@aigne/example-workflow-reflection 1.12.6 → 1.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,44 @@
1
1
  # Change the name of this file to .env.local and fill in the following values
2
2
 
3
- DEBUG=aigne:mcp
3
+ # Uncomment the lines below to enable debug logging
4
+ # DEBUG="aigne:*"
4
5
 
5
- OPENAI_API_KEY="" # Your OpenAI API key
6
+ # Use different Models
7
+
8
+ # OpenAI
9
+ MODEL="openai:gpt-4.1"
10
+ OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
11
+
12
+ # Anthropic claude
13
+ # MODEL="anthropic:claude-3-7-sonnet-latest"
14
+ # ANTHROPIC_API_KEY=""
15
+
16
+ # Gemini
17
+ # MODEL="gemini:gemini-2.0-flash"
18
+ # GEMINI_API_KEY=""
19
+
20
+ # Bedrock nova
21
+ # MODEL=bedrock:us.amazon.nova-premier-v1:0
22
+ # AWS_ACCESS_KEY_ID=""
23
+ # AWS_SECRET_ACCESS_KEY=""
24
+ # AWS_REGION=us-west-2
25
+
26
+ # DeepSeek
27
+ # MODEL="deepseek:deepseek-chat"
28
+ # DEEPSEEK_API_KEY=""
29
+
30
+ # OpenRouter
31
+ # MODEL="openrouter:openai/gpt-4o"
32
+ # OPEN_ROUTER_API_KEY=""
33
+
34
+ # xAI
35
+ # MODEL="xai:grok-2-latest"
36
+ # XAI_API_KEY=""
37
+
38
+ # Ollama
39
+ # MODEL="ollama:llama3.2"
40
+ # OLLAMA_DEFAULT_BASE_URL="http://localhost:11434/v1"
41
+
42
+
43
+ # Setup proxy if needed
44
+ # HTTPS_PROXY=http://localhost:7890
package/README.md CHANGED
@@ -23,11 +23,11 @@ class reviewer processing
23
23
 
24
24
  ## Prerequisites
25
25
 
26
- - [Node.js](https://nodejs.org) and npm installed on your machine
27
- - An [OpenAI API key](https://platform.openai.com/api-keys) for interacting with OpenAI's services
28
- - Optional dependencies (if running the example from source code):
29
- - [Bun](https://bun.sh) for running unit tests & examples
30
- - [Pnpm](https://pnpm.io) for package management
26
+ * [Node.js](https://nodejs.org) and npm installed on your machine
27
+ * An [OpenAI API key](https://platform.openai.com/api-keys) for interacting with OpenAI's services
28
+ * Optional dependencies (if running the example from source code):
29
+ * [Bun](https://bun.sh) for running unit tests & examples
30
+ * [Pnpm](https://pnpm.io) for package management
31
31
 
32
32
  ## Quick Start (No Installation Required)
33
33
 
@@ -68,6 +68,21 @@ Setup your OpenAI API key in the `.env.local` file:
68
68
  OPENAI_API_KEY="" # Set your OpenAI API key here
69
69
  ```
70
70
 
71
+ #### Using Different Models
72
+
73
+ You can use different AI models by setting the `MODEL` environment variable along with the corresponding API key. The framework supports multiple providers:
74
+
75
+ * **OpenAI**: `MODEL="openai:gpt-4.1"` with `OPENAI_API_KEY`
76
+ * **Anthropic**: `MODEL="anthropic:claude-3-7-sonnet-latest"` with `ANTHROPIC_API_KEY`
77
+ * **Google Gemini**: `MODEL="gemini:gemini-2.0-flash"` with `GEMINI_API_KEY`
78
+ * **AWS Bedrock**: `MODEL="bedrock:us.amazon.nova-premier-v1:0"` with AWS credentials
79
+ * **DeepSeek**: `MODEL="deepseek:deepseek-chat"` with `DEEPSEEK_API_KEY`
80
+ * **OpenRouter**: `MODEL="openrouter:openai/gpt-4o"` with `OPEN_ROUTER_API_KEY`
81
+ * **xAI**: `MODEL="xai:grok-2-latest"` with `XAI_API_KEY`
82
+ * **Ollama**: `MODEL="ollama:llama3.2"` with `OLLAMA_DEFAULT_BASE_URL`
83
+
84
+ For detailed configuration examples, please refer to the `.env.local.example` file in this directory.
85
+
71
86
  ### Run the Example
72
87
 
73
88
  ```bash
@@ -87,7 +102,7 @@ The example supports the following command-line parameters:
87
102
  | Parameter | Description | Default |
88
103
  |-----------|-------------|---------|
89
104
  | `--chat` | Run in interactive chat mode | Disabled (one-shot mode) |
90
- | `--model <provider[:model]>` | AI model to use in format 'provider[:model]' where model is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
105
+ | `--model <provider[:model]>` | AI model to use in format 'provider\[:model]' where model is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
91
106
  | `--temperature <value>` | Temperature for model generation | Provider default |
92
107
  | `--top-p <value>` | Top-p sampling value | Provider default |
93
108
  | `--presence-penalty <value>` | Presence penalty value | Provider default |
@@ -113,13 +128,11 @@ echo "Write a function to validate email addresses" | pnpm start
113
128
  The following example demonstrates how to build a reflection workflow:
114
129
 
115
130
  ```typescript
116
- import assert from "node:assert";
117
131
  import { AIAgent, AIGNE, UserInputTopic, UserOutputTopic } from "@aigne/core";
118
132
  import { OpenAIChatModel } from "@aigne/core/models/openai-chat-model.js";
119
133
  import { z } from "zod";
120
134
 
121
135
  const { OPENAI_API_KEY } = process.env;
122
- assert(OPENAI_API_KEY, "Please set the OPENAI_API_KEY environment variable");
123
136
 
124
137
  const model = new OpenAIChatModel({
125
138
  apiKey: OPENAI_API_KEY,
@@ -156,7 +169,8 @@ User's question:
156
169
 
157
170
  const reviewer = AIAgent.from({
158
171
  subscribeTopic: "review_request",
159
- publishTopic: (output) => (output.approval ? UserOutputTopic : "rewrite_request"),
172
+ publishTopic: (output) =>
173
+ output.approval ? UserOutputTopic : "rewrite_request",
160
174
  instructions: `\
161
175
  You are a code reviewer. You focus on correctness, efficiency and safety of the code.
162
176
 
@@ -177,14 +191,19 @@ Please review the code. If previous feedback was provided, see if it was address
177
191
  correctness: z.string().describe("Your comments on correctness"),
178
192
  efficiency: z.string().describe("Your comments on efficiency"),
179
193
  safety: z.string().describe("Your comments on safety"),
180
- suggested_changes: z.string().describe("Your comments on suggested changes"),
194
+ suggested_changes: z
195
+ .string()
196
+ .describe("Your comments on suggested changes"),
181
197
  }),
182
198
  }),
183
199
  includeInputInOutput: true,
184
200
  });
185
201
 
186
202
  const aigne = new AIGNE({ model, agents: [coder, reviewer] });
187
- aigne.publish(UserInputTopic, "Write a function to find the sum of all even numbers in a list.");
203
+ aigne.publish(
204
+ UserInputTopic,
205
+ "Write a function to find the sum of all even numbers in a list.",
206
+ );
188
207
 
189
208
  const { message } = await aigne.subscribe(UserOutputTopic);
190
209
  console.log(message);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aigne/example-workflow-reflection",
3
- "version": "1.12.6",
3
+ "version": "1.13.0",
4
4
  "description": "A demonstration of using AIGNE Framework to build a reflection workflow",
5
5
  "author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
6
6
  "homepage": "https://github.com/AIGNE-io/aigne-framework/tree/main/examples/workflow-reflection",
@@ -17,18 +17,18 @@
17
17
  ],
18
18
  "dependencies": {
19
19
  "zod": "^3.24.4",
20
- "@aigne/cli": "^1.16.0",
21
- "@aigne/agent-library": "^1.17.3",
22
- "@aigne/openai": "^0.4.3",
23
- "@aigne/core": "^1.26.0"
20
+ "@aigne/cli": "^1.18.0",
21
+ "@aigne/core": "^1.28.0",
22
+ "@aigne/agent-library": "^1.17.5",
23
+ "@aigne/openai": "^0.6.0"
24
24
  },
25
25
  "devDependencies": {
26
26
  "@types/bun": "^1.2.9",
27
- "@aigne/test-utils": "^0.4.10"
27
+ "@aigne/test-utils": "^0.4.12"
28
28
  },
29
29
  "scripts": {
30
30
  "start": "bun run index.ts",
31
31
  "lint": "tsc --noEmit",
32
- "test:llm": "bun test"
32
+ "test:llm": "bun run index.ts"
33
33
  }
34
34
  }
package/index.test.ts DELETED
@@ -1,10 +0,0 @@
1
- import { expect, test } from "bun:test";
2
- import { runExampleTest } from "@aigne/test-utils/run-example-test.js";
3
- test(
4
- "should successfully run the workflow-reflection",
5
- async () => {
6
- const { code } = await runExampleTest();
7
- expect(code).toBe(0);
8
- },
9
- { timeout: 600000 },
10
- );