@aigne/example-workflow-code-execution 1.10.1 → 1.11.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # Workflow code-execution Demo
2
2
 
3
- This is a demonstration of using [AIGNE Framework](https://github.com/AIGNE-io/aigne-framework) to build a code-execution workflow.
3
+ This is a demonstration of using [AIGNE Framework](https://github.com/AIGNE-io/aigne-framework) to build a code-execution workflow. The example now supports both one-shot and interactive chat modes, along with customizable model settings and pipeline input/output.
4
4
 
5
5
  ```mermaid
6
6
  flowchart LR
@@ -52,7 +52,14 @@ Coder ->> User: The value of \(10!\) (10 factorial) is 3,628,800.
52
52
  ```bash
53
53
  export OPENAI_API_KEY=YOUR_OPENAI_API_KEY # Set your OpenAI API key
54
54
 
55
- npx -y @aigne/example-workflow-code-execution # Run the example
55
+ # Run in one-shot mode (default)
56
+ npx -y @aigne/example-workflow-code-execution
57
+
58
+ # Run in interactive chat mode
59
+ npx -y @aigne/example-workflow-code-execution --chat
60
+
61
+ # Use pipeline input
62
+ echo "Calculate 15!" | npx -y @aigne/example-workflow-code-execution
56
63
  ```
57
64
 
58
65
  ## Installation
@@ -82,7 +89,41 @@ OPENAI_API_KEY="" # Set your OpenAI API key here
82
89
  ### Run the Example
83
90
 
84
91
  ```bash
85
- pnpm start
92
+ pnpm start # Run in one-shot mode (default)
93
+
94
+ # Run in interactive chat mode
95
+ pnpm start -- --chat
96
+
97
+ # Use pipeline input
98
+ echo "Calculate 15!" | pnpm start
99
+ ```
100
+
101
+ ### Run Options
102
+
103
+ The example supports the following command-line parameters:
104
+
105
+ | Parameter | Description | Default |
106
+ |-----------|-------------|---------|
107
+ | `--chat` | Run in interactive chat mode | Disabled (one-shot mode) |
108
+ | `--model <provider[:model]>` | AI model to use in the format 'provider[:model]', where the model part is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
109
+ | `--temperature <value>` | Temperature for model generation | Provider default |
110
+ | `--top-p <value>` | Top-p sampling value | Provider default |
111
+ | `--presence-penalty <value>` | Presence penalty value | Provider default |
112
+ | `--frequency-penalty <value>` | Frequency penalty value | Provider default |
113
+ | `--log-level <level>` | Set logging level (ERROR, WARN, INFO, DEBUG, TRACE) | INFO |
114
+ | `--input`, `-i <input>` | Specify input directly | None |
115
+
116
+ #### Examples
117
+
118
+ ```bash
119
+ # Run in chat mode (interactive)
120
+ pnpm start -- --chat
121
+
122
+ # Set logging level
123
+ pnpm start -- --log-level DEBUG
124
+
125
+ # Use pipeline input
126
+ echo "Calculate 15!" | pnpm start
86
127
  ```
87
128
 
88
129
  ## Example
package/index.ts CHANGED
@@ -1,12 +1,9 @@
1
1
  #!/usr/bin/env bunwrapper
2
2
 
3
- import { runChatLoopInTerminal } from "@aigne/cli/utils/run-chat-loop.js";
4
- import { AIAgent, AIGNE, FunctionAgent } from "@aigne/core";
5
- import { loadModel } from "@aigne/core/loader/index.js";
3
+ import { runWithAIGNE } from "@aigne/cli/utils/run-with-aigne.js";
4
+ import { AIAgent, FunctionAgent } from "@aigne/core";
6
5
  import { z } from "zod";
7
6
 
8
- const model = await loadModel();
9
-
10
7
  const sandbox = FunctionAgent.from({
11
8
  name: "evaluateJs",
12
9
  description: `
@@ -38,12 +35,10 @@ Work with the sandbox to execute your code.
38
35
  memory: true,
39
36
  });
40
37
 
41
- const aigne = new AIGNE({ model });
42
-
43
- const user = aigne.invoke(coder);
44
-
45
- await runChatLoopInTerminal(user, {
46
- welcome:
47
- "Welcome to the code execution workflow! you can ask me anything can be resolved by running code.",
48
- defaultQuestion: "10! = ?",
38
+ await runWithAIGNE(coder, {
39
+ chatLoopOptions: {
40
+ welcome:
41
+ "Welcome to the code execution workflow! you can ask me anything can be resolved by running code.",
42
+ defaultQuestion: "10! = ?",
43
+ },
49
44
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aigne/example-workflow-code-execution",
3
- "version": "1.10.1",
3
+ "version": "1.11.1",
4
4
  "description": "A demonstration of using AIGNE Framework to build a code-execution workflow",
5
5
  "author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
6
6
  "homepage": "https://github.com/AIGNE-io/aigne-framework/tree/main/examples/workflow-code-execution",
@@ -16,13 +16,14 @@
16
16
  "README.md"
17
17
  ],
18
18
  "dependencies": {
19
- "openai": "^4.97.0",
20
19
  "zod": "^3.24.4",
21
- "@aigne/cli": "^1.9.1",
22
- "@aigne/core": "^1.15.0"
20
+ "@aigne/cli": "^1.10.1",
21
+ "@aigne/core": "^1.17.0",
22
+ "@aigne/openai": "^0.2.0"
23
23
  },
24
24
  "devDependencies": {
25
- "@aigne/test-utils": "^0.2.0"
25
+ "@types/bun": "^1.2.9",
26
+ "@aigne/test-utils": "^0.3.1"
26
27
  },
27
28
  "scripts": {
28
29
  "start": "bun run index.ts",
package/usages.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  import assert from "node:assert";
2
2
  import { AIAgent, AIGNE, FunctionAgent } from "@aigne/core";
3
- import { OpenAIChatModel } from "@aigne/core/models/openai-chat-model.js";
3
+ import { OpenAIChatModel } from "@aigne/openai";
4
4
  import { z } from "zod";
5
5
 
6
6
  const { OPENAI_API_KEY } = process.env;