@aigne/example-workflow-sequential 1.17.88-beta.3 → 1.17.88-beta.30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -5
- package/package.json +6 -6
package/README.md
CHANGED
|
@@ -47,7 +47,7 @@ class formatProof processing
|
|
|
47
47
|
npx -y @aigne/example-workflow-sequential
|
|
48
48
|
|
|
49
49
|
# Run in interactive chat mode
|
|
50
|
-
npx -y @aigne/example-workflow-sequential --
|
|
50
|
+
npx -y @aigne/example-workflow-sequential --interactive
|
|
51
51
|
|
|
52
52
|
# Use pipeline input
|
|
53
53
|
echo "Create marketing content for our new AI-powered fitness app" | npx -y @aigne/example-workflow-sequential
|
|
@@ -55,7 +55,7 @@ echo "Create marketing content for our new AI-powered fitness app" | npx -y @aig
|
|
|
55
55
|
|
|
56
56
|
### Connect to an AI Model
|
|
57
57
|
|
|
58
|
-
As an example, running `npx -y @aigne/example-workflow-sequential --
|
|
58
|
+
As an example, running `npx -y @aigne/example-workflow-sequential --interactive` requires an AI model. If this is your first run, you need to connect one.
|
|
59
59
|
|
|
60
60
|

|
|
61
61
|
|
|
@@ -116,7 +116,7 @@ pnpm install
|
|
|
116
116
|
pnpm start # Run in one-shot mode (default)
|
|
117
117
|
|
|
118
118
|
# Run in interactive chat mode
|
|
119
|
-
pnpm start -- --
|
|
119
|
+
pnpm start -- --interactive
|
|
120
120
|
|
|
121
121
|
# Use pipeline input
|
|
122
122
|
echo "Create marketing content for our new AI-powered fitness app" | pnpm start
|
|
@@ -128,7 +128,7 @@ The example supports the following command-line parameters:
|
|
|
128
128
|
|
|
129
129
|
| Parameter | Description | Default |
|
|
130
130
|
|-----------|-------------|---------|
|
|
131
|
-
| `--
|
|
131
|
+
| `--interactive` | Run in interactive chat mode | Disabled (one-shot mode) |
|
|
132
132
|
| `--model <provider[:model]>` | AI model to use in format 'provider\[:model]' where model is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
|
|
133
133
|
| `--temperature <value>` | Temperature for model generation | Provider default |
|
|
134
134
|
| `--top-p <value>` | Top-p sampling value | Provider default |
|
|
@@ -141,7 +141,7 @@ The example supports the following command-line parameters:
|
|
|
141
141
|
|
|
142
142
|
```bash
|
|
143
143
|
# Run in chat mode (interactive)
|
|
144
|
-
pnpm start -- --
|
|
144
|
+
pnpm start -- --interactive
|
|
145
145
|
|
|
146
146
|
# Set logging level
|
|
147
147
|
pnpm start -- --log-level DEBUG
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@aigne/example-workflow-sequential",
|
|
3
|
-
"version": "1.17.88-beta.3",
|
|
3
|
+
"version": "1.17.88-beta.30",
|
|
4
4
|
"description": "A demonstration of using AIGNE Framework to build a sequential workflow",
|
|
5
5
|
"author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
|
|
6
6
|
"homepage": "https://github.com/AIGNE-io/aigne-framework/tree/main/examples/workflow-sequential",
|
|
@@ -16,14 +16,14 @@
|
|
|
16
16
|
"README.md"
|
|
17
17
|
],
|
|
18
18
|
"dependencies": {
|
|
19
|
-
"@aigne/agent-library": "^1.24.0-beta.
|
|
20
|
-
"@aigne/cli": "^1.59.0-beta.
|
|
21
|
-
"@aigne/core": "^1.72.0-beta.
|
|
22
|
-
"@aigne/openai": "^0.16.16-beta.
|
|
19
|
+
"@aigne/agent-library": "^1.24.0-beta.25",
|
|
20
|
+
"@aigne/cli": "^1.59.0-beta.29",
|
|
21
|
+
"@aigne/core": "^1.72.0-beta.23",
|
|
22
|
+
"@aigne/openai": "^0.16.16-beta.23"
|
|
23
23
|
},
|
|
24
24
|
"devDependencies": {
|
|
25
25
|
"@types/bun": "^1.2.22",
|
|
26
|
-
"@aigne/test-utils": "^0.5.69-beta.
|
|
26
|
+
"@aigne/test-utils": "^0.5.69-beta.23"
|
|
27
27
|
},
|
|
28
28
|
"scripts": {
|
|
29
29
|
"start": "bun run index.ts",
|