@aigne/example-workflow-router 1.19.6-beta.9 → 1.19.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/README.md +5 -5
  2. package/package.json +7 -7
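The same file-level comparison can be reproduced locally with npm's built-in `diff` command (a minimal sketch, assuming npm 7.5 or newer; the output formatting may differ slightly from the registry viewer shown below):

```bash
# Compare the two published versions straight from the registry
npm diff --diff=@aigne/example-workflow-router@1.19.6-beta.9 \
         --diff=@aigne/example-workflow-router@1.19.6
```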
package/README.md CHANGED
@@ -52,7 +52,7 @@ class other processing
 npx -y @aigne/example-workflow-router

 # Run in interactive chat mode
-npx -y @aigne/example-workflow-router --chat
+npx -y @aigne/example-workflow-router --interactive

 # Use pipeline input
 echo "How do I return a product?" | npx -y @aigne/example-workflow-router
@@ -60,7 +60,7 @@ echo "How do I return a product?" | npx -y @aigne/example-workflow-router

 ### Connect to an AI Model

-As an example, running `npx -y @aigne/example-workflow-router --chat` requires an AI model. If this is your first run, you need to connect one.
+As an example, running `npx -y @aigne/example-workflow-router --interactive` requires an AI model. If this is your first run, you need to connect one.

 ![run example](./run-example.png)

@@ -121,7 +121,7 @@ pnpm install
 pnpm start # Run in one-shot mode (default)

 # Run in interactive chat mode
-pnpm start -- --chat
+pnpm start -- --interactive

 # Use pipeline input
 echo "How do I return a product?" | pnpm start
@@ -133,7 +133,7 @@ The example supports the following command-line parameters:

 | Parameter | Description | Default |
 |-----------|-------------|---------|
-| `--chat` | Run in interactive chat mode | Disabled (one-shot mode) |
+| `--interactive` | Run in interactive chat mode | Disabled (one-shot mode) |
 | `--model <provider[:model]>` | AI model to use in format 'provider\[:model]' where model is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
 | `--temperature <value>` | Temperature for model generation | Provider default |
 | `--top-p <value>` | Top-p sampling value | Provider default |
@@ -146,7 +146,7 @@ The example supports the following command-line parameters:

 ```bash
 # Run in chat mode (interactive)
-pnpm start -- --chat
+pnpm start -- --interactive

 # Set logging level
 pnpm start -- --log-level DEBUG
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aigne/example-workflow-router",
-  "version": "1.19.6-beta.9",
+  "version": "1.19.6",
   "description": "A demonstration of using AIGNE Framework to build a router workflow",
   "author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
   "homepage": "https://github.com/AIGNE-io/aigne-framework/tree/main/examples/workflow-router",
@@ -16,15 +16,15 @@
     "README.md"
   ],
   "dependencies": {
-    "@aigne/agent-library": "^1.24.0-beta.7",
-    "@aigne/cli": "^1.59.0-beta.8",
-    "@aigne/core": "^1.72.0-beta.6",
-    "@aigne/openai": "^0.16.16-beta.6",
-    "@aigne/default-memory": "^1.3.6-beta.6"
+    "@aigne/agent-library": "^1.24.0",
+    "@aigne/cli": "^1.59.0",
+    "@aigne/core": "^1.72.0",
+    "@aigne/default-memory": "^1.4.0",
+    "@aigne/openai": "^0.16.16"
   },
   "devDependencies": {
     "@types/bun": "^1.2.22",
-    "@aigne/test-utils": "^0.5.69-beta.6"
+    "@aigne/test-utils": "^0.5.69"
   },
   "scripts": {
     "start": "bun run index.ts",