@aigne/example-workflow-sequential 1.10.2 → 1.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/README.md +44 -3
  2. package/index.ts +12 -17
  3. package/package.json +6 -6
  4. package/usages.ts +1 -1
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # Workflow Sequential Demo
2
2
 
3
- This is a demonstration of using [AIGNE Framework](https://github.com/AIGNE-io/aigne-framework) to build a sequential workflow.
3
+ This is a demonstration of using [AIGNE Framework](https://github.com/AIGNE-io/aigne-framework) to build a sequential workflow. The example now supports both one-shot and interactive chat modes, along with customizable model settings and pipeline input/output.
4
4
 
5
5
  ```mermaid
6
6
  flowchart LR
@@ -35,7 +35,14 @@ class formatProof processing
35
35
  ```bash
36
36
  export OPENAI_API_KEY=YOUR_OPENAI_API_KEY # Set your OpenAI API key
37
37
 
38
- npx -y @aigne/example-workflow-sequential # Run the example
38
+ # Run in one-shot mode (default)
39
+ npx -y @aigne/example-workflow-sequential
40
+
41
+ # Run in interactive chat mode
42
+ npx -y @aigne/example-workflow-sequential --chat
43
+
44
+ # Use pipeline input
45
+ echo "Create marketing content for our new AI-powered fitness app" | npx -y @aigne/example-workflow-sequential
39
46
  ```
40
47
 
41
48
  ## Installation
@@ -65,7 +72,41 @@ OPENAI_API_KEY="" # Set your OpenAI API key here
65
72
  ### Run the Example
66
73
 
67
74
  ```bash
68
- pnpm start
75
+ pnpm start # Run in one-shot mode (default)
76
+
77
+ # Run in interactive chat mode
78
+ pnpm start -- --chat
79
+
80
+ # Use pipeline input
81
+ echo "Create marketing content for our new AI-powered fitness app" | pnpm start
82
+ ```
83
+
84
+ ### Run Options
85
+
86
+ The example supports the following command-line parameters:
87
+
88
+ | Parameter | Description | Default |
89
+ |-----------|-------------|---------|
90
+ | `--chat` | Run in interactive chat mode | Disabled (one-shot mode) |
91
+ | `--model <provider[:model]>` | AI model to use in format 'provider[:model]' where model is optional. Examples: 'openai' or 'openai:gpt-4o-mini' | openai |
92
+ | `--temperature <value>` | Temperature for model generation | Provider default |
93
+ | `--top-p <value>` | Top-p sampling value | Provider default |
94
+ | `--presence-penalty <value>` | Presence penalty value | Provider default |
95
+ | `--frequency-penalty <value>` | Frequency penalty value | Provider default |
96
+ | `--log-level <level>` | Set logging level (ERROR, WARN, INFO, DEBUG, TRACE) | INFO |
97
+ | `--input`, `-i <input>` | Specify input directly | None |
98
+
99
+ #### Examples
100
+
101
+ ```bash
102
+ # Run in chat mode (interactive)
103
+ pnpm start -- --chat
104
+
105
+ # Set logging level
106
+ pnpm start -- --log-level DEBUG
107
+
108
+ # Use pipeline input
109
+ echo "Create marketing content for our new AI-powered fitness app" | pnpm start
69
110
  ```
70
111
 
71
112
  ## Example
package/index.ts CHANGED
@@ -1,10 +1,7 @@
1
1
  #!/usr/bin/env bunwrapper
2
2
 
3
- import { runChatLoopInTerminal } from "@aigne/cli/utils/run-chat-loop.js";
4
- import { AIAgent, AIGNE, ProcessMode, TeamAgent } from "@aigne/core";
5
- import { loadModel } from "@aigne/core/loader/index.js";
6
-
7
- const model = await loadModel();
3
+ import { runWithAIGNE } from "@aigne/cli/utils/run-with-aigne.js";
4
+ import { AIAgent, ProcessMode, TeamAgent } from "@aigne/core";
8
5
 
9
6
  const conceptExtractor = AIAgent.from({
10
7
  instructions: `\
@@ -48,17 +45,15 @@ Draft copy:
48
45
  outputKey: "content",
49
46
  });
50
47
 
51
- const aigne = new AIGNE({ model });
52
-
53
- const userAgent = aigne.invoke(
54
- TeamAgent.from({
55
- skills: [conceptExtractor, writer, formatProof],
56
- mode: ProcessMode.sequential,
57
- }),
58
- );
48
+ const agent = TeamAgent.from({
49
+ skills: [conceptExtractor, writer, formatProof],
50
+ mode: ProcessMode.sequential,
51
+ });
59
52
 
60
- await runChatLoopInTerminal(userAgent, {
61
- welcome: `Hello, I'm a marketing assistant. I can help you with product descriptions, marketing copy, and editing.`,
62
- defaultQuestion: "AIGNE is a No-code Generative AI Apps Engine",
63
- inputKey: "product",
53
+ await runWithAIGNE(agent, {
54
+ chatLoopOptions: {
55
+ welcome: `Hello, I'm a marketing assistant. I can help you with product descriptions, marketing copy, and editing.`,
56
+ defaultQuestion: "AIGNE is a No-code Generative AI Apps Engine",
57
+ inputKey: "product",
58
+ },
64
59
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@aigne/example-workflow-sequential",
3
- "version": "1.10.2",
3
+ "version": "1.11.0",
4
4
  "description": "A demonstration of using AIGNE Framework to build a sequential workflow",
5
5
  "author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
6
6
  "homepage": "https://github.com/AIGNE-io/aigne-framework/tree/main/examples/workflow-sequential",
@@ -16,13 +16,13 @@
16
16
  "README.md"
17
17
  ],
18
18
  "dependencies": {
19
- "openai": "^4.97.0",
20
- "zod": "^3.24.4",
21
- "@aigne/cli": "^1.9.1",
22
- "@aigne/core": "^1.15.0"
19
+ "@aigne/cli": "^1.10.0",
20
+ "@aigne/openai": "^0.1.0",
21
+ "@aigne/core": "^1.16.0"
23
22
  },
24
23
  "devDependencies": {
25
- "@aigne/test-utils": "^0.2.0"
24
+ "@types/bun": "^1.2.9",
25
+ "@aigne/test-utils": "^0.3.0"
26
26
  },
27
27
  "scripts": {
28
28
  "start": "bun run index.ts",
package/usages.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  import assert from "node:assert";
2
2
  import { AIAgent, AIGNE, ProcessMode, TeamAgent } from "@aigne/core";
3
- import { OpenAIChatModel } from "@aigne/core/models/openai-chat-model.js";
3
+ import { OpenAIChatModel } from "@aigne/openai";
4
4
 
5
5
  const { OPENAI_API_KEY } = process.env;
6
6
  assert(OPENAI_API_KEY, "Please set the OPENAI_API_KEY environment variable");