@perstack/runtime 0.0.62 → 0.0.64
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +163 -45
- package/bin/cli.ts +93 -0
- package/dist/bin/cli.d.ts +1 -0
- package/dist/bin/cli.js +247 -0
- package/dist/bin/cli.js.map +1 -0
- package/dist/chunk-C7AFEVYF.js +2355 -0
- package/dist/chunk-C7AFEVYF.js.map +1 -0
- package/dist/src/index.d.ts +61 -49
- package/dist/src/index.js +158 -2144
- package/dist/src/index.js.map +1 -1
- package/package.json +27 -14
- package/LICENSE +0 -202
package/README.md
CHANGED
@@ -10,24 +10,55 @@ This package serves as the engine of Perstack. It orchestrates the lifecycle of
 npm install @perstack/runtime
 ```

-## Usage
+## CLI Usage

-The
+The runtime can be executed as a standalone CLI:
+
+```bash
+perstack-runtime run <expertKey> <query> [options]
+```
+
+### Options
+
+| Option                  | Description             |
+| ----------------------- | ----------------------- |
+| `--config <path>`       | Path to perstack.toml   |
+| `--provider <provider>` | LLM provider            |
+| `--model <model>`       | Model name              |
+| `--temperature <temp>`  | Temperature (0.0-1.0)   |
+| `--max-steps <n>`       | Maximum steps           |
+| `--max-retries <n>`     | Maximum retries         |
+| `--timeout <ms>`        | Timeout in milliseconds |
+| `--job-id <id>`         | Job ID                  |
+| `--run-id <id>`         | Run ID                  |
+| `--env-path <path...>`  | Environment file paths  |
+
+### Example
+
+```bash
+perstack-runtime run my-expert "What is the weather?" --config ./perstack.toml
+```
+
+Output is JSON events (one per line) to stdout.
+
+## Programmatic Usage
+
+The primary entry point is the `run` function. It takes a `JobSetting` object and an optional `RunOptions` object.

 ```typescript
 import { run } from "@perstack/runtime"
-import { type
+import { type JobSetting } from "@perstack/core"

-// Configure the
-const setting:
-
+// Configure the job
+const setting: JobSetting = {
+  jobId: "job-123",
   expertKey: "researcher",
   input: { text: "Research quantum computing" },
   // ... configuration for model, experts, etc.
 }

-// Execute the
-const
+// Execute the job
+const finalJob = await run({ setting }, {
   eventListener: (event) => {
     console.log(`[${event.type}]`, event)
   }
@@ -43,8 +74,9 @@ type RunEvent = {
   type: EventType // e.g., "startRun", "callTools"
   id: string // Unique event ID
   timestamp: number // Unix timestamp
-
-
+  jobId: string // ID of the Job
+  runId: string // ID of the current Run
+  stepNumber: number // Current step number within this Run
   // ... plus payload specific to the event type
 }
 ```
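As an illustration of the two pieces above, here is a minimal sketch of consuming the CLI's output from Node.js. It assumes only what the README states: `perstack-runtime run` prints one JSON event per line to stdout, and each event carries the `RunEvent` fields shown in this hunk (`type`, `id`, `timestamp`, `jobId`, `runId`, `stepNumber`). The expert key and query are placeholders.

```typescript
// Sketch: spawn the CLI and parse its newline-delimited JSON event stream.
// Only the documented RunEvent fields are typed; event-specific payloads
// are left open because they vary per event type.
import { spawn } from "node:child_process"
import { createInterface } from "node:readline"

type CliEvent = {
  type: string
  id: string
  timestamp: number
  jobId: string
  runId: string
  stepNumber: number
  [key: string]: unknown
}

const child = spawn("perstack-runtime", ["run", "my-expert", "What is the weather?"])
child.stderr.pipe(process.stderr)

const lines = createInterface({ input: child.stdout })
lines.on("line", (line) => {
  const event = JSON.parse(line) as CliEvent
  console.log(`${event.type} (run ${event.runId}, step ${event.stepNumber})`)
})

child.on("close", (code) => console.log(`perstack-runtime exited with code ${code}`))
```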
@@ -72,11 +104,11 @@ eventListener: (event) => {

 The runtime manages skills through specialized Skill Managers. Each skill type has its own manager class:

-| Type | Manager | Purpose
-| --------------- | ------------------------- |
-| MCP (stdio/SSE) | `McpSkillManager` | External tools via MCP protocol
-| Interactive | `InteractiveSkillManager` | User input tools (
-| Delegate | `DelegateSkillManager` | Expert-to-Expert calls
+| Type            | Manager                   | Purpose                             |
+| --------------- | ------------------------- | ----------------------------------- |
+| MCP (stdio/SSE) | `McpSkillManager`         | External tools via MCP protocol     |
+| Interactive     | `InteractiveSkillManager` | User input tools (Coordinator only) |
+| Delegate        | `DelegateSkillManager`    | Expert-to-Expert calls              |

 All managers extend `BaseSkillManager` which provides:
 - `init()` — Initialize the skill (connect MCP servers, parse definitions)
@@ -84,6 +116,8 @@ All managers extend `BaseSkillManager` which provides:
 - `getToolDefinitions()` — Get available tools
 - `callTool()` — Execute a tool call

+**Note:** Interactive skills are only available to the Coordinator Expert. See [Experts documentation](https://docs.perstack.ai/understanding-perstack/experts#why-no-interactive-tools-for-delegates) for details.
+
 ### Initialization Flow

 ```
@@ -92,7 +126,7 @@ getSkillManagers(expert, experts, setting)
 ├─► Initialize MCP skills (parallel)
 │     └─► McpSkillManager × N
 │
-├─► Initialize Interactive skills (
+├─► Initialize Interactive skills (Coordinator only)
 │     └─► InteractiveSkillManager × N
 │
 └─► Initialize Delegate skills (parallel)
@@ -113,15 +147,20 @@ graph TD
     User((User)) -->|Provides| Input[Input / Query]

     subgraph Runtime [Runtime Engine]
-        subgraph
-
-
-
-
-
-
-
+        subgraph Job [Job]
+            subgraph Run1 [Run: Coordinator]
+                State[State Machine]
+                Context[Execution Context]
+
+                subgraph Skills [Skill Layer]
+                    SM[Skill Manager]
+                    MCP[MCP Client]
+                    MCPServer[MCP Server]
+                end
             end
+
+            Run2["Run: Delegate A"]
+            Run3["Run: Delegate B"]
         end
     end

@@ -130,8 +169,8 @@ graph TD
         Workspace[Workspace / FS]
     end

-    Def -->|Instantiates|
-    Input -->|Starts|
+    Def -->|Instantiates| Run1
+    Input -->|Starts| Run1

     State -->|Reasoning| LLM
     State -->|Act| SM
@@ -139,13 +178,34 @@ graph TD
     MCP -->|Connect| MCPServer
     MCPServer -->|Access| Workspace

-    SM -.->|Delegate|
-
-    State ~~~ Context
+    SM -.->|Delegate| Run2
+    SM -.->|Delegate| Run3
 ```

 ## Core Concepts

+### Execution Hierarchy
+
+```
+Job (jobId)
+├── Run 1 (Coordinator Expert)
+│   └── Checkpoints...
+├── Run 2 (Delegated Expert A)
+│   └── Checkpoints...
+└── Run 3 (Delegated Expert B)
+    └── Checkpoints...
+```
+
+| Concept        | Description                                  |
+| -------------- | -------------------------------------------- |
+| **Job**        | Top-level execution unit. Contains all Runs. |
+| **Run**        | Single Expert execution.                     |
+| **Checkpoint** | Snapshot at step end. Enables pause/resume.  |
+
+For details on step counting, Coordinator vs. Delegated Expert differences, and the full execution model, see [Runtime](https://docs.perstack.ai/understanding-perstack/runtime).
+
+### Events, Steps, Checkpoints
+
 The runtime's execution model can be visualized as a timeline where **Events** are points, **Steps** are the lines connecting them, and **Checkpoints** are the anchors.

 ```mermaid
@@ -164,13 +224,13 @@ graph LR
     style CP2 fill:#f96,stroke:#333,stroke-width:4px
 ```

-
+#### 1. Events
 **Events** are granular moments in time that occur *during* execution. They represent specific actions or observations, such as "started reasoning", "called tool", or "finished tool".

-
+#### 2. Step
 A **Step** is the continuous process that connects these events. It represents one atomic cycle of the agent's loop (Reasoning -> Act -> Observe, repeat).

-
+#### 3. Checkpoint
 A **Checkpoint** is the immutable result at the end of a Step. It serves as the anchor point that:
 - Finalizes the previous Step.
 - Becomes the starting point for the next Step.
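To make this timeline concrete, here is a hedged sketch of an `eventListener` that observes it programmatically. It relies only on the `RunEvent` fields documented earlier and the event names listed under "Events" further down; treating `continueToNextStep` as the step boundary is an interpretation for illustration, not something the README states.

```typescript
// Sketch: trace the Event → Step → Checkpoint timeline from run() events.
// Uses only documented fields (type, stepNumber, runId) and documented event
// names; the step-boundary interpretation is an assumption.
import type { RunEvent } from "@perstack/core"

const toolCallsPerStep = new Map<number, number>()

const eventListener = (event: RunEvent) => {
  if (event.type === "callTools") {
    // Count tool-call events observed within the current step.
    toolCallsPerStep.set(event.stepNumber, (toolCallsPerStep.get(event.stepNumber) ?? 0) + 1)
  }
  if (event.type === "continueToNextStep") {
    console.log(`run ${event.runId}: step ${event.stepNumber} finished`)
  }
  if (event.type === "completeRun") {
    console.log(`run ${event.runId} completed at step ${event.stepNumber}`)
  }
}

// Passed to run() exactly like the earlier example:
// await run({ setting }, { eventListener })
```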
@@ -194,7 +254,7 @@ stateDiagram-v2
     CallingTools --> ResolvingToolResults: resolveToolResults
     CallingTools --> ResolvingThought: resolveThought
     CallingTools --> GeneratingRunResult: attemptCompletion
-    CallingTools --> CallingDelegate:
+    CallingTools --> CallingDelegate: callDelegates
     CallingTools --> CallingInteractiveTool: callInteractiveTool

     ResolvingToolResults --> FinishingStep: finishToolCall
@@ -216,24 +276,82 @@ Events trigger state transitions. They are emitted by the runtime logic or exter
 - **Lifecycle**: `startRun`, `startGeneration`, `continueToNextStep`, `completeRun`
 - **Tool Execution**: `callTools`, `resolveToolResults`, `finishToolCall`, `resumeToolCalls`, `finishAllToolCalls`
 - **Special Types**: `resolveThought`
-- **
+- **Delegation**: `callDelegate` (triggers new Run(s) for delegate(s), parallel when multiple)
+- **Interactive**: `callInteractiveTool` (Coordinator only)
 - **Interruption**: `stopRunByInteractiveTool`, `stopRunByDelegate`, `stopRunByExceededMaxSteps`
 - **Error Handling**: `retry`

 ## Checkpoint Status

-The `status` field in a Checkpoint indicates the current state
+The `status` field in a Checkpoint indicates the current state:

--
--
--
--
-- **stoppedByDelegate**: The run is paused, waiting for a delegated sub-agent to complete.
-- **stoppedByExceededMaxSteps**: The run stopped because it reached the maximum allowed steps.
-- **stoppedByError**: The run stopped due to an unrecoverable error.
+- `init`, `proceeding` — Run lifecycle
+- `completed` — Task finished successfully
+- `stoppedByInteractiveTool`, `stoppedByDelegate` — Waiting for external input
+- `stoppedByExceededMaxSteps`, `stoppedByError` — Run stopped

-
+For stop reasons and error handling, see [Error Handling](https://docs.perstack.ai/using-experts/error-handling).
+
+## Runtime Adapters
+
+The runtime supports multiple execution backends through the adapter pattern. External runtime adapters are provided as separate packages:
+
+| Package                 | Runtime Name  | Description                 |
+| ----------------------- | ------------- | --------------------------- |
+| `@perstack/runtime`     | `perstack`    | Built-in runtime (default)  |
+| `@perstack/cursor`      | `cursor`      | Cursor IDE headless mode    |
+| `@perstack/claude-code` | `claude-code` | Claude Code CLI             |
+| `@perstack/gemini`      | `gemini`      | Gemini CLI                  |
+
+### Registration Pattern
+
+External adapters must be registered before use:
+
+```typescript
+import { CursorAdapter } from "@perstack/cursor"
+import { getAdapter, isAdapterAvailable, registerAdapter } from "@perstack/runtime"
+
+// Register external adapter
+registerAdapter("cursor", () => new CursorAdapter())
+
+// Check availability
+if (isAdapterAvailable("cursor")) {
+  const adapter = getAdapter("cursor")
+  const result = await adapter.checkPrerequisites()
+  if (result.ok) {
+    await adapter.run({ setting, eventListener })
+  }
+}
+```
+
+### Creating Custom Adapters
+
+Extend `BaseAdapter` from `@perstack/core` for CLI-based runtimes:

-
-
+```typescript
+import { BaseAdapter, type AdapterRunParams, type AdapterRunResult, type PrerequisiteResult } from "@perstack/core"
+
+class MyAdapter extends BaseAdapter {
+  readonly name = "my-runtime"
+
+  async checkPrerequisites(): Promise<PrerequisiteResult> {
+    const result = await this.execCommand(["my-cli", "--version"])
+    return result.exitCode === 0
+      ? { ok: true }
+      : { ok: false, error: { type: "cli-not-found", message: "..." } }
+  }
+
+  async run(params: AdapterRunParams): Promise<AdapterRunResult> {
+    // Implementation
+  }
+}
+```
+
+See [Multi-Runtime Support](https://docs.perstack.ai/using-experts/multi-runtime) for details.
+
+## Related Documentation

+- [Runtime](https://docs.perstack.ai/understanding-perstack/runtime) — Full execution model
+- [State Management](https://docs.perstack.ai/using-experts/state-management) — Jobs, Runs, and Checkpoints
+- [Running Experts](https://docs.perstack.ai/using-experts/running-experts) — CLI usage
+- [Multi-Runtime](https://docs.perstack.ai/using-experts/multi-runtime) — Multi-runtime support
package/bin/cli.ts
ADDED
@@ -0,0 +1,93 @@
+#!/usr/bin/env node
+
+import type { Checkpoint, RunEvent, RuntimeEvent } from "@perstack/core"
+import { parseWithFriendlyError, runCommandInputSchema } from "@perstack/core"
+import { Command } from "commander"
+import pkg from "../package.json" with { type: "json" }
+import { resolveRunContext } from "../src/cli/context.js"
+import { run } from "../src/run.js"
+
+const defaultEventListener = (event: RunEvent | RuntimeEvent) => console.log(JSON.stringify(event))
+
+const checkpointStore = new Map<string, Checkpoint>()
+const storeCheckpoint = async (checkpoint: Checkpoint) => {
+  checkpointStore.set(checkpoint.id, checkpoint)
+}
+const retrieveCheckpoint = async (_jobId: string, checkpointId: string) => {
+  const checkpoint = checkpointStore.get(checkpointId)
+  if (!checkpoint) {
+    throw new Error(`Checkpoint not found: ${checkpointId}`)
+  }
+  return checkpoint
+}
+
+const program = new Command()
+  .name("perstack-runtime")
+  .description("Perstack Runtime CLI - Execute Experts directly")
+  .version(pkg.version)
+
+program
+  .command("run")
+  .description("Run an Expert with JSON event output")
+  .argument("<expertKey>", "Expert key to run")
+  .argument("<query>", "Query to run")
+  .option("--config <configPath>", "Path to perstack.toml config file")
+  .option("--provider <provider>", "Provider to use")
+  .option("--model <model>", "Model to use")
+  .option("--temperature <temperature>", "Temperature for the model, default is 0.3")
+  .option(
+    "--max-steps <maxSteps>",
+    "Maximum number of steps to run, default is undefined (no limit)",
+  )
+  .option("--max-retries <maxRetries>", "Maximum number of generation retries, default is 5")
+  .option(
+    "--timeout <timeout>",
+    "Timeout for each generation in milliseconds, default is 60000 (1 minute)",
+  )
+  .option("--job-id <jobId>", "Job ID for identifying the job")
+  .option("--run-id <runId>", "Run ID for identifying the run")
+  .option("--env-path <envPath...>", "Path to the environment file, default is .env and .env.local")
+  .option("--verbose", "Enable verbose logging")
+  .action(async (expertKey, query, options) => {
+    const input = parseWithFriendlyError(runCommandInputSchema, { expertKey, query, options })
+    try {
+      const { perstackConfig, env, providerConfig, model, experts } = await resolveRunContext({
+        configPath: input.options.config,
+        provider: input.options.provider,
+        model: input.options.model,
+        envPath: input.options.envPath,
+      })
+      await run(
+        {
+          setting: {
+            jobId: input.options.jobId,
+            runId: input.options.runId,
+            expertKey: input.expertKey,
+            input: { text: input.query },
+            experts,
+            model,
+            providerConfig,
+            temperature: input.options.temperature ?? perstackConfig.temperature,
+            maxSteps: input.options.maxSteps ?? perstackConfig.maxSteps,
+            maxRetries: input.options.maxRetries ?? perstackConfig.maxRetries,
+            timeout: input.options.timeout ?? perstackConfig.timeout,
+            perstackApiBaseUrl: perstackConfig.perstackApiBaseUrl,
+            perstackApiKey: env.PERSTACK_API_KEY,
+            perstackBaseSkillCommand: perstackConfig.perstackBaseSkillCommand,
+            env,
+            proxyUrl: process.env.PERSTACK_PROXY_URL,
+          },
+        },
+        { eventListener: defaultEventListener, storeCheckpoint, retrieveCheckpoint },
+      )
+    } catch (error) {
+      if (error instanceof Error) {
+        console.error(error.message)
+      } else {
+        console.error(error)
+      }
+      process.exit(1)
+    }
+  })
+
+program.parse()
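The CLI above keeps checkpoints in an in-memory `Map`, so they only live for the duration of the process. As a hedged sketch of a persistent alternative, the following reuses the `storeCheckpoint`/`retrieveCheckpoint` signatures from `bin/cli.ts`; the on-disk layout (one JSON file per checkpoint under a local directory) is an assumption, not part of the package.

```typescript
// Sketch: a file-backed checkpoint store with the same shape as the in-memory
// one in bin/cli.ts. Signatures follow that file; the directory layout is an
// illustrative assumption.
import { mkdir, readFile, writeFile } from "node:fs/promises"
import path from "node:path"
import type { Checkpoint } from "@perstack/core"

const checkpointDir = ".perstack/checkpoints"

const storeCheckpoint = async (checkpoint: Checkpoint) => {
  await mkdir(checkpointDir, { recursive: true })
  await writeFile(path.join(checkpointDir, `${checkpoint.id}.json`), JSON.stringify(checkpoint))
}

const retrieveCheckpoint = async (_jobId: string, checkpointId: string): Promise<Checkpoint> => {
  try {
    const raw = await readFile(path.join(checkpointDir, `${checkpointId}.json`), "utf-8")
    return JSON.parse(raw) as Checkpoint
  } catch {
    throw new Error(`Checkpoint not found: ${checkpointId}`)
  }
}

// Passed to run() in place of the in-memory handlers:
// await run({ setting }, { eventListener, storeCheckpoint, retrieveCheckpoint })
```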
package/dist/bin/cli.d.ts
ADDED
@@ -0,0 +1 @@
+#!/usr/bin/env node
package/dist/bin/cli.js
ADDED
@@ -0,0 +1,247 @@
+#!/usr/bin/env node
+import { package_default, run } from '../chunk-C7AFEVYF.js';
+import { parseWithFriendlyError, runCommandInputSchema, perstackConfigSchema } from '@perstack/core';
+import { Command } from 'commander';
+import dotenv from 'dotenv';
+import { readFile } from 'fs/promises';
+import path from 'path';
+import TOML from 'smol-toml';
+
+function getEnv(envPath) {
+  const env = Object.fromEntries(
+    Object.entries(process.env).filter(([_, value]) => !!value).map(([key, value]) => [key, value])
+  );
+  dotenv.config({ path: envPath, processEnv: env, quiet: true });
+  return env;
+}
+async function getPerstackConfig(configPath) {
+  const configString = await findPerstackConfigString(configPath);
+  if (configString === null) {
+    throw new Error("perstack.toml not found. Create one or specify --config path.");
+  }
+  return await parsePerstackConfig(configString);
+}
+async function findPerstackConfigString(configPath) {
+  if (configPath) {
+    try {
+      const tomlString = await readFile(path.resolve(process.cwd(), configPath), "utf-8");
+      return tomlString;
+    } catch {
+      throw new Error(`Given config path "${configPath}" is not found`);
+    }
+  }
+  return await findPerstackConfigStringRecursively(path.resolve(process.cwd()));
+}
+async function findPerstackConfigStringRecursively(cwd) {
+  try {
+    const tomlString = await readFile(path.resolve(cwd, "perstack.toml"), "utf-8");
+    return tomlString;
+  } catch {
+    if (cwd === path.parse(cwd).root) {
+      return null;
+    }
+    return await findPerstackConfigStringRecursively(path.dirname(cwd));
+  }
+}
+async function parsePerstackConfig(config) {
+  const toml = TOML.parse(config ?? "");
+  return parseWithFriendlyError(perstackConfigSchema, toml, "perstack.toml");
+}
+
+// src/cli/provider-config.ts
+function getProviderConfig(provider, env, providerTable) {
+  const setting = providerTable?.setting ?? {};
+  switch (provider) {
+    case "anthropic": {
+      const apiKey = env.ANTHROPIC_API_KEY;
+      if (!apiKey) throw new Error("ANTHROPIC_API_KEY is not set");
+      return {
+        providerName: "anthropic",
+        apiKey,
+        baseUrl: setting.baseUrl ?? env.ANTHROPIC_BASE_URL,
+        headers: setting.headers
+      };
+    }
+    case "google": {
+      const apiKey = env.GOOGLE_GENERATIVE_AI_API_KEY;
+      if (!apiKey) throw new Error("GOOGLE_GENERATIVE_AI_API_KEY is not set");
+      return {
+        providerName: "google",
+        apiKey,
+        baseUrl: setting.baseUrl ?? env.GOOGLE_GENERATIVE_AI_BASE_URL,
+        headers: setting.headers
+      };
+    }
+    case "openai": {
+      const apiKey = env.OPENAI_API_KEY;
+      if (!apiKey) throw new Error("OPENAI_API_KEY is not set");
+      return {
+        providerName: "openai",
+        apiKey,
+        baseUrl: setting.baseUrl ?? env.OPENAI_BASE_URL,
+        organization: setting.organization ?? env.OPENAI_ORGANIZATION,
+        project: setting.project ?? env.OPENAI_PROJECT,
+        name: setting.name,
+        headers: setting.headers
+      };
+    }
+    case "ollama": {
+      return {
+        providerName: "ollama",
+        baseUrl: setting.baseUrl ?? env.OLLAMA_BASE_URL,
+        headers: setting.headers
+      };
+    }
+    case "azure-openai": {
+      const apiKey = env.AZURE_API_KEY;
+      if (!apiKey) throw new Error("AZURE_API_KEY is not set");
+      const resourceName = setting.resourceName ?? env.AZURE_RESOURCE_NAME;
+      const baseUrl = setting.baseUrl ?? env.AZURE_BASE_URL;
+      if (!resourceName && !baseUrl) throw new Error("AZURE_RESOURCE_NAME or baseUrl is not set");
+      return {
+        providerName: "azure-openai",
+        apiKey,
+        resourceName,
+        apiVersion: setting.apiVersion ?? env.AZURE_API_VERSION,
+        baseUrl,
+        headers: setting.headers,
+        useDeploymentBasedUrls: setting.useDeploymentBasedUrls
+      };
+    }
+    case "amazon-bedrock": {
+      const accessKeyId = env.AWS_ACCESS_KEY_ID;
+      const secretAccessKey = env.AWS_SECRET_ACCESS_KEY;
+      const sessionToken = env.AWS_SESSION_TOKEN;
+      if (!accessKeyId) throw new Error("AWS_ACCESS_KEY_ID is not set");
+      if (!secretAccessKey) throw new Error("AWS_SECRET_ACCESS_KEY is not set");
+      const region = setting.region ?? env.AWS_REGION;
+      if (!region) throw new Error("AWS_REGION is not set");
+      return {
+        providerName: "amazon-bedrock",
+        accessKeyId,
+        secretAccessKey,
+        region,
+        sessionToken
+      };
+    }
+    case "google-vertex": {
+      return {
+        providerName: "google-vertex",
+        project: setting.project ?? env.GOOGLE_VERTEX_PROJECT,
+        location: setting.location ?? env.GOOGLE_VERTEX_LOCATION,
+        baseUrl: setting.baseUrl ?? env.GOOGLE_VERTEX_BASE_URL,
+        headers: setting.headers
+      };
+    }
+    case "deepseek": {
+      const apiKey = env.DEEPSEEK_API_KEY;
+      if (!apiKey) throw new Error("DEEPSEEK_API_KEY is not set");
+      return {
+        providerName: "deepseek",
+        apiKey,
+        baseUrl: setting.baseUrl ?? env.DEEPSEEK_BASE_URL,
+        headers: setting.headers
+      };
+    }
+  }
+}
+
+// src/cli/context.ts
+var defaultProvider = "anthropic";
+var defaultModel = "claude-sonnet-4-5";
+async function resolveRunContext(input) {
+  const perstackConfig = await getPerstackConfig(input.configPath);
+  const env = getEnv(input.envPath ?? perstackConfig.envPath ?? [".env", ".env.local"]);
+  const provider = input.provider ?? perstackConfig.provider?.providerName ?? defaultProvider;
+  const model = input.model ?? perstackConfig.model ?? defaultModel;
+  const providerConfig = getProviderConfig(provider, env, perstackConfig.provider);
+  const experts = Object.fromEntries(
+    Object.entries(perstackConfig.experts ?? {}).map(([name, expert]) => {
+      return [
+        name,
+        {
+          key: name,
+          name,
+          version: expert.version ?? "1.0.0",
+          description: expert.description,
+          instruction: expert.instruction,
+          skills: expert.skills ?? {},
+          delegates: expert.delegates ?? [],
+          tags: expert.tags ?? []
+        }
+      ];
+    })
+  );
+  return {
+    perstackConfig,
+    env,
+    providerConfig,
+    model,
+    experts
+  };
+}
+
+// bin/cli.ts
+var defaultEventListener = (event) => console.log(JSON.stringify(event));
+var checkpointStore = /* @__PURE__ */ new Map();
+var storeCheckpoint = async (checkpoint) => {
+  checkpointStore.set(checkpoint.id, checkpoint);
+};
+var retrieveCheckpoint = async (_jobId, checkpointId) => {
+  const checkpoint = checkpointStore.get(checkpointId);
+  if (!checkpoint) {
+    throw new Error(`Checkpoint not found: ${checkpointId}`);
+  }
+  return checkpoint;
+};
+var program = new Command().name("perstack-runtime").description("Perstack Runtime CLI - Execute Experts directly").version(package_default.version);
+program.command("run").description("Run an Expert with JSON event output").argument("<expertKey>", "Expert key to run").argument("<query>", "Query to run").option("--config <configPath>", "Path to perstack.toml config file").option("--provider <provider>", "Provider to use").option("--model <model>", "Model to use").option("--temperature <temperature>", "Temperature for the model, default is 0.3").option(
+  "--max-steps <maxSteps>",
+  "Maximum number of steps to run, default is undefined (no limit)"
+).option("--max-retries <maxRetries>", "Maximum number of generation retries, default is 5").option(
+  "--timeout <timeout>",
+  "Timeout for each generation in milliseconds, default is 60000 (1 minute)"
+).option("--job-id <jobId>", "Job ID for identifying the job").option("--run-id <runId>", "Run ID for identifying the run").option("--env-path <envPath...>", "Path to the environment file, default is .env and .env.local").option("--verbose", "Enable verbose logging").action(async (expertKey, query, options) => {
+  const input = parseWithFriendlyError(runCommandInputSchema, { expertKey, query, options });
+  try {
+    const { perstackConfig, env, providerConfig, model, experts } = await resolveRunContext({
+      configPath: input.options.config,
+      provider: input.options.provider,
+      model: input.options.model,
+      envPath: input.options.envPath
+    });
+    await run(
+      {
+        setting: {
+          jobId: input.options.jobId,
+          runId: input.options.runId,
+          expertKey: input.expertKey,
+          input: { text: input.query },
+          experts,
+          model,
+          providerConfig,
+          temperature: input.options.temperature ?? perstackConfig.temperature,
+          maxSteps: input.options.maxSteps ?? perstackConfig.maxSteps,
+          maxRetries: input.options.maxRetries ?? perstackConfig.maxRetries,
+          timeout: input.options.timeout ?? perstackConfig.timeout,
+          perstackApiBaseUrl: perstackConfig.perstackApiBaseUrl,
+          perstackApiKey: env.PERSTACK_API_KEY,
+          perstackBaseSkillCommand: perstackConfig.perstackBaseSkillCommand,
+          env,
+          proxyUrl: process.env.PERSTACK_PROXY_URL
+        }
+      },
+      { eventListener: defaultEventListener, storeCheckpoint, retrieveCheckpoint }
+    );
+  } catch (error) {
+    if (error instanceof Error) {
+      console.error(error.message);
+    } else {
+      console.error(error);
+    }
+    process.exit(1);
+  }
+});
+program.parse();
+//# sourceMappingURL=cli.js.map
+//# sourceMappingURL=cli.js.map
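The compiled `getProviderConfig` above makes the per-provider environment requirements explicit. As a small illustrative sketch (not part of the package), a preflight check that mirrors the required variables visible in that switch statement:

```typescript
// Sketch: check the env vars that getProviderConfig requires per provider,
// read off the switch in dist/bin/cli.js. Nuances are simplified: bedrock's
// region and azure's resource name/base URL can also come from the provider
// setting in perstack.toml, and ollama / google-vertex need no API key there.
const requiredEnvVars: Record<string, string[]> = {
  anthropic: ["ANTHROPIC_API_KEY"],
  google: ["GOOGLE_GENERATIVE_AI_API_KEY"],
  openai: ["OPENAI_API_KEY"],
  "azure-openai": ["AZURE_API_KEY"],
  "amazon-bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"],
  deepseek: ["DEEPSEEK_API_KEY"],
  ollama: [],
  "google-vertex": [],
}

export function missingProviderEnv(
  provider: string,
  env: Record<string, string | undefined> = process.env,
): string[] {
  return (requiredEnvVars[provider] ?? []).filter((name) => !env[name])
}

// Example: missingProviderEnv("anthropic") returns ["ANTHROPIC_API_KEY"] when unset.
```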