@sesamespace/hivemind 0.8.3 → 0.8.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/.pnpmrc.json +1 -0
  2. package/DASHBOARD-PLAN.md +206 -0
  3. package/TOOL-USE-DESIGN.md +173 -0
  4. package/config/default.toml +3 -1
  5. package/dist/{chunk-ZR23DEZ6.js → chunk-ELRUZHT5.js} +2 -2
  6. package/dist/{chunk-DODOQGIL.js → chunk-ESY7ZS46.js} +29 -11
  7. package/dist/chunk-ESY7ZS46.js.map +1 -0
  8. package/dist/{chunk-X54OQAJA.js → chunk-FQ2HFJDZ.js} +2 -2
  9. package/dist/{chunk-VCITLDMK.js → chunk-Q5ZO5WXM.js} +242 -28
  10. package/dist/chunk-Q5ZO5WXM.js.map +1 -0
  11. package/dist/{chunk-YWQZZS43.js → chunk-TRRT2WFH.js} +2 -2
  12. package/dist/{chunk-HSNWMYVC.js → chunk-TTL73U2P.js} +3 -3
  13. package/dist/commands/fleet.js +3 -3
  14. package/dist/commands/init.js +1 -1
  15. package/dist/commands/start.js +3 -3
  16. package/dist/commands/watchdog.js +3 -3
  17. package/dist/index.js +2 -2
  18. package/dist/main.js +6 -6
  19. package/dist/start.js +1 -1
  20. package/docs/TOOL-PARITY-PLAN.md +191 -0
  21. package/package.json +22 -25
  22. package/dist/chunk-DODOQGIL.js.map +0 -1
  23. package/dist/chunk-VCITLDMK.js.map +0 -1
  24. package/install.sh +0 -131
  25. package/packages/memory/Cargo.lock +0 -6480
  26. package/packages/memory/Cargo.toml +0 -21
  27. package/packages/memory/src/src/context.rs +0 -179
  28. package/packages/memory/src/src/embeddings.rs +0 -51
  29. package/packages/memory/src/src/main.rs +0 -626
  30. package/packages/memory/src/src/promotion.rs +0 -637
  31. package/packages/memory/src/src/scoring.rs +0 -131
  32. package/packages/memory/src/src/store.rs +0 -460
  33. package/packages/memory/src/src/tasks.rs +0 -321
  34. package/dist/{chunk-ZR23DEZ6.js.map → chunk-ELRUZHT5.js.map} +0 -0
  35. package/dist/{chunk-X54OQAJA.js.map → chunk-FQ2HFJDZ.js.map} +0 -0
  36. package/dist/{chunk-YWQZZS43.js.map → chunk-TRRT2WFH.js.map} +0 -0
  37. package/dist/{chunk-HSNWMYVC.js.map → chunk-TTL73U2P.js.map} +0 -0
package/.pnpmrc.json ADDED
@@ -0,0 +1 @@
+ {"onlyBuiltDependencies":["better-sqlite3"]}
package/DASHBOARD-PLAN.md ADDED
@@ -0,0 +1,206 @@
+ # Hivemind Dashboard — Implementation Plan
+
+ **Goal:** Local web dashboard for debugging memory, context routing, and LLM request formation.
+ **Access:** `http://localhost:9485` on the Mac mini (local access only for now).
+ **Priority:** LLM Request Inspector first, then Memory Browser, then Context Overview.
+
+ ---
+
+ ## Phase 1: LLM Request Logger + Inspector UI
+
+ ### Backend: Request Logging
+
+ **Where:** Instrument `buildMessages()` in `prompt.ts` and `processMessage()` in `agent.ts`.
+
+ Each logged request captures:
+ ```typescript
+ interface RequestLog {
+   id: string; // uuid
+   timestamp: string; // ISO-8601
+   // Routing
+   context: string; // which context was used
+   contextSwitched: boolean; // explicit switch?
+   routingReason: string; // "pattern_match:X" | "inferred:X" | "active:X"
+   // Sender
+   channelId: string;
+   channelKind: "dm" | "group";
+   senderHandle: string;
+   rawMessage: string; // as received (with prefix)
+   // Prompt components (broken out for UI)
+   systemPrompt: {
+     identity: string; // workspace files section
+     l3Knowledge: string[]; // individual L3 entries
+     l2Episodes: Array<{
+       id: string;
+       content: string;
+       score: number;
+       timestamp: string;
+       context_name: string;
+       role: string;
+     }>;
+     contextInfo: string; // active context section
+     fullText: string; // complete system prompt as sent
+   };
+   conversationHistory: Array<{ role: string; content: string }>; // L1 turns included
+   userMessage: string; // final user message
+   // Response
+   response: {
+     content: string;
+     model: string;
+     latencyMs: number;
+     skipped: boolean; // was it __SKIP__?
+   };
+   // Config snapshot
+   config: {
+     topK: number;
+     model: string;
+     maxTokens: number;
+     temperature: number;
+   };
+   // Approximate token counts (char-based estimate: chars/4)
+   tokenEstimates: {
+     systemPrompt: number;
+     conversationHistory: number;
+     userMessage: number;
+     total: number;
+   };
+ }
+ ```
+
+ **Storage:** SQLite database at `data/dashboard.db`.
+ - Single `request_logs` table with JSON columns for complex fields.
+ - Auto-prune: keep the last 7 days or the last 10,000 entries, whichever limit is hit first.
+ - Why SQLite over a ring buffer: survives restarts, queryable, minimal overhead.
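+
+ A minimal sketch of the storage layer, assuming better-sqlite3; the table shape is illustrative (the full `RequestLog` rides along as JSON):
+
+ ```typescript
+ import Database from "better-sqlite3";
+
+ const db = new Database("data/dashboard.db");
+ db.exec(`
+   CREATE TABLE IF NOT EXISTS request_logs (
+     id TEXT PRIMARY KEY,
+     timestamp TEXT NOT NULL,
+     context TEXT,
+     sender_handle TEXT,
+     payload TEXT NOT NULL -- full RequestLog as JSON
+   );
+   CREATE INDEX IF NOT EXISTS idx_logs_ts ON request_logs (timestamp);
+ `);
+
+ function logRequest(log: RequestLog): void {
+   db.prepare(
+     "INSERT INTO request_logs (id, timestamp, context, sender_handle, payload) VALUES (?, ?, ?, ?, ?)"
+   ).run(log.id, log.timestamp, log.context, log.senderHandle, JSON.stringify(log));
+ }
+
+ // Auto-prune on startup: drop rows older than 7 days, then cap at the 10,000 newest.
+ function prune(): void {
+   db.prepare("DELETE FROM request_logs WHERE timestamp < datetime('now', '-7 days')").run();
+   db.prepare(
+     "DELETE FROM request_logs WHERE id NOT IN (SELECT id FROM request_logs ORDER BY timestamp DESC LIMIT 10000)"
+   ).run();
+ }
+ ```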
+
+ **Token estimation:** Use a chars/4 approximation; it is good enough for relative sizing and avoids a tokenizer dependency.
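+
+ For example, building the `tokenEstimates` block (field names as in `RequestLog` above; `history` stands in for the L1 turns):
+
+ ```typescript
+ // ~4 characters per token is a reasonable rough cut for English text.
+ const estimateTokens = (text: string): number => Math.ceil(text.length / 4);
+
+ const systemTokens = estimateTokens(systemPrompt.fullText);
+ const historyTokens = estimateTokens(history.map((m) => m.content).join("\n"));
+ const userTokens = estimateTokens(userMessage);
+
+ const tokenEstimates = {
+   systemPrompt: systemTokens,
+   conversationHistory: historyTokens,
+   userMessage: userTokens,
+   total: systemTokens + historyTokens + userTokens,
+ };
+ ```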
+
+ **Logging approach:** Eager logging. Serialize at request time. The overhead is minimal (~1ms for JSON.stringify) compared to LLM latency (~1-10s), and capturing the exact state at request time is more valuable than lazy reconstruction.
+
+ ### Backend: Dashboard HTTP Server
+
+ **Where:** New file `packages/runtime/src/dashboard.ts`.
+
+ Extend the existing health server (or create a sibling on port 9485):
+ - `GET /` — serve the SPA (single HTML file)
+ - `GET /api/requests` — list recent requests (paginated, filterable)
+ - `GET /api/requests/:id` — single request detail
+ - `GET /api/contexts` — proxy to memory daemon's context list
+ - `GET /api/contexts/:name/episodes` — proxy L2 episodes
+ - `GET /api/contexts/:name/l3` — proxy L3 knowledge
+ - `GET /api/stats` — memory stats (episode counts, last promotion, etc.)
+ - `DELETE /api/l3/:id` — delete a bad L3 entry (write op from day 1)
+ - `POST /api/l3/:id/edit` — edit L3 entry content
+
+ Bind to `127.0.0.1:9485` only.
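+
+ A minimal sketch of the server shell, using Node's built-in `http` module as planned in Step 3 below; the routing and the `getRequests()` call (from the Step 1 request logger) are illustrative:
+
+ ```typescript
+ import { createServer } from "http";
+ import { readFileSync } from "fs";
+
+ const server = createServer((req, res) => {
+   const url = new URL(req.url ?? "/", "http://127.0.0.1:9485");
+   if (url.pathname === "/") {
+     res.writeHead(200, { "Content-Type": "text/html" });
+     res.end(readFileSync("dashboard.html"));
+   } else if (url.pathname === "/api/requests") {
+     const limit = Number(url.searchParams.get("limit") ?? 50);
+     res.writeHead(200, { "Content-Type": "application/json" });
+     res.end(JSON.stringify(getRequests({ limit }))); // from request-logger (Step 1)
+   } else {
+     res.writeHead(404).end();
+   }
+ });
+
+ // Local access only: bind to loopback, never 0.0.0.0.
+ server.listen(9485, "127.0.0.1");
+ ```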
+
+ ### Frontend: Single-File SPA
+
+ **Why single file:** No build step, no React, no dependencies. Ship as one HTML file with embedded CSS/JS. Can always upgrade later.
+
+ **Layout:**
+ - Left sidebar: navigation (Requests, Memory, Contexts)
+ - Main area: content
+
+ **Request Inspector view:**
+ - Reverse-chronological list of requests
+ - Each row: timestamp, sender, context, model, latency, token estimate
+ - Click to expand → shows all sections:
+   - **Identity files** (collapsible, usually not interesting)
+   - **L3 Knowledge** (list of entries with metadata)
+   - **L2 Episodes** (with similarity scores, timestamps, source context)
+   - **L1 History** (conversation turns)
+   - **User Message** (raw with prefix)
+   - **Response** (with model, latency)
+   - **Config** (top_k, model, temperature)
+   - **Token breakdown** (bar chart showing proportion per section)
+ - Filters: by context, by sender, by time range
+ - Search: full-text search across messages
+
+ **Memory Browser view (Phase 2):**
+ - L2: searchable episode list, filterable by context/role/time
+ - L3: per-context knowledge entries with edit/delete buttons
+ - Promotion log (if we add logging for it)
+
+ **Context Overview (Phase 2):**
+ - List of contexts with episode counts, last active
+ - Active context highlighted
+ - Click to drill into episodes/L3
+
+ ---
+
+ ## Phase 2: Memory Browser + Context Overview
+
+ After Phase 1 is working and useful, add:
+ - Full L2 browsing with semantic search UI
+ - L3 management (view, edit, delete)
+ - Context explorer with stats
+ - Promotion history logging
+
+ ---
+
+ ## Implementation Steps (Phase 1)
+
+ ### Step 1: Request logging infrastructure
+ - [ ] Create `packages/runtime/src/request-logger.ts`
+   - SQLite setup (using better-sqlite3)
+   - `logRequest()` method
+   - `getRequests()` with pagination/filters
+   - `getRequest(id)` for detail view
+   - Auto-pruning on startup
+ - [ ] Add better-sqlite3 dependency
+
+ ### Step 2: Instrument the pipeline
+ - [ ] Modify `agent.ts` `processMessage()` to capture routing decision + timing
+ - [ ] Modify `prompt.ts` `buildSystemPrompt()` to return structured components, not just a string (see the sketch after this list)
+ - [ ] Log each request after the LLM response arrives
+ - [ ] Capture config snapshot with each log entry
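+
+ A sketch of what the structured return could look like; `assemblePrompt` and its parameters are illustrative names, mirroring the `systemPrompt` shape in `RequestLog`:
+
+ ```typescript
+ interface Episode {
+   id: string; content: string; score: number;
+   timestamp: string; context_name: string; role: string;
+ }
+
+ interface PromptParts {
+   identity: string;
+   l3Knowledge: string[];
+   l2Episodes: Episode[];
+   contextInfo: string;
+   fullText: string; // what actually gets sent to the LLM
+ }
+
+ function assemblePrompt(identity: string, l3: string[], l2: Episode[], contextInfo: string): PromptParts {
+   const fullText = [identity, l3.join("\n"), l2.map((e) => e.content).join("\n"), contextInfo].join("\n\n");
+   return { identity, l3Knowledge: l3, l2Episodes: l2, contextInfo, fullText };
+ }
+ ```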
+
+ ### Step 3: Dashboard HTTP server
+ - [ ] Create `packages/runtime/src/dashboard.ts`
+   - Express-free: use Node's built-in `http` module (like the health server)
+   - Serve SPA at `/`
+   - JSON APIs for request logs and memory proxy
+ - [ ] Wire into `pipeline.ts` startup
+
+ ### Step 4: Frontend SPA
+ - [ ] Single HTML file at `packages/runtime/src/dashboard.html`
+   - Vanilla JS, no framework
+   - CSS grid layout
+   - Fetch-based API calls
+   - Expandable request cards
+   - Token breakdown visualization
+   - Basic filtering
+
+ ### Step 5: Memory proxy + write ops
+ - [ ] Proxy endpoints to memory daemon for L2/L3 browsing
+ - [ ] DELETE/PATCH endpoints for L3 management
+
+ ---
+
+ ## Design Decisions
+
+ | Question | Decision | Rationale |
+ |----------|----------|-----------|
+ | Storage | SQLite | Survives restarts, queryable, lightweight |
+ | Token counting | chars/4 estimate | Good enough, no tokenizer dep |
+ | Logging | Eager | Captures exact state, overhead negligible vs LLM latency |
+ | Bind address | 127.0.0.1 only | Local access, no auth needed |
+ | Framework | None (vanilla) | Single HTML file, no build step |
+ | Read-only or read-write? | Read-write from start | Ryan will want to delete bad L3 entries immediately |
+ | Persist request logs? | Yes, 7 days | Need to compare across memory config changes |
+ | Multi-agent? | Single agent for now | Don't over-engineer, but use agent name in logs |
+ | Port | 9485 | Next to health port (9484), easy to remember |
+
+ ---
+
+ ## Sesame Command Fix (Bonus)
+
+ While we're in the code, fix the sender prefix issue:
+ - In `pipeline.ts` `startSesameLoop()`, before calling `agent.processMessage()`, strip the sender prefix for command parsing
+ - Or better: in `agent.ts` `handleSpecialCommand()`, strip known prefix patterns before regex matching (see the sketch after this list)
+ - This unblocks context switching, task commands, and cross-context search over Sesame
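+
+ A minimal sketch of the strip, assuming the sender prefix looks like `[@handle]` or `handle:` at the start of the message; the real pattern should match whatever Sesame actually prepends:
+
+ ```typescript
+ // Strip a leading sender prefix like "[@ryan] " or "ryan: " before command parsing.
+ function stripSenderPrefix(raw: string): string {
+   return raw.replace(/^\s*(\[@?[\w.-]+\]|[\w.-]+:)\s*/, "");
+ }
+
+ // handleSpecialCommand(stripSenderPrefix(rawMessage)) then sees the bare command text.
+ ```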
+
+ ---
+
+ *Created: 2026-02-28*
+ *Status: Ready to implement*
package/TOOL-USE-DESIGN.md ADDED
@@ -0,0 +1,173 @@
+ # Hivemind Tool Use — Architecture Design
+
+ ## Current State
+
+ The LLM client does simple chat completions: `messages[] → response.content`. No tool/function calling.
+
+ ## Goal
+
+ Full agentic tool-use loop matching OpenClaw capabilities, with Hivemind's memory system as a differentiator.
+
+ ## Architecture
+
+ ### 1. Tool Calling Protocol (OpenAI-compatible, works with OpenRouter)
+
+ The OpenAI chat completions API supports `tools` (function definitions) and `tool_choice`. When the model wants to use a tool, it returns a `tool_calls` array instead of (or alongside) content. We then execute the tool, append the result as a `tool` role message, and call the model again.
+
+ ```
+ User message
+
+ LLM (with tools defined)
+
+ If tool_calls → execute tools → append results → call LLM again (loop)
+ If content only → return response
+ ```
+
+ This is a **while loop**, not a single call. The model may chain multiple tool calls before producing a final text response.
+
+ ### 2. Key Data Structures
+
+ ```typescript
+ interface ToolDefinition {
+   name: string;
+   description: string;
+   parameters: JSONSchema; // JSON Schema for function params
+ }
+
+ interface ToolCall {
+   id: string;
+   type: "function";
+   function: { name: string; arguments: string }; // arguments is JSON string
+ }
+
+ interface ToolResult {
+   tool_call_id: string;
+   role: "tool";
+   content: string; // result as string
+ }
+
+ // Extended message types
+ interface AssistantMessage {
+   role: "assistant";
+   content: string | null;
+   tool_calls?: ToolCall[];
+ }
+
+ interface ToolMessage {
+   role: "tool";
+   tool_call_id: string;
+   content: string;
+ }
+ ```
+
+ ### 3. Tool Registry
+
+ A simple registry where tools are registered with:
+ - Name
+ - Description (for the LLM)
+ - JSON Schema for parameters
+ - Executor function: `(params: any) => Promise<string>`
+
+ ```typescript
+ class ToolRegistry {
+   private tools: Map<string, { def: ToolDefinition; exec: (params: any) => Promise<string> }>;
+
+   register(name, description, schema, executor): void;
+   getDefinitions(): ToolDefinition[]; // For LLM API call
+   execute(name: string, params: any): Promise<string>; // Run a tool
+ }
+ ```
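+
+ For example, registering a tool (a sketch; the `web_search` schema is illustrative and `braveSearch` is a hypothetical helper):
+
+ ```typescript
+ // Hypothetical Brave API helper, declared only so the sketch type-checks.
+ declare function braveSearch(query: string): Promise<Array<{ title: string; url: string; snippet: string }>>;
+
+ const registry = new ToolRegistry();
+
+ registry.register(
+   "web_search",
+   "Search the web and return titles, URLs, and snippets.",
+   {
+     type: "object",
+     properties: { query: { type: "string", description: "Search query" } },
+     required: ["query"],
+   },
+   async (params) => JSON.stringify(await braveSearch(params.query)) // executor
+ );
+
+ // Pass registry.getDefinitions() in the LLM request; then, on each tool_call:
+ // const result = await registry.execute(call.function.name, JSON.parse(call.function.arguments));
+ ```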
+
+ ### 4. The Agentic Loop (in Agent.processMessage)
+
+ ```
+ 1. Build messages (system + history + user)
+ 2. Call LLM with tools
+ 3. While response has tool_calls:
+    a. For each tool_call: execute, collect result
+    b. Append assistant message (with tool_calls) to messages
+    c. Append tool result messages
+    d. Call LLM again with updated messages
+ 4. Return final text content
+ 5. Store in memory (include tool usage summary)
+ ```
+
+ **Safety limits:**
+ - Max iterations per turn (e.g., 25)
+ - Max total tokens per turn
+ - Tool execution timeout (per tool)
+ - Dangerous command confirmation (optional)
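+
+ A sketch of the loop with the iteration cap applied; `llm.chatWithTools()` is the extended client method from section 8 below, and its exact signature here is an assumption:
+
+ ```typescript
+ async function runToolLoop(
+   messages: Array<AssistantMessage | ToolMessage | { role: "system" | "user"; content: string }>,
+   registry: ToolRegistry,
+   maxIterations = 25
+ ): Promise<string> {
+   for (let i = 0; i < maxIterations; i++) {
+     const reply: AssistantMessage = await llm.chatWithTools(messages, registry.getDefinitions());
+     if (!reply.tool_calls?.length) return reply.content ?? ""; // plain text: done
+     messages.push(reply); // assistant message carrying the tool_calls
+     for (const call of reply.tool_calls) {
+       const content = await registry.execute(call.function.name, JSON.parse(call.function.arguments));
+       messages.push({ role: "tool", tool_call_id: call.id, content });
+     }
+   }
+   throw new Error("Tool loop exceeded max iterations");
+ }
+ ```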
+
+ ### 5. Phase 1 Tools
+
+ #### `shell` (exec)
+ - Run a shell command, return stdout/stderr
+ - Working directory: `~/hivemind/workspace`
+ - Timeout: 30s default, configurable
+ - Safety: no `rm -rf /` etc.
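+
+ A minimal executor sketch using Node's child_process; the denylist check is a placeholder for whatever real safety policy lands here:
+
+ ```typescript
+ import { exec } from "child_process";
+ import { promisify } from "util";
+
+ const execAsync = promisify(exec);
+
+ async function shellTool(params: { command: string; timeoutS?: number }): Promise<string> {
+   if (/rm\s+-rf\s+\//.test(params.command)) return "Error: command blocked by safety policy";
+   try {
+     const { stdout, stderr } = await execAsync(params.command, {
+       cwd: `${process.env.HOME}/hivemind/workspace`,
+       timeout: (params.timeoutS ?? 30) * 1000, // kills the process on expiry
+     });
+     return stdout + (stderr ? `\n[stderr]\n${stderr}` : "");
+   } catch (err) {
+     return `Error: ${(err as Error).message}`;
+   }
+ }
+ ```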
+
+ #### `read_file`
+ - Read file contents (with optional offset/limit for large files)
+ - Returns text content or error
+
+ #### `write_file`
+ - Write content to a file (creates dirs if needed)
+ - Returns success/failure
+
+ #### `edit_file`
+ - Find and replace exact text in a file
+ - oldText → newText pattern (surgical edits)
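+
+ A sketch of the `edit_file` executor; the uniqueness check is an assumption about how "surgical" should be enforced:
+
+ ```typescript
+ import { readFileSync, writeFileSync } from "fs";
+
+ async function editFileTool(params: { path: string; oldText: string; newText: string }): Promise<string> {
+   const source = readFileSync(params.path, "utf-8");
+   const first = source.indexOf(params.oldText);
+   if (first === -1) return "Error: oldText not found";
+   if (source.indexOf(params.oldText, first + 1) !== -1) return "Error: oldText is not unique; add more context";
+   writeFileSync(params.path, source.replace(params.oldText, params.newText));
+   return `Edited ${params.path}`;
+ }
+ ```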
+
+ #### `web_search`
+ - Search via Brave API
+ - Returns titles, URLs, snippets
+
+ #### `web_fetch`
+ - Fetch URL, extract markdown
+ - Returns readable content
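+
+ A rough `web_fetch` sketch; real markdown extraction would use a proper readability library, so the tag stripping here is only a stand-in:
+
+ ```typescript
+ async function webFetchTool(params: { url: string }): Promise<string> {
+   const res = await fetch(params.url, { redirect: "follow" });
+   if (!res.ok) return `Error: HTTP ${res.status}`;
+   const html = await res.text();
+   return html
+     .replace(/<(script|style)[\s\S]*?<\/\1>/gi, "") // drop script/style blocks
+     .replace(/<[^>]+>/g, " ") // strip remaining tags
+     .replace(/\s+/g, " ")
+     .trim()
+     .slice(0, 8000); // cap what goes back into the prompt
+ }
+ ```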
+
+ ### 6. Memory Integration
+
+ Tool calls and results should be stored in memory, but summarized:
+ - Don't store full file contents in L2 episodes
+ - Store: "Used shell to run `git status`, found 3 modified files"
+ - L3 promotion can learn patterns: "For git operations, agent uses shell tool"
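+
+ One way to produce that summary line, sketched; the argument picking and truncation lengths are arbitrary:
+
+ ```typescript
+ function summarizeToolUse(calls: ToolCall[]): string {
+   return calls
+     .map((c) => {
+       const args = JSON.parse(c.function.arguments);
+       // Pick the most descriptive argument available for the episode summary.
+       const detail = args.command ?? args.path ?? args.url ?? args.query ?? "";
+       return `Used ${c.function.name}${detail ? ` (\`${String(detail).slice(0, 80)}\`)` : ""}`;
+     })
+     .join("; ");
+ }
+ ```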
+
+ ### 7. Config
+
+ ```toml
+ [tools]
+ enabled = true
+ max_iterations = 25
+ shell_timeout_s = 30
+ workspace = "workspace"
+
+ [tools.web_search]
+ api_key = "" # or from vault
+ ```
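+
+ The matching TypeScript shape, as a sketch; field names follow the TOML above, and the default comments restate its values:
+
+ ```typescript
+ interface ToolsConfig {
+   enabled: boolean; // default true
+   max_iterations: number; // default 25
+   shell_timeout_s: number; // default 30
+   workspace: string; // relative to the hivemind home dir
+   web_search: { api_key: string }; // empty string → resolve from vault
+ }
+ ```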
+
+ ### 8. Implementation Order
+
+ 1. **ToolRegistry class** — registration, definitions, execution
+ 2. **LLMClient.chatWithTools()** — extended chat that handles tool_calls
+ 3. **Agentic loop in Agent** — the while loop with safety limits
+ 4. **shell tool** — most impactful, enables everything
+ 5. **File tools** — read/write/edit
+ 6. **Web tools** — search/fetch
+ 7. **Memory integration** — summarize tool usage in episodes
+
+ ### 9. OpenRouter Compatibility
+
+ OpenRouter passes through tool definitions to the underlying model. Most models support tools:
+ - Claude: Native tool_use
+ - GPT-4: Native function_calling
+ - Gemini: Native function declarations
+
+ The OpenAI-compatible format works for all of them through OpenRouter.
+
+ ### 10. Safety Considerations
+
+ - **Sandbox**: Tools run on the agent's machine. File access should be scoped to workspace.
+ - **Confirmation**: Optionally require human approval for destructive operations.
+ - **Logging**: All tool calls logged to request logger for debugging.
+ - **Rate limiting**: Prevent runaway tool loops.
package/config/default.toml
@@ -4,9 +4,11 @@ personality = "A sharp, direct AI agent with persistent memory. Part of the Hive
  team_charter = "config/TEAM-CHARTER.md"

  [llm]
+ # provider = "anthropic" # Use 'anthropic' for direct Anthropic API (Claude Max), 'openai' for OpenRouter
+ # base_url = "https://api.anthropic.com" # For Anthropic direct; set api_key via ANTHROPIC_API_KEY env var
  base_url = "https://openrouter.ai/api/v1"
  model = "anthropic/claude-sonnet-4"
- api_key = "" # Set via LLM_API_KEY env var or hivemind setup
+ api_key = "" # Set via LLM_API_KEY env var, ANTHROPIC_API_KEY, or hivemind setup
  max_tokens = 4096
  temperature = 0.7

package/dist/{chunk-ZR23DEZ6.js → chunk-ELRUZHT5.js}
@@ -1,6 +1,6 @@
  import {
    FleetManager
- } from "./chunk-YWQZZS43.js";
+ } from "./chunk-TRRT2WFH.js";

  // packages/cli/src/commands/fleet.ts
  function formatUptime(seconds) {
@@ -183,4 +183,4 @@ Commands:
  export {
    runFleetCommand
  };
- //# sourceMappingURL=chunk-ZR23DEZ6.js.map
+ //# sourceMappingURL=chunk-ELRUZHT5.js.map
package/dist/{chunk-DODOQGIL.js → chunk-ESY7ZS46.js}
@@ -116,7 +116,12 @@ async function runInitCommand(args) {
  config.llmModel = fields.llm_model;
  config.personality = fields.agent_personality || fields.personality;
  config.fleetRole = fields.fleet_role;
- if (config.llmApiKey) console.log(" \u2713 LLM API key loaded from vault");
+ if (!config.llmApiKey && fields.anthropic_api_key) {
+   config.llmApiKey = fields.anthropic_api_key;
+   config.llmProvider = "anthropic";
+   if (!config.llmBaseUrl) config.llmBaseUrl = "https://api.anthropic.com";
+ }
+ if (config.llmApiKey) console.log(` \u2713 LLM API key loaded from vault${config.llmProvider === "anthropic" ? " (Anthropic)" : ""}`);
  if (config.llmModel) console.log(` \u2713 LLM model: ${config.llmModel}`);
  if (config.personality) console.log(` \u2713 Personality: ${config.personality.slice(0, 60)}...`);
  if (config.fleetRole) console.log(` \u2713 Fleet role: ${config.fleetRole}`);
@@ -182,6 +187,15 @@ _This file defines your personality and values. Edit it to evolve who you are._
  `);
  console.log(` \u2713 ${identityPath}`);
  }
+ const llmLines = [];
+ if (config.llmModel || config.llmProvider || config.llmBaseUrl) {
+   llmLines.push("[llm]");
+   if (config.llmProvider) llmLines.push(`provider = "${config.llmProvider}"`);
+   if (config.llmBaseUrl) llmLines.push(`base_url = "${config.llmBaseUrl}"`);
+   if (config.llmModel) llmLines.push(`model = "${config.llmModel}"`);
+ } else {
+   llmLines.push("# [llm] using defaults");
+ }
  const localToml = `# Generated by hivemind init \u2014 ${(/* @__PURE__ */ new Date()).toISOString()}
  # Overrides config/default.toml with agent-specific settings

@@ -190,21 +204,25 @@ name = "${config.agentName}"
  ${config.personality ? `personality = "${config.personality.replace(/"/g, '\\"')}"` : "# personality = (using default)"}
  workspace = "workspace"

- ${config.llmModel ? `[llm]
- model = "${config.llmModel}"` : "# [llm] using defaults"}
- ${config.llmBaseUrl ? `# base_url = "${config.llmBaseUrl}"` : ""}
+ ${llmLines.join("\n")}

  [sesame]
  api_key = "${sesameApiKey}"
  `;
  writeFileSync(LOCAL_TOML, localToml);
  console.log(` \u2713 ${LOCAL_TOML}`);
- const envContent = `# Hivemind Agent \u2014 ${config.agentName}
- # Generated by hivemind init \u2014 ${(/* @__PURE__ */ new Date()).toISOString()}
- SESAME_API_KEY=${sesameApiKey}
- LLM_API_KEY=${config.llmApiKey || ""}
- AGENT_NAME=${config.agentName}
- `;
+ const envLines = [
+   `# Hivemind Agent \u2014 ${config.agentName}`,
+   `# Generated by hivemind init \u2014 ${(/* @__PURE__ */ new Date()).toISOString()}`,
+   `SESAME_API_KEY=${sesameApiKey}`
+ ];
+ if (config.llmProvider === "anthropic") {
+   envLines.push(`ANTHROPIC_API_KEY=${config.llmApiKey || ""}`);
+ } else {
+   envLines.push(`LLM_API_KEY=${config.llmApiKey || ""}`);
+ }
+ envLines.push(`AGENT_NAME=${config.agentName}`, "");
+ const envContent = envLines.join("\n");
  writeFileSync(ENV_FILE, envContent, { mode: 384 });
  console.log(` \u2713 ${ENV_FILE} (chmod 600)`);
  console.log("\n\u2192 Setting up memory system...");
@@ -330,4 +348,4 @@ Options:
  export {
    runInitCommand
  };
- //# sourceMappingURL=chunk-DODOQGIL.js.map
+ //# sourceMappingURL=chunk-ESY7ZS46.js.map
package/dist/chunk-ESY7ZS46.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../packages/cli/src/commands/init.ts"],"sourcesContent":["import { resolve, dirname } from \"path\";\nimport { existsSync, writeFileSync, mkdirSync, readFileSync, copyFileSync, realpathSync, chmodSync } from \"fs\";\nimport { createInterface } from \"readline\";\nimport { fileURLToPath } from \"url\";\nimport { execSync } from \"child_process\";\nimport { SesameClient } from \"@sesamespace/sdk\";\nimport { homedir, arch, platform } from \"os\";\n\nconst HIVEMIND_DIR = resolve(process.env.HIVEMIND_HOME || resolve(homedir(), \"hivemind\"));\nconst CONFIG_DIR = resolve(HIVEMIND_DIR, \"config\");\nconst WORKSPACE_DIR = resolve(HIVEMIND_DIR, \"workspace\");\nconst BIN_DIR = resolve(HIVEMIND_DIR, \"bin\");\nconst DATA_DIR = resolve(HIVEMIND_DIR, \"data\");\nconst ENV_FILE = resolve(HIVEMIND_DIR, \".env\");\nconst LOCAL_TOML = resolve(CONFIG_DIR, \"local.toml\");\nconst MEMORY_BIN = resolve(BIN_DIR, \"hivemind-memory\");\n\nconst RELEASES_BASE_URL = \"https://sesame-hivemind-releases.s3.amazonaws.com\";\nconst EMBEDDING_MODEL = \"nomic-embed-text\";\n\nconst VAULT_CONFIG_NAME = \"hivemind-config\";\n\ninterface ProvisioningConfig {\n agentName: string;\n agentHandle: string;\n agentId: string;\n personality?: string;\n llmApiKey?: string;\n llmBaseUrl?: string;\n llmModel?: string;\n llmProvider?: 'openai' | 'anthropic';\n fleetRole?: string;\n channels: Array<{ id: string; name: string | null; kind: string }>;\n}\n\nasync function prompt(question: string): Promise<string> {\n const rl = createInterface({ input: process.stdin, output: process.stdout });\n return new Promise((resolve) => {\n rl.question(question, (answer) => {\n rl.close();\n resolve(answer.trim());\n });\n });\n}\n\nexport async function runInitCommand(args: string[]): Promise<void> {\n const nonInteractive = args.includes(\"--yes\") || args.includes(\"-y\") || args.includes(\"--non-interactive\");\n const filteredArgs = args.filter((a) => ![\"--yes\", \"-y\", \"--non-interactive\", \"--help\", \"-h\"].includes(a));\n let sesameApiKey = filteredArgs[0];\n\n if (args.includes(\"--help\") || args.includes(\"-h\")) {\n printHelp();\n return;\n }\n\n console.log(`\n ╦ ╦╦╦ ╦╔═╗╔╦╗╦╔╗╔╔╦╗\n ╠═╣║╚╗╔╝║╣ ║║║║║║║ ║║\n ╩ ╩╩ ╚╝ ╚═╝╩ ╩╩╝╚╝═╩╝\n Agent Initialization\n`);\n\n // --- Step 1: Get Sesame API key ---\n // Check existing config if no key provided\n if (!sesameApiKey) {\n // Try .env file\n if (existsSync(ENV_FILE)) {\n try {\n const envContent = readFileSync(ENV_FILE, \"utf-8\");\n const match = envContent.match(/^SESAME_API_KEY=(.+)$/m);\n if (match && match[1].trim()) {\n sesameApiKey = match[1].trim();\n console.log(\" ✓ Found existing API key in .env\");\n }\n } catch {}\n }\n // Try local.toml\n if (!sesameApiKey && existsSync(LOCAL_TOML)) {\n try {\n const tomlContent = readFileSync(LOCAL_TOML, \"utf-8\");\n const match = tomlContent.match(/api_key\\s*=\\s*\"([^\"]+)\"/);\n if (match && match[1].trim()) {\n sesameApiKey = match[1].trim();\n console.log(\" ✓ Found existing API key in local.toml\");\n }\n } catch {}\n }\n // Try environment variable\n if (!sesameApiKey && process.env.SESAME_API_KEY) {\n sesameApiKey = process.env.SESAME_API_KEY;\n console.log(\" ✓ Using API key from SESAME_API_KEY env var\");\n }\n }\n if (!sesameApiKey) {\n sesameApiKey = await prompt(\" Sesame API key: \");\n }\n if (!sesameApiKey) {\n console.error(\"Error: Sesame API key is required\");\n process.exit(1);\n }\n\n // --- Step 2: Connect to Sesame and fetch manifest ---\n console.log(\"\\n→ Connecting to 
Sesame...\");\n const sdk = new SesameClient({\n apiUrl: \"https://api.sesame.space\",\n wsUrl: \"wss://ws.sesame.space\",\n apiKey: sesameApiKey,\n });\n\n let config: ProvisioningConfig;\n try {\n const manifest = await sdk.getManifest();\n console.log(` ✓ Authenticated as ${manifest.agent.handle} (${manifest.agent.id})`);\n console.log(` ✓ Workspace: ${manifest.workspace.name}`);\n console.log(` ✓ Channels: ${manifest.channels.length}`);\n for (const ch of manifest.channels) {\n console.log(` - ${ch.name || ch.id} (${ch.kind})`);\n }\n\n config = {\n agentName: manifest.agent.handle,\n agentHandle: manifest.agent.handle,\n agentId: manifest.agent.id,\n channels: manifest.channels.map((ch) => ({\n id: ch.id,\n name: ch.name,\n kind: ch.kind,\n })),\n };\n\n // --- Step 3: Check vault for config ---\n console.log(\"\\n→ Checking vault for provisioning config...\");\n try {\n const vaultResp = await sdk.listVaultItems() as any;\n const items = vaultResp.items || vaultResp.data || [];\n const configItem = items.find((i: any) => i.name === VAULT_CONFIG_NAME);\n\n if (configItem) {\n console.log(` ✓ Found ${VAULT_CONFIG_NAME} vault item`);\n const revealResp = await sdk.revealItem(configItem.id) as any;\n const fields = revealResp.data?.fields || revealResp.fields || revealResp.data || {};\n\n config.llmApiKey = fields.llm_api_key || fields.openrouter_api_key;\n config.llmBaseUrl = fields.llm_base_url;\n config.llmModel = fields.llm_model;\n config.personality = fields.agent_personality || fields.personality;\n config.fleetRole = fields.fleet_role;\n\n // Detect Anthropic API key from vault — sets provider + base_url automatically\n if (!config.llmApiKey && fields.anthropic_api_key) {\n config.llmApiKey = fields.anthropic_api_key;\n config.llmProvider = 'anthropic';\n if (!config.llmBaseUrl) config.llmBaseUrl = 'https://api.anthropic.com';\n }\n\n if (config.llmApiKey) console.log(` ✓ LLM API key loaded from vault${config.llmProvider === 'anthropic' ? ' (Anthropic)' : ''}`);\n if (config.llmModel) console.log(` ✓ LLM model: ${config.llmModel}`);\n if (config.personality) console.log(` ✓ Personality: ${config.personality.slice(0, 60)}...`);\n if (config.fleetRole) console.log(` ✓ Fleet role: ${config.fleetRole}`);\n } else {\n console.log(\" ! No hivemind-config vault item found\");\n console.log(\" ! Will prompt for LLM API key instead\");\n }\n } catch (err) {\n console.log(` ! Could not read vault: ${(err as Error).message}`);\n }\n } catch (err) {\n console.error(`\\n ✗ Failed to connect to Sesame: ${(err as Error).message}`);\n console.error(\" Check your API key and try again.\");\n process.exit(1);\n } finally {\n sdk.disconnect();\n }\n\n // --- Step 4: Prompt for anything missing ---\n if (!config.llmApiKey) {\n console.log(\" ! 
No LLM API key found in vault — set LLM_API_KEY in .env after init\");\n }\n\n // --- Step 5: Write config files ---\n console.log(\"\\n→ Writing configuration...\");\n\n mkdirSync(CONFIG_DIR, { recursive: true });\n mkdirSync(WORKSPACE_DIR, { recursive: true });\n\n // Copy default.toml from installed package if not present\n const defaultToml = resolve(CONFIG_DIR, \"default.toml\");\n if (!existsSync(defaultToml)) {\n // Resolve from the hivemind binary location\n // process.argv[1] may be a symlink, so resolve it first\n const realBin = realpathSync(process.argv[1]);\n // realBin is <pkg>/dist/main.js, so ../config/ gets us to <pkg>/config/\n const packageConfigDir = resolve(dirname(realBin), \"..\", \"config\");\n const packageDefault = resolve(packageConfigDir, \"default.toml\");\n if (existsSync(packageDefault)) {\n copyFileSync(packageDefault, defaultToml);\n console.log(` ✓ ${defaultToml}`);\n // Also copy team charter if available\n const packageCharter = resolve(packageConfigDir, \"TEAM-CHARTER.md\");\n const localCharter = resolve(CONFIG_DIR, \"TEAM-CHARTER.md\");\n if (existsSync(packageCharter) && !existsSync(localCharter)) {\n copyFileSync(packageCharter, localCharter);\n console.log(` ✓ ${localCharter}`);\n }\n } else {\n console.log(` ! default.toml not found in package — you may need to copy it manually`);\n }\n }\n\n // Write workspace identity files\n const soulPath = resolve(WORKSPACE_DIR, \"SOUL.md\");\n if (!existsSync(soulPath)) {\n const personality = config.personality || \"A helpful, capable agent.\";\n writeFileSync(soulPath, `# SOUL.md — Who You Are\n\n${personality}\n\n---\n\n_This file defines your personality and values. Edit it to evolve who you are._\n`);\n console.log(` ✓ ${soulPath}`);\n }\n\n const identityPath = resolve(WORKSPACE_DIR, \"IDENTITY.md\");\n if (!existsSync(identityPath)) {\n writeFileSync(identityPath, `# IDENTITY.md\n\n- **Name:** ${config.agentName}\n- **Handle:** ${config.agentHandle}\n- **Agent ID:** ${config.agentId}\n`);\n console.log(` ✓ ${identityPath}`);\n }\n\n // Write local.toml (overrides)\n const llmLines: string[] = [];\n if (config.llmModel || config.llmProvider || config.llmBaseUrl) {\n llmLines.push('[llm]');\n if (config.llmProvider) llmLines.push(`provider = \"${config.llmProvider}\"`);\n if (config.llmBaseUrl) llmLines.push(`base_url = \"${config.llmBaseUrl}\"`);\n if (config.llmModel) llmLines.push(`model = \"${config.llmModel}\"`);\n } else {\n llmLines.push('# [llm] using defaults');\n }\n\n const localToml = `# Generated by hivemind init — ${new Date().toISOString()}\n# Overrides config/default.toml with agent-specific settings\n\n[agent]\nname = \"${config.agentName}\"\n${config.personality ? 
`personality = \"${config.personality.replace(/\"/g, '\\\\\"')}\"` : \"# personality = (using default)\"}\nworkspace = \"workspace\"\n\n${llmLines.join('\\n')}\n\n[sesame]\napi_key = \"${sesameApiKey}\"\n`;\n\n writeFileSync(LOCAL_TOML, localToml);\n console.log(` ✓ ${LOCAL_TOML}`);\n\n // Write .env\n const envLines = [\n `# Hivemind Agent — ${config.agentName}`,\n `# Generated by hivemind init — ${new Date().toISOString()}`,\n `SESAME_API_KEY=${sesameApiKey}`,\n ];\n if (config.llmProvider === 'anthropic') {\n envLines.push(`ANTHROPIC_API_KEY=${config.llmApiKey || \"\"}`);\n } else {\n envLines.push(`LLM_API_KEY=${config.llmApiKey || \"\"}`);\n }\n envLines.push(`AGENT_NAME=${config.agentName}`, '');\n const envContent = envLines.join('\\n');\n\n writeFileSync(ENV_FILE, envContent, { mode: 0o600 });\n console.log(` ✓ ${ENV_FILE} (chmod 600)`);\n\n // --- Step 6: Install memory infrastructure ---\n console.log(\"\\n→ Setting up memory system...\");\n mkdirSync(BIN_DIR, { recursive: true });\n mkdirSync(DATA_DIR, { recursive: true });\n\n await installOllama();\n await pullEmbeddingModel();\n await downloadMemoryDaemon();\n\n // --- Done ---\n console.log(`\n ✓ Hivemind initialized for ${config.agentName}!\n\n To start the agent:\n hivemind start\n\n To install as a service:\n hivemind service install\n\n Agent ID: ${config.agentId}\n Channels: ${config.channels.map((c) => c.name || c.id).join(\", \")}\n Fleet role: ${config.fleetRole || \"standalone\"}\n`);\n}\n\n// --- Memory infrastructure helpers ---\n\nasync function installOllama(): Promise<void> {\n try {\n execSync(\"which ollama\", { stdio: \"ignore\" });\n const version = execSync(\"ollama --version\", { encoding: \"utf-8\" }).trim();\n console.log(` ✓ Ollama already installed (${version})`);\n return;\n } catch {}\n\n console.log(\" → Installing Ollama...\");\n try {\n // Try Homebrew first (macOS)\n execSync(\"which brew\", { stdio: \"ignore\" });\n execSync(\"brew install ollama\", { stdio: \"inherit\" });\n console.log(\" ✓ Ollama installed via Homebrew\");\n } catch {\n // Fall back to official installer\n try {\n execSync(\"curl -fsSL https://ollama.com/install.sh | sh\", { stdio: \"inherit\" });\n console.log(\" ✓ Ollama installed via installer script\");\n } catch (err) {\n console.error(` ✗ Failed to install Ollama: ${(err as Error).message}`);\n console.error(\" ! Install manually: https://ollama.com/download\");\n console.error(\" ! Memory system will not work without Ollama\");\n }\n }\n}\n\nasync function pullEmbeddingModel(): Promise<void> {\n console.log(` → Pulling embedding model (${EMBEDDING_MODEL})...`);\n try {\n // Ensure ollama is running\n try {\n execSync(\"curl -sf http://localhost:11434/api/tags > /dev/null\", { stdio: \"ignore\" });\n } catch {\n // Start ollama serve in background\n console.log(\" → Starting Ollama server...\");\n execSync(\"ollama serve &\", { stdio: \"ignore\", shell: \"/bin/sh\" });\n // Wait for it to be ready\n for (let i = 0; i < 15; i++) {\n try {\n execSync(\"curl -sf http://localhost:11434/api/tags > /dev/null\", { stdio: \"ignore\" });\n break;\n } catch {\n execSync(\"sleep 1\");\n }\n }\n }\n\n execSync(`ollama pull ${EMBEDDING_MODEL}`, { stdio: \"inherit\" });\n console.log(` ✓ ${EMBEDDING_MODEL} model ready`);\n } catch (err) {\n console.error(` ✗ Failed to pull model: ${(err as Error).message}`);\n console.error(` ! 
Run manually: ollama pull ${EMBEDDING_MODEL}`);\n }\n}\n\nasync function downloadMemoryDaemon(): Promise<void> {\n if (existsSync(MEMORY_BIN)) {\n console.log(` ✓ Memory daemon already installed at ${MEMORY_BIN}`);\n return;\n }\n\n const osArch = arch();\n const osPlatform = platform();\n\n if (osPlatform !== \"darwin\") {\n console.log(` ! Memory daemon pre-built binaries only available for macOS currently`);\n console.log(` ! Build from source: cd packages/memory && cargo build --release`);\n return;\n }\n\n const artifactName = osArch === \"arm64\"\n ? \"hivemind-memory-darwin-arm64\"\n : \"hivemind-memory-darwin-x64\";\n\n console.log(` → Downloading memory daemon (${artifactName})...`);\n try {\n // Get latest version from S3\n const latestJson = execSync(\n `curl -sf \"${RELEASES_BASE_URL}/latest.json\"`,\n { encoding: \"utf-8\" },\n );\n const { version } = JSON.parse(latestJson);\n const url = `${RELEASES_BASE_URL}/v${version}/${artifactName}`;\n\n execSync(\n `curl -fSL -o \"${MEMORY_BIN}\" \"${url}\"`,\n { stdio: \"inherit\" },\n );\n chmodSync(MEMORY_BIN, 0o755);\n console.log(` ✓ Memory daemon installed at ${MEMORY_BIN} (v${version})`);\n } catch (err) {\n console.error(` ✗ Failed to download memory daemon: ${(err as Error).message}`);\n console.error(\" ! Build from source: cd packages/memory && cargo build --release\");\n console.error(` ! Then copy to: ${MEMORY_BIN}`);\n }\n}\n\nfunction printHelp(): void {\n console.log(`hivemind init — Initialize a Hivemind agent from Sesame\n\nUsage: hivemind init [sesame-api-key]\n\nThe API key can also be passed as the first argument.\n\nWhat it does:\n 1. Connects to Sesame and fetches agent identity\n 2. Reads provisioning config from Sesame vault (if available)\n 3. Writes config/local.toml and .env\n 4. Installs Ollama + embedding model for memory\n 5. 
Downloads pre-built memory daemon binary\n\nOptions:\n -h, --help Show this help\n`);\n}\n"],"mappings":";;;;;AAAA,SAAS,SAAS,eAAe;AACjC,SAAS,YAAY,eAAe,WAAW,cAAc,cAAc,cAAc,iBAAiB;AAC1G,SAAS,uBAAuB;AAEhC,SAAS,gBAAgB;AAEzB,SAAS,SAAS,MAAM,gBAAgB;AAExC,IAAM,eAAe,QAAQ,QAAQ,IAAI,iBAAiB,QAAQ,QAAQ,GAAG,UAAU,CAAC;AACxF,IAAM,aAAa,QAAQ,cAAc,QAAQ;AACjD,IAAM,gBAAgB,QAAQ,cAAc,WAAW;AACvD,IAAM,UAAU,QAAQ,cAAc,KAAK;AAC3C,IAAM,WAAW,QAAQ,cAAc,MAAM;AAC7C,IAAM,WAAW,QAAQ,cAAc,MAAM;AAC7C,IAAM,aAAa,QAAQ,YAAY,YAAY;AACnD,IAAM,aAAa,QAAQ,SAAS,iBAAiB;AAErD,IAAM,oBAAoB;AAC1B,IAAM,kBAAkB;AAExB,IAAM,oBAAoB;AAe1B,eAAe,OAAO,UAAmC;AACvD,QAAM,KAAK,gBAAgB,EAAE,OAAO,QAAQ,OAAO,QAAQ,QAAQ,OAAO,CAAC;AAC3E,SAAO,IAAI,QAAQ,CAACA,aAAY;AAC9B,OAAG,SAAS,UAAU,CAAC,WAAW;AAChC,SAAG,MAAM;AACT,MAAAA,SAAQ,OAAO,KAAK,CAAC;AAAA,IACvB,CAAC;AAAA,EACH,CAAC;AACH;AAEA,eAAsB,eAAe,MAA+B;AAClE,QAAM,iBAAiB,KAAK,SAAS,OAAO,KAAK,KAAK,SAAS,IAAI,KAAK,KAAK,SAAS,mBAAmB;AACzG,QAAM,eAAe,KAAK,OAAO,CAAC,MAAM,CAAC,CAAC,SAAS,MAAM,qBAAqB,UAAU,IAAI,EAAE,SAAS,CAAC,CAAC;AACzG,MAAI,eAAe,aAAa,CAAC;AAEjC,MAAI,KAAK,SAAS,QAAQ,KAAK,KAAK,SAAS,IAAI,GAAG;AAClD,cAAU;AACV;AAAA,EACF;AAEA,UAAQ,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA,CAKb;AAIC,MAAI,CAAC,cAAc;AAEjB,QAAI,WAAW,QAAQ,GAAG;AACxB,UAAI;AACF,cAAMC,cAAa,aAAa,UAAU,OAAO;AACjD,cAAM,QAAQA,YAAW,MAAM,wBAAwB;AACvD,YAAI,SAAS,MAAM,CAAC,EAAE,KAAK,GAAG;AAC5B,yBAAe,MAAM,CAAC,EAAE,KAAK;AAC7B,kBAAQ,IAAI,yCAAoC;AAAA,QAClD;AAAA,MACF,QAAQ;AAAA,MAAC;AAAA,IACX;AAEA,QAAI,CAAC,gBAAgB,WAAW,UAAU,GAAG;AAC3C,UAAI;AACF,cAAM,cAAc,aAAa,YAAY,OAAO;AACpD,cAAM,QAAQ,YAAY,MAAM,yBAAyB;AACzD,YAAI,SAAS,MAAM,CAAC,EAAE,KAAK,GAAG;AAC5B,yBAAe,MAAM,CAAC,EAAE,KAAK;AAC7B,kBAAQ,IAAI,+CAA0C;AAAA,QACxD;AAAA,MACF,QAAQ;AAAA,MAAC;AAAA,IACX;AAEA,QAAI,CAAC,gBAAgB,QAAQ,IAAI,gBAAgB;AAC/C,qBAAe,QAAQ,IAAI;AAC3B,cAAQ,IAAI,oDAA+C;AAAA,IAC7D;AAAA,EACF;AACA,MAAI,CAAC,cAAc;AACjB,mBAAe,MAAM,OAAO,oBAAoB;AAAA,EAClD;AACA,MAAI,CAAC,cAAc;AACjB,YAAQ,MAAM,mCAAmC;AACjD,YAAQ,KAAK,CAAC;AAAA,EAChB;AAGA,UAAQ,IAAI,kCAA6B;AACzC,QAAM,MAAM,IAAI,aAAa;AAAA,IAC3B,QAAQ;AAAA,IACR,OAAO;AAAA,IACP,QAAQ;AAAA,EACV,CAAC;AAED,MAAI;AACJ,MAAI;AACF,UAAM,WAAW,MAAM,IAAI,YAAY;AACvC,YAAQ,IAAI,6BAAwB,SAAS,MAAM,MAAM,KAAK,SAAS,MAAM,EAAE,GAAG;AAClF,YAAQ,IAAI,uBAAkB,SAAS,UAAU,IAAI,EAAE;AACvD,YAAQ,IAAI,sBAAiB,SAAS,SAAS,MAAM,EAAE;AACvD,eAAW,MAAM,SAAS,UAAU;AAClC,cAAQ,IAAI,SAAS,GAAG,QAAQ,GAAG,EAAE,KAAK,GAAG,IAAI,GAAG;AAAA,IACtD;AAEA,aAAS;AAAA,MACP,WAAW,SAAS,MAAM;AAAA,MAC1B,aAAa,SAAS,MAAM;AAAA,MAC5B,SAAS,SAAS,MAAM;AAAA,MACxB,UAAU,SAAS,SAAS,IAAI,CAAC,QAAQ;AAAA,QACvC,IAAI,GAAG;AAAA,QACP,MAAM,GAAG;AAAA,QACT,MAAM,GAAG;AAAA,MACX,EAAE;AAAA,IACJ;AAGA,YAAQ,IAAI,oDAA+C;AAC3D,QAAI;AACF,YAAM,YAAY,MAAM,IAAI,eAAe;AAC3C,YAAM,QAAQ,UAAU,SAAS,UAAU,QAAQ,CAAC;AACpD,YAAM,aAAa,MAAM,KAAK,CAAC,MAAW,EAAE,SAAS,iBAAiB;AAEtE,UAAI,YAAY;AACd,gBAAQ,IAAI,kBAAa,iBAAiB,aAAa;AACvD,cAAM,aAAa,MAAM,IAAI,WAAW,WAAW,EAAE;AACrD,cAAM,SAAS,WAAW,MAAM,UAAU,WAAW,UAAU,WAAW,QAAQ,CAAC;AAEnF,eAAO,YAAY,OAAO,eAAe,OAAO;AAChD,eAAO,aAAa,OAAO;AAC3B,eAAO,WAAW,OAAO;AACzB,eAAO,cAAc,OAAO,qBAAqB,OAAO;AACxD,eAAO,YAAY,OAAO;AAG1B,YAAI,CAAC,OAAO,aAAa,OAAO,mBAAmB;AACjD,iBAAO,YAAY,OAAO;AAC1B,iBAAO,cAAc;AACrB,cAAI,CAAC,OAAO,WAAY,QAAO,aAAa;AAAA,QAC9C;AAEA,YAAI,OAAO,UAAW,SAAQ,IAAI,yCAAoC,OAAO,gBAAgB,cAAc,iBAAiB,EAAE,EAAE;AAChI,YAAI,OAAO,SAAU,SAAQ,IAAI,uBAAkB,OAAO,QAAQ,EAAE;AACpE,YAAI,OAAO,YAAa,SAAQ,IAAI,yBAAoB,OAAO,YAAY,MAAM,GAAG,EAAE,CAAC,KAAK;AAC5F,YAAI,OAAO,UAAW,SAAQ,IAAI,wBAAmB,OAAO,SAAS,EAAE;AAAA,MACzE,OAAO;AACL,gBAAQ,IAAI,yCAAyC;AACrD,gBAAQ,IAAI,yCAAyC;AAAA,MACvD;AAAA,IACF,SAAS,KAAK;AACZ,cAAQ,IAAI,6BAA8B,IAAc,OAAO,EAAE;AAAA,IACnE;AAAA,EACF,SAAS,KAAK;AACZ,YAAQ,MAAM;AAAA,wCAAuC,IAAc,OAAO,EAAE;AAC5E,YAAQ,MAAM,qC
AAqC;AACnD,YAAQ,KAAK,CAAC;AAAA,EAChB,UAAE;AACA,QAAI,WAAW;AAAA,EACjB;AAGA,MAAI,CAAC,OAAO,WAAW;AACrB,YAAQ,IAAI,6EAAwE;AAAA,EACtF;AAGA,UAAQ,IAAI,mCAA8B;AAE1C,YAAU,YAAY,EAAE,WAAW,KAAK,CAAC;AACzC,YAAU,eAAe,EAAE,WAAW,KAAK,CAAC;AAG5C,QAAM,cAAc,QAAQ,YAAY,cAAc;AACtD,MAAI,CAAC,WAAW,WAAW,GAAG;AAG5B,UAAM,UAAU,aAAa,QAAQ,KAAK,CAAC,CAAC;AAE5C,UAAM,mBAAmB,QAAQ,QAAQ,OAAO,GAAG,MAAM,QAAQ;AACjE,UAAM,iBAAiB,QAAQ,kBAAkB,cAAc;AAC/D,QAAI,WAAW,cAAc,GAAG;AAC9B,mBAAa,gBAAgB,WAAW;AACxC,cAAQ,IAAI,YAAO,WAAW,EAAE;AAEhC,YAAM,iBAAiB,QAAQ,kBAAkB,iBAAiB;AAClE,YAAM,eAAe,QAAQ,YAAY,iBAAiB;AAC1D,UAAI,WAAW,cAAc,KAAK,CAAC,WAAW,YAAY,GAAG;AAC3D,qBAAa,gBAAgB,YAAY;AACzC,gBAAQ,IAAI,YAAO,YAAY,EAAE;AAAA,MACnC;AAAA,IACF,OAAO;AACL,cAAQ,IAAI,+EAA0E;AAAA,IACxF;AAAA,EACF;AAGA,QAAM,WAAW,QAAQ,eAAe,SAAS;AACjD,MAAI,CAAC,WAAW,QAAQ,GAAG;AACzB,UAAM,cAAc,OAAO,eAAe;AAC1C,kBAAc,UAAU;AAAA;AAAA,EAE1B,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA,CAKZ;AACG,YAAQ,IAAI,YAAO,QAAQ,EAAE;AAAA,EAC/B;AAEA,QAAM,eAAe,QAAQ,eAAe,aAAa;AACzD,MAAI,CAAC,WAAW,YAAY,GAAG;AAC7B,kBAAc,cAAc;AAAA;AAAA,cAElB,OAAO,SAAS;AAAA,gBACd,OAAO,WAAW;AAAA,kBAChB,OAAO,OAAO;AAAA,CAC/B;AACG,YAAQ,IAAI,YAAO,YAAY,EAAE;AAAA,EACnC;AAGA,QAAM,WAAqB,CAAC;AAC5B,MAAI,OAAO,YAAY,OAAO,eAAe,OAAO,YAAY;AAC9D,aAAS,KAAK,OAAO;AACrB,QAAI,OAAO,YAAa,UAAS,KAAK,eAAe,OAAO,WAAW,GAAG;AAC1E,QAAI,OAAO,WAAY,UAAS,KAAK,eAAe,OAAO,UAAU,GAAG;AACxE,QAAI,OAAO,SAAU,UAAS,KAAK,YAAY,OAAO,QAAQ,GAAG;AAAA,EACnE,OAAO;AACL,aAAS,KAAK,wBAAwB;AAAA,EACxC;AAEA,QAAM,YAAY,wCAAkC,oBAAI,KAAK,GAAE,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA,UAIpE,OAAO,SAAS;AAAA,EACxB,OAAO,cAAc,kBAAkB,OAAO,YAAY,QAAQ,MAAM,KAAK,CAAC,MAAM,iCAAiC;AAAA;AAAA;AAAA,EAGrH,SAAS,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA,aAGR,YAAY;AAAA;AAGvB,gBAAc,YAAY,SAAS;AACnC,UAAQ,IAAI,YAAO,UAAU,EAAE;AAG/B,QAAM,WAAW;AAAA,IACf,2BAAsB,OAAO,SAAS;AAAA,IACtC,wCAAkC,oBAAI,KAAK,GAAE,YAAY,CAAC;AAAA,IAC1D,kBAAkB,YAAY;AAAA,EAChC;AACA,MAAI,OAAO,gBAAgB,aAAa;AACtC,aAAS,KAAK,qBAAqB,OAAO,aAAa,EAAE,EAAE;AAAA,EAC7D,OAAO;AACL,aAAS,KAAK,eAAe,OAAO,aAAa,EAAE,EAAE;AAAA,EACvD;AACA,WAAS,KAAK,cAAc,OAAO,SAAS,IAAI,EAAE;AAClD,QAAM,aAAa,SAAS,KAAK,IAAI;AAErC,gBAAc,UAAU,YAAY,EAAE,MAAM,IAAM,CAAC;AACnD,UAAQ,IAAI,YAAO,QAAQ,cAAc;AAGzC,UAAQ,IAAI,sCAAiC;AAC7C,YAAU,SAAS,EAAE,WAAW,KAAK,CAAC;AACtC,YAAU,UAAU,EAAE,WAAW,KAAK,CAAC;AAEvC,QAAM,cAAc;AACpB,QAAM,mBAAmB;AACzB,QAAM,qBAAqB;AAG3B,UAAQ,IAAI;AAAA,oCACiB,OAAO,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAQ/B,OAAO,OAAO;AAAA,gBACd,OAAO,SAAS,IAAI,CAAC,MAAM,EAAE,QAAQ,EAAE,EAAE,EAAE,KAAK,IAAI,CAAC;AAAA,gBACrD,OAAO,aAAa,YAAY;AAAA,CAC/C;AACD;AAIA,eAAe,gBAA+B;AAC5C,MAAI;AACF,aAAS,gBAAgB,EAAE,OAAO,SAAS,CAAC;AAC5C,UAAM,UAAU,SAAS,oBAAoB,EAAE,UAAU,QAAQ,CAAC,EAAE,KAAK;AACzE,YAAQ,IAAI,sCAAiC,OAAO,GAAG;AACvD;AAAA,EACF,QAAQ;AAAA,EAAC;AAET,UAAQ,IAAI,+BAA0B;AACtC,MAAI;AAEF,aAAS,cAAc,EAAE,OAAO,SAAS,CAAC;AAC1C,aAAS,uBAAuB,EAAE,OAAO,UAAU,CAAC;AACpD,YAAQ,IAAI,wCAAmC;AAAA,EACjD,QAAQ;AAEN,QAAI;AACF,eAAS,iDAAiD,EAAE,OAAO,UAAU,CAAC;AAC9E,cAAQ,IAAI,gDAA2C;AAAA,IACzD,SAAS,KAAK;AACZ,cAAQ,MAAM,sCAAkC,IAAc,OAAO,EAAE;AACvE,cAAQ,MAAM,mDAAmD;AACjE,cAAQ,MAAM,gDAAgD;AAAA,IAChE;AAAA,EACF;AACF;AAEA,eAAe,qBAAoC;AACjD,UAAQ,IAAI,qCAAgC,eAAe,MAAM;AACjE,MAAI;AAEF,QAAI;AACF,eAAS,wDAAwD,EAAE,OAAO,SAAS,CAAC;AAAA,IACtF,QAAQ;AAEN,cAAQ,IAAI,oCAA+B;AAC3C,eAAS,kBAAkB,EAAE,OAAO,UAAU,OAAO,UAAU,CAAC;AAEhE,eAAS,IAAI,GAAG,IAAI,IAAI,KAAK;AAC3B,YAAI;AACF,mBAAS,wDAAwD,EAAE,OAAO,SAAS,CAAC;AACpF;AAAA,QACF,QAAQ;AACN,mBAAS,SAAS;AAAA,QACpB;AAAA,MACF;AAAA,IACF;AAEA,aAAS,eAAe,eAAe,IAAI,EAAE,OAAO,UAAU,CAAC;AAC/D,YAAQ,IAAI,YAAO,eAAe,cAAc;AAAA,EAClD,SAAS,KAAK;AACZ,YAAQ,MAAM,kCAA8B,IAAc,OAAO,EAAE;AACnE,YAAQ,MAAM,iCAAiC,eAAe,EAAE;AAAA,EAClE;AACF;AAEA,eAAe,uBAAsC;AACnD,MAAI,WAAW,UAAU,GAAG;
AAC1B,YAAQ,IAAI,+CAA0C,UAAU,EAAE;AAClE;AAAA,EACF;AAEA,QAAM,SAAS,KAAK;AACpB,QAAM,aAAa,SAAS;AAE5B,MAAI,eAAe,UAAU;AAC3B,YAAQ,IAAI,yEAAyE;AACrF,YAAQ,IAAI,oEAAoE;AAChF;AAAA,EACF;AAEA,QAAM,eAAe,WAAW,UAC5B,iCACA;AAEJ,UAAQ,IAAI,uCAAkC,YAAY,MAAM;AAChE,MAAI;AAEF,UAAM,aAAa;AAAA,MACjB,aAAa,iBAAiB;AAAA,MAC9B,EAAE,UAAU,QAAQ;AAAA,IACtB;AACA,UAAM,EAAE,QAAQ,IAAI,KAAK,MAAM,UAAU;AACzC,UAAM,MAAM,GAAG,iBAAiB,KAAK,OAAO,IAAI,YAAY;AAE5D;AAAA,MACE,iBAAiB,UAAU,MAAM,GAAG;AAAA,MACpC,EAAE,OAAO,UAAU;AAAA,IACrB;AACA,cAAU,YAAY,GAAK;AAC3B,YAAQ,IAAI,uCAAkC,UAAU,MAAM,OAAO,GAAG;AAAA,EAC1E,SAAS,KAAK;AACZ,YAAQ,MAAM,8CAA0C,IAAc,OAAO,EAAE;AAC/E,YAAQ,MAAM,oEAAoE;AAClF,YAAQ,MAAM,qBAAqB,UAAU,EAAE;AAAA,EACjD;AACF;AAEA,SAAS,YAAkB;AACzB,UAAQ,IAAI;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,CAeb;AACD;","names":["resolve","envContent"]}
package/dist/{chunk-X54OQAJA.js → chunk-FQ2HFJDZ.js}
@@ -1,6 +1,6 @@
  import {
    startPipeline
- } from "./chunk-VCITLDMK.js";
+ } from "./chunk-Q5ZO5WXM.js";

  // packages/cli/src/commands/start.ts
  import { resolve } from "path";
@@ -66,4 +66,4 @@ Options:
  export {
    runStartCommand
  };
- //# sourceMappingURL=chunk-X54OQAJA.js.map
+ //# sourceMappingURL=chunk-FQ2HFJDZ.js.map