automatey 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165) hide show
  1. package/.env.defaults +100 -0
  2. package/.env.secret.example +42 -0
  3. package/LICENSE +21 -0
  4. package/README.md +258 -0
  5. package/dist/adapters/index.d.ts +3 -0
  6. package/dist/adapters/index.d.ts.map +1 -0
  7. package/dist/adapters/index.js +3 -0
  8. package/dist/adapters/index.js.map +1 -0
  9. package/dist/adapters/judge-client.d.ts +59 -0
  10. package/dist/adapters/judge-client.d.ts.map +1 -0
  11. package/dist/adapters/judge-client.js +123 -0
  12. package/dist/adapters/judge-client.js.map +1 -0
  13. package/dist/adapters/llm-client.d.ts +54 -0
  14. package/dist/adapters/llm-client.d.ts.map +1 -0
  15. package/dist/adapters/llm-client.js +327 -0
  16. package/dist/adapters/llm-client.js.map +1 -0
  17. package/dist/commands/checkpoint-command.d.ts +3 -0
  18. package/dist/commands/checkpoint-command.d.ts.map +1 -0
  19. package/dist/commands/checkpoint-command.js +105 -0
  20. package/dist/commands/checkpoint-command.js.map +1 -0
  21. package/dist/commands/clear-command.d.ts +3 -0
  22. package/dist/commands/clear-command.d.ts.map +1 -0
  23. package/dist/commands/clear-command.js +11 -0
  24. package/dist/commands/clear-command.js.map +1 -0
  25. package/dist/commands/compact-command.d.ts +3 -0
  26. package/dist/commands/compact-command.d.ts.map +1 -0
  27. package/dist/commands/compact-command.js +20 -0
  28. package/dist/commands/compact-command.js.map +1 -0
  29. package/dist/commands/config-command.d.ts +3 -0
  30. package/dist/commands/config-command.d.ts.map +1 -0
  31. package/dist/commands/config-command.js +28 -0
  32. package/dist/commands/config-command.js.map +1 -0
  33. package/dist/commands/cost-command.d.ts +3 -0
  34. package/dist/commands/cost-command.d.ts.map +1 -0
  35. package/dist/commands/cost-command.js +42 -0
  36. package/dist/commands/cost-command.js.map +1 -0
  37. package/dist/commands/eval-command.d.ts +16 -0
  38. package/dist/commands/eval-command.d.ts.map +1 -0
  39. package/dist/commands/eval-command.js +132 -0
  40. package/dist/commands/eval-command.js.map +1 -0
  41. package/dist/commands/help-command.d.ts +3 -0
  42. package/dist/commands/help-command.d.ts.map +1 -0
  43. package/dist/commands/help-command.js +37 -0
  44. package/dist/commands/help-command.js.map +1 -0
  45. package/dist/commands/index.d.ts +8 -0
  46. package/dist/commands/index.d.ts.map +1 -0
  47. package/dist/commands/index.js +42 -0
  48. package/dist/commands/index.js.map +1 -0
  49. package/dist/commands/load-command.d.ts +3 -0
  50. package/dist/commands/load-command.d.ts.map +1 -0
  51. package/dist/commands/load-command.js +42 -0
  52. package/dist/commands/load-command.js.map +1 -0
  53. package/dist/commands/model-command.d.ts +3 -0
  54. package/dist/commands/model-command.d.ts.map +1 -0
  55. package/dist/commands/model-command.js +42 -0
  56. package/dist/commands/model-command.js.map +1 -0
  57. package/dist/commands/save-command.d.ts +3 -0
  58. package/dist/commands/save-command.d.ts.map +1 -0
  59. package/dist/commands/save-command.js +17 -0
  60. package/dist/commands/save-command.js.map +1 -0
  61. package/dist/commands/servers-command.d.ts +3 -0
  62. package/dist/commands/servers-command.d.ts.map +1 -0
  63. package/dist/commands/servers-command.js +99 -0
  64. package/dist/commands/servers-command.js.map +1 -0
  65. package/dist/commands/think-command.d.ts +3 -0
  66. package/dist/commands/think-command.d.ts.map +1 -0
  67. package/dist/commands/think-command.js +37 -0
  68. package/dist/commands/think-command.js.map +1 -0
  69. package/dist/core/chat-engine.d.ts +54 -0
  70. package/dist/core/chat-engine.d.ts.map +1 -0
  71. package/dist/core/chat-engine.js +528 -0
  72. package/dist/core/chat-engine.js.map +1 -0
  73. package/dist/core/checkpoint-manager.d.ts +50 -0
  74. package/dist/core/checkpoint-manager.d.ts.map +1 -0
  75. package/dist/core/checkpoint-manager.js +173 -0
  76. package/dist/core/checkpoint-manager.js.map +1 -0
  77. package/dist/core/command-parser.d.ts +6 -0
  78. package/dist/core/command-parser.d.ts.map +1 -0
  79. package/dist/core/command-parser.js +14 -0
  80. package/dist/core/command-parser.js.map +1 -0
  81. package/dist/core/compact.d.ts +23 -0
  82. package/dist/core/compact.d.ts.map +1 -0
  83. package/dist/core/compact.js +56 -0
  84. package/dist/core/compact.js.map +1 -0
  85. package/dist/core/config-manager.d.ts +13 -0
  86. package/dist/core/config-manager.d.ts.map +1 -0
  87. package/dist/core/config-manager.js +86 -0
  88. package/dist/core/config-manager.js.map +1 -0
  89. package/dist/core/context-manager.d.ts +17 -0
  90. package/dist/core/context-manager.d.ts.map +1 -0
  91. package/dist/core/context-manager.js +51 -0
  92. package/dist/core/context-manager.js.map +1 -0
  93. package/dist/core/eval-runner.d.ts +110 -0
  94. package/dist/core/eval-runner.d.ts.map +1 -0
  95. package/dist/core/eval-runner.js +177 -0
  96. package/dist/core/eval-runner.js.map +1 -0
  97. package/dist/core/index.d.ts +12 -0
  98. package/dist/core/index.d.ts.map +1 -0
  99. package/dist/core/index.js +11 -0
  100. package/dist/core/index.js.map +1 -0
  101. package/dist/core/interfaces.d.ts +179 -0
  102. package/dist/core/interfaces.d.ts.map +1 -0
  103. package/dist/core/interfaces.js +6 -0
  104. package/dist/core/interfaces.js.map +1 -0
  105. package/dist/core/mcp-client-manager.d.ts +32 -0
  106. package/dist/core/mcp-client-manager.d.ts.map +1 -0
  107. package/dist/core/mcp-client-manager.js +118 -0
  108. package/dist/core/mcp-client-manager.js.map +1 -0
  109. package/dist/core/mcp-config-manager.d.ts +40 -0
  110. package/dist/core/mcp-config-manager.d.ts.map +1 -0
  111. package/dist/core/mcp-config-manager.js +120 -0
  112. package/dist/core/mcp-config-manager.js.map +1 -0
  113. package/dist/core/session-manager.d.ts +16 -0
  114. package/dist/core/session-manager.d.ts.map +1 -0
  115. package/dist/core/session-manager.js +87 -0
  116. package/dist/core/session-manager.js.map +1 -0
  117. package/dist/core/skills-manager.d.ts +49 -0
  118. package/dist/core/skills-manager.d.ts.map +1 -0
  119. package/dist/core/skills-manager.js +175 -0
  120. package/dist/core/skills-manager.js.map +1 -0
  121. package/dist/core/sub-agent.d.ts +23 -0
  122. package/dist/core/sub-agent.d.ts.map +1 -0
  123. package/dist/core/sub-agent.js +98 -0
  124. package/dist/core/sub-agent.js.map +1 -0
  125. package/dist/index.d.ts +11 -0
  126. package/dist/index.d.ts.map +1 -0
  127. package/dist/index.js +230 -0
  128. package/dist/index.js.map +1 -0
  129. package/dist/tui/index.d.ts +2 -0
  130. package/dist/tui/index.d.ts.map +1 -0
  131. package/dist/tui/index.js +3 -0
  132. package/dist/tui/index.js.map +1 -0
  133. package/dist/tui/renderer.d.ts +32 -0
  134. package/dist/tui/renderer.d.ts.map +1 -0
  135. package/dist/tui/renderer.js +225 -0
  136. package/dist/tui/renderer.js.map +1 -0
  137. package/dist/utils/index.d.ts +5 -0
  138. package/dist/utils/index.d.ts.map +1 -0
  139. package/dist/utils/index.js +6 -0
  140. package/dist/utils/index.js.map +1 -0
  141. package/dist/utils/logger.d.ts +19 -0
  142. package/dist/utils/logger.d.ts.map +1 -0
  143. package/dist/utils/logger.js +37 -0
  144. package/dist/utils/logger.js.map +1 -0
  145. package/dist/utils/logo.d.ts +24 -0
  146. package/dist/utils/logo.d.ts.map +1 -0
  147. package/dist/utils/logo.js +119 -0
  148. package/dist/utils/logo.js.map +1 -0
  149. package/dist/utils/message-formatter.d.ts +19 -0
  150. package/dist/utils/message-formatter.d.ts.map +1 -0
  151. package/dist/utils/message-formatter.js +79 -0
  152. package/dist/utils/message-formatter.js.map +1 -0
  153. package/dist/utils/syntax-highlighter.d.ts +6 -0
  154. package/dist/utils/syntax-highlighter.d.ts.map +1 -0
  155. package/dist/utils/syntax-highlighter.js +34 -0
  156. package/dist/utils/syntax-highlighter.js.map +1 -0
  157. package/mcp/coder/dist/server.d.ts +15 -0
  158. package/mcp/coder/dist/server.d.ts.map +1 -0
  159. package/mcp/coder/dist/server.js +421 -0
  160. package/mcp/coder/dist/server.js.map +1 -0
  161. package/mcp/planner/dist/server.d.ts +23 -0
  162. package/mcp/planner/dist/server.d.ts.map +1 -0
  163. package/mcp/planner/dist/server.js +373 -0
  164. package/mcp/planner/dist/server.js.map +1 -0
  165. package/package.json +95 -0
package/.env.defaults ADDED
@@ -0,0 +1,100 @@
1
+ # ─────────────────────────────────────────────────────────────────────────────
2
+ # automatey — default configuration
3
+ #
4
+ # Copy relevant lines to .env.secret (or set as real env vars) to override.
5
+ # These values are also readable by the ConfigManager as JSON at:
6
+ # ~/.automatey/config.json
7
+ #
8
+ # See .env.secret.example for provider API key examples.
9
+ # ─────────────────────────────────────────────────────────────────────────────
10
+
11
+
12
+ # ── LLM Provider ─────────────────────────────────────────────────────────────
13
+ # Which provider to use: nemotron | openai | anthropic | perplexity
14
+ LLM_PROVIDER=nemotron
15
+
16
+ # The model identifier passed to the API
17
+ LLM_MODEL=nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4
18
+
19
+ # Comma-separated list of selectable models (shown in /model command)
20
+ # LLM_MODELS=nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4,gpt-4o,claude-opus-4-5
21
+
22
+ # API key — required for openai / anthropic / perplexity; optional for nemotron (local)
23
+ # LLM_API_KEY=sk-...
24
+ #
25
+ # Provider-specific API keys (read from environment or .env.secret):
26
+ # OPENAI_API_KEY=sk-...
27
+ # ANTHROPIC_API_KEY=sk-ant-...
28
+ # PERPLEXITY_API_KEY=pplx-...
29
+
30
+
31
+ # ── nemotron / OpenAI-compatible endpoint ────────────────────────────────────
32
+ # Base URL for the vLLM / OpenAI-compatible server (nemotron provider only)
33
+ LLM_BASE_URL=http://localhost:8002
34
+
35
+
36
+ # ── Token budgets ─────────────────────────────────────────────────────────────
37
+ # Context window size (characters are estimated at 4 chars/token)
38
+ # The context manager trims oldest messages when this is exceeded.
39
+ LLM_MAX_TOKENS=200000
40
+
41
+ # Per-call output token limit when thinking is OFF
42
+ LLM_MAX_OUTPUT_TOKENS=4096
43
+
44
+ # Per-call output token limit when thinking is ON (must be > LLM_THINKING_BUDGET)
45
+ # Effective limit = max(LLM_MAX_OUTPUT_TOKENS_THINK, LLM_THINKING_BUDGET + 4096)
46
+ LLM_MAX_OUTPUT_TOKENS_THINK=8192
47
+
48
+ # Token budget for chain-of-thought reasoning (thinking=true)
49
+ LLM_THINKING_BUDGET=4096
50
+
51
+
52
+ # ── Sampling ──────────────────────────────────────────────────────────────────
53
+ TEMPERATURE=0.1
54
+
55
+
56
+ # ── Agentic loop ──────────────────────────────────────────────────────────────
57
+ # Maximum number of tool-call rounds before aborting with a warning
58
+ AGENT_MAX_TOOL_ROUNDS=20
59
+
60
+ # Maximum empty-response retries before giving up
61
+ AGENT_MAX_EMPTY_RETRIES=2
62
+
63
+ # Context usage ratio (0.0–1.0) that triggers auto-compact summarization.
64
+ # Set to 1.0 to disable auto-compact.
65
+ AGENT_COMPACT_THRESHOLD=0.8
66
+
67
+
68
+ # ── Sessions ──────────────────────────────────────────────────────────────────
69
+ # Directory for saved sessions (default: ~/.automatey/sessions)
70
+ # SESSION_DIR=/path/to/sessions
71
+
72
+
73
+ # ── MCP ───────────────────────────────────────────────────────────────────────
74
+ # Full MCP server config used by the activate_tools mechanism.
75
+ # Default: ~/.automatey/mcp.json
76
+ # MCP_CONFIG=/path/to/mcp.json
77
+
78
+ # Startup-subset config (servers to connect at launch; rest deferred).
79
+ # Search order: {cwd}/.automatey/mcp.chat.json → ~/.automatey/mcp.chat.json
80
+ # MCP_CHAT_CONFIG=/path/to/mcp.chat.json
81
+
82
+
83
+ # ── UI ────────────────────────────────────────────────────────────────────────
84
+ # Show token count in prompt: true | false
85
+ UI_SHOW_TOKENS=true
86
+
87
+ # Syntax-highlight code blocks in responses: true | false
88
+ UI_SYNTAX_HIGHLIGHT=true
89
+
90
+
91
+ # ── Eval / LLM-as-judge ───────────────────────────────────────────────────────
92
+ # Provider for the LLM judge used in /eval runs.
93
+ # Supported: openai | anthropic | perplexity | nemotron
94
+ # EVAL_JUDGE_PROVIDER=perplexity
95
+
96
+ # Model identifier for the judge (must belong to EVAL_JUDGE_PROVIDER)
97
+ # EVAL_JUDGE_MODEL=sonar-pro
98
+
99
+ # When to invoke the judge: always | on-failure | never
100
+ # EVAL_JUDGE_MODE=on-failure
package/.env.secret.example ADDED
@@ -0,0 +1,42 @@
1
+ # ─────────────────────────────────────────────────────────────────────────────
2
+ # automatey — secret keys example
3
+ #
4
+ # Copy this file to .env.secret and fill in your actual API keys.
5
+ # .env.secret is gitignored and will never be committed.
6
+ #
7
+ # Usage: the agent loads .env.defaults first, then .env.secret overrides.
8
+ # ─────────────────────────────────────────────────────────────────────────────
9
+
10
+
11
+ # ── OpenAI ───────────────────────────────────────────────────────────────────
12
+ # Required when LLM_PROVIDER=openai
13
+ # Get yours at: https://platform.openai.com/api-keys
14
+ OPENAI_API_KEY=sk-...
15
+
16
+
17
+ # ── Anthropic ────────────────────────────────────────────────────────────────
18
+ # Required when LLM_PROVIDER=anthropic
19
+ # Get yours at: https://console.anthropic.com/settings/keys
20
+ ANTHROPIC_API_KEY=sk-ant-...
21
+
22
+
23
+ # ── Perplexity ───────────────────────────────────────────────────────────────
24
+ # Required when LLM_PROVIDER=perplexity OR EVAL_JUDGE_PROVIDER=perplexity
25
+ # Get yours at: https://www.perplexity.ai/settings/api
26
+ PERPLEXITY_API_KEY=pplx-...
27
+
28
+
29
+ # ── Brave Search ─────────────────────────────────────────────────────────────
30
+ # Required for the brave-search MCP server tool
31
+ # Get yours at: https://api.search.brave.com/app/keys
32
+ BRAVE_API_KEY=BSA...
33
+
34
+
35
+ # ── LLM-as-judge (eval) ───────────────────────────────────────────────────────
36
+ # Optional — enables automatic verdict generation in /eval runs.
37
+ # EVAL_JUDGE_PROVIDER can be: openai | anthropic | perplexity | nemotron
38
+ # EVAL_JUDGE_MODE can be: always | on-failure | never
39
+ #
40
+ # EVAL_JUDGE_PROVIDER=perplexity
41
+ # EVAL_JUDGE_MODEL=sonar-pro
42
+ # EVAL_JUDGE_MODE=on-failure
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Max Golov
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,258 @@
1
+ <p align="center">
2
+ <img src="extra/logo/automatey-term+scanlines.svg" alt="Automatey" width="220" />
3
+ </p>
4
+
5
+ <h1 align="center">⚓ Automatey</h1>
6
+ <p align="center"><em>Yer lean &amp; mean agentic helper — no fluff, all action, a touch o' pirate.</em></p>
7
+
8
+ <p align="center">
9
+ <img alt="tests" src="https://img.shields.io/badge/tests-124%20passing-brightgreen?style=flat-square">
10
+ <img alt="node" src="https://img.shields.io/badge/node-%3E%3D20-blue?style=flat-square">
11
+ <img alt="license" src="https://img.shields.io/badge/license-MIT-blue?style=flat-square">
12
+ </p>
13
+
14
+ ---
15
+
16
+ **Automatey** is a minimal, MCP-powered CLI agent that lets any LLM wield real tools — file ops, shell commands, web search, memory, planning — through a clean ReAct loop. No bloat. No framework lock-in. Just a sharp hook and a fast ship.
17
+
18
+ ```
19
+ ══════════════════════════════════════════════════════════════════════
20
+ 🤖 automatey — lean & mean agent
21
+ ══════════════════════════════════════════════════════════════════════
22
+ Provider: nemotron
23
+ Model: nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-NVFP4
24
+ Session: session-2026-03-12
25
+ Sandbox: /workspace/my-project/sandbox
26
+
27
+ Type /help for commands | Ctrl+C to exit
28
+ Logs: tail -f ~/.automatey/logs/agent.log
29
+ ══════════════════════════════════════════════════════════════════════
30
+ ```
31
+
32
+
33
+ ## Features
34
+
35
+ - **Providers**: Nemotron (vLLM/OpenAI-compatible), OpenAI, Anthropic, Perplexity
36
+ - **MCP tools**: Any stdio or HTTP MCP server; auto-loaded from `./mcp.json` or `~/.automatey/mcp.json`
37
+ - **ReAct loop**: Up to 20 tool-call rounds per message (configurable)
38
+ - **Chain-of-thought**: `/think` toggle for Nemotron / Anthropic reasoning tokens
39
+ - **Sandbox**: Isolated directory for agent file I/O and code execution (`./sandbox` default, gitignored)
40
+ - **Sessions**: Save/load conversation sessions in `~/.automatey/sessions/`
41
+ - **Checkpoints**: `/checkpoint` — full conversation snapshots with BM25 keyword search
42
+ - **Auto-compact**: LLM summarises older context when usage ≥ 80%
43
+ - **Skills**: Progressive SKILL.md loading from `.agents/skills/`
44
+ - **Planner MCP**: Bundled `mcp/planner` — todos + plans
45
+ - **Coder MCP**: Bundled `mcp/coder` — read/write/edit files, run commands, glob files
46
+
47
+ ## Quick Start
48
+
49
+ ```bash
50
+ git clone https://github.com/maxgolov/automatey.git
51
+ cd automatey
52
+ npm install
53
+ cp mcp.example.json mcp.json # edit to add your API keys
54
+ npm run build
55
+ node dist/index.js chat
56
+ ```
57
+
58
+ Override provider, model, and sandbox:
59
+
60
+ ```bash
61
+ node dist/index.js chat --provider openai --model gpt-4o --sandbox ./my-sandbox
62
+ ```
63
+
64
+ ## CLI Installation
65
+
66
+ ### Option A — `npm link` (recommended)
67
+
68
+ ```bash
69
+ npm run build
70
+ npm run link:cli # registers "automatey" globally via symlink
71
+ ```
72
+
73
+ After linking:
74
+ ```bash
75
+ automatey chat
76
+ automatey --help
77
+ ```
78
+
79
+ Unlink: `npm run unlink:cli`
80
+
81
+ ### Option B — Shell alias
82
+
83
+ ```bash
84
+ alias automatey="node /path/to/automatey/dist/index.js"
85
+ ```
86
+
87
+ > JSON / JSONC in `mcp.json`: Standard JSON does not allow `//` comments.
88
+ > The agent uses a JSONC parser — `//` line comments and `/* */` block comments
89
+ > are fully supported in all `mcp.json` files.
90
+
91
+ ## MCP Config — `mcp.json`
92
+
93
+ The agent looks for MCP config in this order:
94
+ 1. `./mcp.json` (current working directory / project root)
95
+ 2. `~/.automatey/mcp.json` (global fallback)
96
+
97
+ Copy `mcp.example.json` from this repo as your starting point:
98
+
99
+ ```bash
100
+ cp mcp.example.json mcp.json # project-local
101
+ # OR
102
+ cp mcp.example.json ~/.automatey/mcp.json # global
103
+ ```
104
+
105
+ Example `mcp.json`:
106
+
107
+ ```jsonc
108
+ {
109
+ // JSONC is supported — '//' and '/* */' comments are stripped before parsing
110
+ "mcpServers": {
111
+ "planner": {
112
+ "type": "stdio",
113
+ "command": "node",
114
+ "args": ["./mcp/planner/dist/server.js"]
115
+ },
116
+ "brave-search": {
117
+ "type": "stdio",
118
+ "command": "npx",
119
+ "args": ["-y", "@brave/brave-search-mcp-server", "--transport", "stdio"],
120
+ "env": { "BRAVE_API_KEY": "${env:BRAVE_API_KEY}" },
121
+ "requiresEnv": "BRAVE_API_KEY"
122
+ },
123
+ "memento": {
124
+ "type": "http",
125
+ "url": "http://localhost:3500/mcp",
126
+ "portCheck": true
127
+ }
128
+ }
129
+ }
130
+ ```
131
+
132
+ Conditional loading:
133
+ - `requiresEnv` — skip server if env var is missing (no key = no server, no error)
134
+ - `portCheck: true` — skip HTTP/SSE server if the URL is unreachable at startup (1 s probe)
135
+
136
+ ## Config
137
+
138
+ Config lives in `~/.automatey/config.json` (auto-created on first run):
139
+
140
+ ```json
141
+ {
142
+ "provider": "nemotron",
143
+ "llm": {
144
+ "baseUrl": "http://localhost:8002",
145
+ "model": "nvidia/Llama-3.1-Nemotron-Nano-8B-v1"
146
+ }
147
+ }
148
+ ```
149
+
150
+ Environment variables (copy `.env.defaults` → `.env` to override):
151
+
152
+ | Variable | Default | Description |
153
+ |---|---|---|
154
+ | `LLM_PROVIDER` | `nemotron` | `nemotron` \| `openai` \| `anthropic` \| `perplexity` |
155
+ | `LLM_MODEL` | Nemotron NVFP4 | Model ID |
156
+ | `LLM_BASE_URL` | `http://localhost:8002` | vLLM endpoint |
157
+ | `TEMPERATURE` | `0.1` | Sampling temperature |
158
+ | `AGENT_MAX_TOOL_ROUNDS` | `20` | Max ReAct rounds |
159
+ | `AGENT_COMPACT_THRESHOLD` | `0.8` | Auto-compact at 80% context fill |
160
+
161
+ ## Commands
162
+
163
+ | Command | Description |
164
+ |---------|-------------|
165
+ | `/help` | Show all commands |
166
+ | `/model` | List / switch model |
167
+ | `/think [on\|off\|budget N]` | Toggle CoT reasoning |
168
+ | `/save [name]` | Save session |
169
+ | `/load [name]` | Load session |
170
+ | `/servers` | Manage MCP connections |
171
+ | `/config` | Show config |
172
+ | `/cost` | Show estimated token usage |
173
+ | `/compact` | Manually compact context via LLM summarization |
174
+ | `/checkpoint [save\|list\|restore N\|search q\|delete N]` | Manage checkpoints |
175
+ | `/clear` | Clear context |
176
+ | `/exit` | Quit |
177
+
178
+ ## Context Management
179
+
180
+ ### Auto-compact
181
+ When estimated context usage reaches `AGENT_COMPACT_THRESHOLD` (default 80%), the older portion of the conversation is automatically summarized by the LLM and replaced with a concise summary message. This keeps the token count manageable without discarding knowledge.
182
+
183
+ Disable auto-compact by setting `AGENT_COMPACT_THRESHOLD=1.0`; the `/compact` command can still be used to trigger compaction manually at any time.
184
+
185
+ ### Checkpoints
186
+
187
+ Checkpoints are full conversation snapshots saved to `~/.automatey/checkpoints/` as JSON:
188
+
189
+ ```
190
+ /checkpoint save # save current conversation
191
+ /checkpoint list # list checkpoints (newest first)
192
+ /checkpoint restore 2 # restore checkpoint #2 into context
193
+ /checkpoint search "bm25" # BM25 keyword search across all checkpoints
194
+ /checkpoint delete 3 # delete checkpoint #3
195
+ ```
196
+
197
+ The BM25 search indexes the full message history of every checkpoint and ranks them by keyword relevance.
198
+
199
+ ## Built-in MCP Servers
200
+
201
+ ### 🗂 Coder (`mcp/coder`)
202
+ | Tool | What it does |
203
+ |------|-------------|
204
+ | `read_file` | Read file contents with optional line range |
205
+ | `write_file` | Write / create a file |
206
+ | `edit_file` | Replace an exact string in a file |
207
+ | `execute_command` | Run a shell command (default cwd: sandbox) |
208
+ | `search_text` | Grep-style text search |
209
+ | `list_dir` | List directory contents |
210
+ | `glob_files` | Find files by glob pattern (`**/*.ts`, `src/**`) |
211
+
212
+ ### 📋 Planner (`mcp/planner`)
213
+ Todos and multi-step plans persisted to `~/.automatey/planner/`.
214
+
215
+ ## Development
216
+
217
+ ```bash
218
+ npm run dev # tsx watch (no build needed)
219
+ npm run build # tsc + build all MCP servers
220
+ npm test # 124 tests (Vitest)
221
+ npm run test:watch # watch mode
222
+ ```
223
+
224
+ **VS Code tasks** (Ctrl+Shift+B / Ctrl+Shift+P → Run Task):
225
+ - `Build: All`
226
+ - `Run: CLI (automatey)` — builds first, then launches
227
+ - `Run: CLI (dev, no build)` — tsx, faster iteration
228
+ - `Test: All`
229
+ - `Test: Hello World (verbose)`
230
+
231
+ ## Tests
232
+
233
+ ```
234
+ Tests 124 passed
235
+ ├── unit/ command-parser, context-manager, config, session,
236
+ │ mcp-config, llm-client, skills, coder-server
237
+ └── integration/ openai, anthropic, nemotron, planner,
238
+ coder-hello-world (all 3 providers)
239
+ ```
240
+
241
+ The `coder-hello-world` integration tests drive a full ReAct loop per provider —
242
+ LLM writes `index.js`, executes it, output is verified. Results live in `sandbox/<provider>/`:
243
+
244
+ ```
245
+ [openai] execute_command output: Hello, World!
246
+ [anthropic] execute_command output: Hello, World!
247
+ [nemotron] execute_command output: Hello, World!
248
+ ```
249
+
250
+ ## Artwork
251
+
252
+ Logos in `extra/logo/` are from the [Automatey](https://github.com/top-5/automatey) terminal project,
253
+ licensed **CC BY 4.0** — Copyright © 2024–2025 Top-5 And Contributors.
254
+ Used here with attribution as permitted by the license.
255
+
256
+ ## License
257
+
258
+ MIT — see [LICENSE](LICENSE).
package/dist/adapters/index.d.ts ADDED
@@ -0,0 +1,3 @@
1
+ export { LLMClient } from './llm-client.js';
2
+ export type { LLMClientConfig } from './llm-client.js';
3
+ //# sourceMappingURL=index.d.ts.map
package/dist/adapters/index.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/adapters/index.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AAC5C,YAAY,EAAE,eAAe,EAAE,MAAM,iBAAiB,CAAC"}
package/dist/adapters/index.js ADDED
@@ -0,0 +1,3 @@
1
+ // Adapters barrel
2
+ export { LLMClient } from './llm-client.js';
3
+ //# sourceMappingURL=index.js.map
package/dist/adapters/index.js.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/adapters/index.ts"],"names":[],"mappings":"AAAA,kBAAkB;AAClB,OAAO,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC"}
package/dist/adapters/judge-client.d.ts ADDED
@@ -0,0 +1,59 @@
1
+ /**
2
+ * LLM-as-Judge — uses generateObject() with a structured Zod schema to score
3
+ * eval responses with a second LLM.
4
+ *
5
+ * Env vars:
6
+ * EVAL_JUDGE_PROVIDER — perplexity | openai | anthropic | nemotron
7
+ * EVAL_JUDGE_MODEL — model name (defaults per provider below)
8
+  *  EVAL_JUDGE_MODE     — never | on-failure (default) | always
9
+ *
10
+ * Default models per provider:
11
+ * perplexity → sonar-pro (web-grounded, includes citations)
12
+ * openai → gpt-4o-mini
13
+ * anthropic → claude-haiku-4-5
14
+ * nemotron → (same as primary)
15
+ */
16
+ import { z } from 'zod';
17
+ import type { LLMProvider } from '#core/interfaces';
18
+ export declare const JudgeVerdictSchema: z.ZodObject<{
19
+ verdict: z.ZodEnum<["correct", "incorrect", "uncertain"]>;
20
+ reasoning: z.ZodString;
21
+ confidence: z.ZodNumber;
22
+ }, "strip", z.ZodTypeAny, {
23
+ reasoning: string;
24
+ verdict: "correct" | "incorrect" | "uncertain";
25
+ confidence: number;
26
+ }, {
27
+ reasoning: string;
28
+ verdict: "correct" | "incorrect" | "uncertain";
29
+ confidence: number;
30
+ }>;
31
+ export type JudgeVerdict = z.infer<typeof JudgeVerdictSchema> & {
32
+ /** Cited URLs from Perplexity responses (when EVAL_JUDGE_PROVIDER=perplexity). */
33
+ sources?: string[];
34
+ };
35
+ export type JudgeMode = 'never' | 'on-failure' | 'always';
36
+ export interface JudgeConfig {
37
+ provider: LLMProvider;
38
+ model: string;
39
+ mode: JudgeMode;
40
+ /** Base URL (needed for nemotron/openai-compatible). */
41
+ baseUrl?: string;
42
+ /** API key override — falls back to env-var per provider. */
43
+ apiKey?: string;
44
+ }
45
+ /**
46
+ * Build a JudgeConfig from environment variables.
47
+ * Returns null when EVAL_JUDGE_PROVIDER is not set (judge disabled).
48
+ */
49
+ export declare function judgeConfigFromEnv(overrideBaseUrl?: string): JudgeConfig | null;
50
+ /**
51
+ * Judge a single eval result.
52
+ * @param config Judge configuration (provider, model, mode).
53
+ * @param question The original question asked.
54
+ * @param expectedAnswer The expected/reference answer.
55
+ * @param actualAnswer The actual response from the primary LLM.
56
+ * @returns JudgeVerdict (includes `sources[]` for Perplexity judges).
57
+ */
58
+ export declare function judgeResult(config: JudgeConfig, question: string, expectedAnswer: string, actualAnswer: string): Promise<JudgeVerdict>;
59
+ //# sourceMappingURL=judge-client.d.ts.map
package/dist/adapters/judge-client.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"judge-client.d.ts","sourceRoot":"","sources":["../../src/adapters/judge-client.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;GAcG;AAOH,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AACxB,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,kBAAkB,CAAC;AAIpD,eAAO,MAAM,kBAAkB;;;;;;;;;;;;EAI7B,CAAC;AAEH,MAAM,MAAM,YAAY,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,kBAAkB,CAAC,GAAG;IAC9D,kFAAkF;IAClF,OAAO,CAAC,EAAE,MAAM,EAAE,CAAC;CACpB,CAAC;AAEF,MAAM,MAAM,SAAS,GAAG,OAAO,GAAG,YAAY,GAAG,QAAQ,CAAC;AAE1D,MAAM,WAAW,WAAW;IAC1B,QAAQ,EAAE,WAAW,CAAC;IACtB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,SAAS,CAAC;IAChB,wDAAwD;IACxD,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,6DAA6D;IAC7D,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB;AAaD;;;GAGG;AACH,wBAAgB,kBAAkB,CAAC,eAAe,CAAC,EAAE,MAAM,GAAG,WAAW,GAAG,IAAI,CA0B/E;AAyCD;;;;;;;GAOG;AACH,wBAAsB,WAAW,CAC/B,MAAM,EAAE,WAAW,EACnB,QAAQ,EAAE,MAAM,EAChB,cAAc,EAAE,MAAM,EACtB,YAAY,EAAE,MAAM,GACnB,OAAO,CAAC,YAAY,CAAC,CA4BvB"}
@@ -0,0 +1,123 @@
1
+ /**
2
+ * LLM-as-Judge — uses generateObject() with a structured Zod schema to score
3
+ * eval responses with a second LLM.
4
+ *
5
+ * Env vars:
6
+ * EVAL_JUDGE_PROVIDER — perplexity | openai | anthropic | nemotron
7
+ * EVAL_JUDGE_MODEL — model name (defaults per provider below)
8
+ * EVAL_JUDGE_MODE — on-failure (default) | always
9
+ *
10
+ * Default models per provider:
11
+ * perplexity → sonar-pro (web-grounded, includes citations)
12
+ * openai → gpt-4o-mini
13
+ * anthropic → claude-haiku-4-5
14
+ * nemotron → (same as primary)
15
+ */
16
+ import { createAnthropic } from '@ai-sdk/anthropic';
17
+ import { createOpenAI } from '@ai-sdk/openai';
18
+ import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
19
+ import { createPerplexity } from '@ai-sdk/perplexity';
20
+ import { generateObject } from 'ai';
21
+ import { z } from 'zod';
22
// ── Types ─────────────────────────────────────────────────────────────────────
/**
 * Zod schema describing the judge's structured verdict; passed to
 * generateObject() so the judge LLM's output is validated against it.
 */
export const JudgeVerdictSchema = z.object({
    // Three-way outcome — the judge can abstain with "uncertain".
    verdict: z.enum(['correct', 'incorrect', 'uncertain']),
    // Short explanation of why the verdict was chosen.
    reasoning: z.string(),
    // Judge's self-reported confidence, constrained to [0, 1] by validation.
    confidence: z.number().min(0).max(1),
});
28
// ── Defaults ──────────────────────────────────────────────────────────────────
// Default judge model per provider, used when EVAL_JUDGE_MODEL is unset.
// Frozen: this is a shared, read-only lookup table.
const DEFAULT_MODELS = Object.freeze({
    perplexity: 'sonar-pro',
    openai: 'gpt-4o-mini',
    anthropic: 'claude-haiku-4-5',
    nemotron: 'nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16',
});
// ── Config factory ────────────────────────────────────────────────────────────
/**
 * Build a JudgeConfig from environment variables.
 * Returns null when EVAL_JUDGE_PROVIDER is not set (judge disabled).
 *
 * Env vars read:
 *   EVAL_JUDGE_PROVIDER — perplexity | openai | anthropic | nemotron
 *   EVAL_JUDGE_MODEL    — optional; defaults per provider (DEFAULT_MODELS)
 *   EVAL_JUDGE_MODE     — never | on-failure | always (default: on-failure)
 *   LLM_BASE_URL        — optional base URL for openai-compatible judges
 *
 * NOTE(review): despite the parameter name, LLM_BASE_URL takes precedence
 * over `overrideBaseUrl` — the argument is only a fallback. Confirm intended.
 *
 * @param {string} [overrideBaseUrl] Fallback base URL when LLM_BASE_URL is unset.
 * @returns {JudgeConfig|null} Config object, or null when the judge is disabled.
 * @throws {Error} When the provider or mode value is not in the allowed set.
 */
export function judgeConfigFromEnv(overrideBaseUrl) {
    const provider = process.env.EVAL_JUDGE_PROVIDER;
    if (!provider)
        return null;
    const validProviders = ['perplexity', 'openai', 'anthropic', 'nemotron'];
    if (!validProviders.includes(provider)) {
        throw new Error(`EVAL_JUDGE_PROVIDER="${provider}" is not valid. Choose from: ${validProviders.join(', ')}`);
    }
    const model = process.env.EVAL_JUDGE_MODEL ?? DEFAULT_MODELS[provider];
    const rawMode = process.env.EVAL_JUDGE_MODE ?? 'on-failure';
    const validModes = ['never', 'on-failure', 'always'];
    if (!validModes.includes(rawMode)) {
        throw new Error(`EVAL_JUDGE_MODE="${rawMode}" is not valid. Choose from: ${validModes.join(', ')}`);
    }
    return {
        provider,
        model,
        mode: rawMode,
        baseUrl: process.env.LLM_BASE_URL ?? overrideBaseUrl,
    };
}
61
// ── Model builder ─────────────────────────────────────────────────────────────
/**
 * Instantiate the AI-SDK language model for the configured judge provider.
 * API keys come from config.apiKey when set, otherwise the provider's
 * conventional environment variable ('' as a last resort).
 */
function buildJudgeModel(config) {
    // Resolve the effective API key for a given env-var fallback.
    const resolveKey = (envVar) => config.apiKey ?? process.env[envVar] ?? '';
    if (config.provider === 'perplexity') {
        return createPerplexity({ apiKey: resolveKey('PERPLEXITY_API_KEY') })(config.model);
    }
    if (config.provider === 'openai') {
        return createOpenAI({ apiKey: resolveKey('OPENAI_API_KEY') })(config.model);
    }
    if (config.provider === 'anthropic') {
        return createAnthropic({ apiKey: resolveKey('ANTHROPIC_API_KEY') })(config.model);
    }
    if (config.provider === 'nemotron') {
        // Strip a single trailing slash before appending the /v1 suffix.
        const root = (config.baseUrl ?? 'http://localhost:8002').replace(/\/$/, '');
        const compat = createOpenAICompatible({
            name: 'vllm-judge',
            baseURL: `${root}/v1`,
            // Attach an Authorization header only when a key was supplied.
            ...(config.apiKey ? { headers: { Authorization: `Bearer ${config.apiKey}` } } : {}),
        });
        return compat(config.model);
    }
    // Unknown provider: fall through to undefined, matching the original switch.
}
87
// ── Judge function ────────────────────────────────────────────────────────────
// System prompt for the judge model; the JSON shape it requests mirrors
// JudgeVerdictSchema (verdict / reasoning / confidence).
const JUDGE_SYSTEM_PROMPT = [
    'You are an impartial evaluator judging whether an AI assistant answered a question correctly. ',
    'Analyze the expected answer and the actual response. ',
    'Return a JSON object with: verdict ("correct" / "incorrect" / "uncertain"), ',
    'a brief reasoning string, and a confidence score between 0 and 1.',
].join('');
92
/**
 * Judge a single eval result with a second LLM, returning a structured verdict.
 * @param config Judge configuration (provider, model, mode).
 * @param question The original question asked.
 * @param expectedAnswer The expected/reference answer.
 * @param actualAnswer The actual response from the primary LLM.
 * @returns JudgeVerdict (includes `sources[]` for Perplexity judges).
 */
export async function judgeResult(config, question, expectedAnswer, actualAnswer) {
    const judgeModel = buildJudgeModel(config);
    // Three labelled sections separated by blank lines.
    const prompt = [
        `Question: ${question}`,
        `Expected answer: ${expectedAnswer}`,
        `Actual response: ${actualAnswer}`,
    ].join('\n\n');
    const result = await generateObject({
        model: judgeModel,
        schema: JudgeVerdictSchema,
        system: JUDGE_SYSTEM_PROMPT,
        prompt,
        maxRetries: 0, // fail fast — judge errors should surface, not retry
    });
    const verdict = { ...result.object };
    // Perplexity includes web citations — surface them in the verdict.
    const providerMeta = result.experimental_providerMetadata;
    const citationUrls = providerMeta?.perplexity?.sources
        ?.map((source) => source?.url)
        .filter((url) => typeof url === 'string');
    if (citationUrls && citationUrls.length > 0) {
        verdict.sources = citationUrls;
    }
    return verdict;
}
123
+ //# sourceMappingURL=judge-client.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"judge-client.js","sourceRoot":"","sources":["../../src/adapters/judge-client.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;GAcG;AAEH,OAAO,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AACpD,OAAO,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAC9C,OAAO,EAAE,sBAAsB,EAAE,MAAM,2BAA2B,CAAC;AACnE,OAAO,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAC;AACtD,OAAO,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AACpC,OAAO,EAAE,CAAC,EAAE,MAAM,KAAK,CAAC;AAGxB,iFAAiF;AAEjF,MAAM,CAAC,MAAM,kBAAkB,GAAG,CAAC,CAAC,MAAM,CAAC;IACzC,OAAO,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,SAAS,EAAE,WAAW,EAAE,WAAW,CAAC,CAAC;IACtD,SAAS,EAAE,CAAC,CAAC,MAAM,EAAE;IACrB,UAAU,EAAE,CAAC,CAAC,MAAM,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC;CACrC,CAAC,CAAC;AAmBH,iFAAiF;AAEjF,MAAM,cAAc,GAAgC;IAClD,UAAU,EAAE,WAAW;IACvB,MAAM,EAAE,aAAa;IACrB,SAAS,EAAE,kBAAkB;IAC7B,QAAQ,EAAE,4CAA4C;CACvD,CAAC;AAEF,iFAAiF;AAEjF;;;GAGG;AACH,MAAM,UAAU,kBAAkB,CAAC,eAAwB;IACzD,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,CAAC,mBAA8C,CAAC;IAC5E,IAAI,CAAC,QAAQ;QAAE,OAAO,IAAI,CAAC;IAE3B,MAAM,cAAc,GAAkB,CAAC,YAAY,EAAE,QAAQ,EAAE,WAAW,EAAE,UAAU,CAAC,CAAC;IACxF,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,QAAQ,CAAC,EAAE,CAAC;QACvC,MAAM,IAAI,KAAK,CACb,wBAAwB,QAAQ,gCAAgC,cAAc,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAC5F,CAAC;IACJ,CAAC;IAED,MAAM,KAAK,GAAG,OAAO,CAAC,GAAG,CAAC,gBAAgB,IAAI,cAAc,CAAC,QAAQ,CAAC,CAAC;IACvE,MAAM,OAAO,GAAG,OAAO,CAAC,GAAG,CAAC,eAAe,IAAI,YAAY,CAAC;IAC5D,MAAM,UAAU,GAAgB,CAAC,OAAO,EAAE,YAAY,EAAE,QAAQ,CAAC,CAAC;IAClE,IAAI,CAAC,UAAU,CAAC,QAAQ,CAAC,OAAoB,CAAC,EAAE,CAAC;QAC/C,MAAM,IAAI,KAAK,CACb,oBAAoB,OAAO,gCAAgC,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CACnF,CAAC;IACJ,CAAC;IAED,OAAO;QACL,QAAQ;QACR,KAAK;QACL,IAAI,EAAE,OAAoB;QAC1B,OAAO,EAAE,OAAO,CAAC,GAAG,CAAC,YAAY,IAAI,eAAe;KACrD,CAAC;AACJ,CAAC;AAED,iFAAiF;AAEjF,SAAS,eAAe,CAAC,MAAmB;IAC1C,QAAQ,MAAM,CAAC,QAAQ,EAAE,CAAC;QACxB,KAAK,YAAY;YACf,OAAO,gBAAgB,CAAC;gBACtB,MAAM,EAAE,MAAM,CAAC,MAAM,IAAI,OAAO,CAAC,GAAG,CAAC,kBAAkB,IAAI,EAAE;aAC9D,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QAEnB,KAAK,QAAQ;YACX,OAAO,YAAY,CAAC;gBAClB,MAAM,EAAE,MAAM,CAAC,MAAM,IAAI,OAAO,CAAC,GAAG,
CAAC,cAAc,IAAI,EAAE;aAC1D,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QAEnB,KAAK,WAAW;YACd,OAAO,eAAe,CAAC;gBACrB,MAAM,EAAE,MAAM,CAAC,MAAM,IAAI,OAAO,CAAC,GAAG,CAAC,iBAAiB,IAAI,EAAE;aAC7D,CAAC,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QAEnB,KAAK,UAAU,CAAC,CAAC,CAAC;YAChB,MAAM,OAAO,GAAG,CAAC,MAAM,CAAC,OAAO,IAAI,uBAAuB,CAAC,CAAC,OAAO,CAAC,KAAK,EAAE,EAAE,CAAC,CAAC;YAC/E,MAAM,QAAQ,GAAG,sBAAsB,CAAC;gBACtC,IAAI,EAAE,YAAY;gBAClB,OAAO,EAAE,GAAG,OAAO,KAAK;gBACxB,GAAG,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,OAAO,EAAE,EAAE,aAAa,EAAE,UAAU,MAAM,CAAC,MAAM,EAAE,EAAE,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;aACpF,CAAC,CAAC;YACH,OAAO,QAAQ,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC;QAChC,CAAC;IACH,CAAC;AACH,CAAC;AAED,iFAAiF;AAEjF,MAAM,mBAAmB,GACvB,gGAAgG;IAChG,uDAAuD;IACvD,8EAA8E;IAC9E,mEAAmE,CAAC;AAEtE;;;;;;;GAOG;AACH,MAAM,CAAC,KAAK,UAAU,WAAW,CAC/B,MAAmB,EACnB,QAAgB,EAChB,cAAsB,EACtB,YAAoB;IAEpB,MAAM,KAAK,GAAG,eAAe,CAAC,MAAM,CAAC,CAAC;IAEtC,MAAM,MAAM,GACV,aAAa,QAAQ,MAAM;QAC3B,oBAAoB,cAAc,MAAM;QACxC,oBAAoB,YAAY,EAAE,CAAC;IAErC,MAAM,MAAM,GAAG,MAAM,cAAc,CAAC;QAClC,KAAK;QACL,MAAM,EAAE,kBAAkB;QAC1B,MAAM,EAAE,mBAAmB;QAC3B,MAAM;QACN,UAAU,EAAE,CAAC;KACd,CAAC,CAAC;IAEH,MAAM,OAAO,GAAiB,EAAE,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC;IAEnD,mEAAmE;IACnE,MAAM,IAAI,GAAI,MAAgH,CAAC,6BAA6B,CAAC;IAC7J,MAAM,OAAO,GAAG,IAAI,EAAE,UAAU,EAAE,OAAO;QACvC,EAAE,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,EAAE,GAAG,CAAC;SACnB,MAAM,CAAC,CAAC,CAAC,EAAe,EAAE,CAAC,OAAO,CAAC,KAAK,QAAQ,CAAC,CAAC;IACrD,IAAI,OAAO,IAAI,OAAO,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAClC,OAAO,CAAC,OAAO,GAAG,OAAO,CAAC;IAC5B,CAAC;IAED,OAAO,OAAO,CAAC;AACjB,CAAC"}