deepclause-sdk 0.0.58 → 0.0.60
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +71 -19
- package/dist/cli/compile.d.ts +9 -0
- package/dist/cli/compile.d.ts.map +1 -1
- package/dist/cli/compile.js +106 -138
- package/dist/cli/compile.js.map +1 -1
- package/dist/cli/config.d.ts +73 -23
- package/dist/cli/config.d.ts.map +1 -1
- package/dist/cli/config.js +151 -58
- package/dist/cli/config.js.map +1 -1
- package/dist/cli/index.js +64 -14
- package/dist/cli/index.js.map +1 -1
- package/dist/cli/interactive.d.ts +2 -0
- package/dist/cli/interactive.d.ts.map +1 -0
- package/dist/cli/interactive.js +20 -0
- package/dist/cli/interactive.js.map +1 -0
- package/dist/cli/prompt.d.ts +1 -19
- package/dist/cli/prompt.d.ts.map +1 -1
- package/dist/cli/prompt.js +1 -869
- package/dist/cli/prompt.js.map +1 -1
- package/dist/cli/run.d.ts +6 -1
- package/dist/cli/run.d.ts.map +1 -1
- package/dist/cli/run.js +65 -279
- package/dist/cli/run.js.map +1 -1
- package/dist/cli/search.d.ts +1 -0
- package/dist/cli/search.d.ts.map +1 -1
- package/dist/cli/search.js +10 -4
- package/dist/cli/search.js.map +1 -1
- package/dist/cli/tools.d.ts +2 -2
- package/dist/cli/tools.d.ts.map +1 -1
- package/dist/cli/tools.js +30 -5
- package/dist/cli/tools.js.map +1 -1
- package/dist/cli/tui.d.ts +58 -0
- package/dist/cli/tui.d.ts.map +1 -0
- package/dist/cli/tui.js +1742 -0
- package/dist/cli/tui.js.map +1 -0
- package/dist/compiler_prompt.d.ts +1 -1
- package/dist/compiler_prompt.d.ts.map +1 -1
- package/dist/compiler_prompt.js +8 -10
- package/dist/compiler_prompt.js.map +1 -1
- package/dist/prolog-src/deepclause_mi.pl +25 -4
- package/dist/prolog-src/deepclause_strings.pl +22 -2
- package/dist/system/assets/docs/CONDUCTOR_PROMPT.md +46 -0
- package/dist/system/assets/docs/DML_COMPILER_PROMPT.md +654 -0
- package/dist/system/assets/index.d.ts +9 -0
- package/dist/system/assets/index.d.ts.map +1 -0
- package/dist/system/assets/index.js +47 -0
- package/dist/system/assets/index.js.map +1 -0
- package/dist/system/assets/skills/conductor.dml +70 -0
- package/dist/system/assets/skills/skill-creator.dml +182 -0
- package/dist/system/config/model-slots.d.ts +34 -0
- package/dist/system/config/model-slots.d.ts.map +1 -0
- package/dist/system/config/model-slots.js +121 -0
- package/dist/system/config/model-slots.js.map +1 -0
- package/dist/system/runtime/agentvm-manager.d.ts +18 -0
- package/dist/system/runtime/agentvm-manager.d.ts.map +1 -0
- package/dist/system/runtime/agentvm-manager.js +98 -0
- package/dist/system/runtime/agentvm-manager.js.map +1 -0
- package/dist/system/runtime/conductor.d.ts +54 -0
- package/dist/system/runtime/conductor.d.ts.map +1 -0
- package/dist/system/runtime/conductor.js +457 -0
- package/dist/system/runtime/conductor.js.map +1 -0
- package/dist/system/runtime/console-capture.d.ts +9 -0
- package/dist/system/runtime/console-capture.d.ts.map +1 -0
- package/dist/system/runtime/console-capture.js +67 -0
- package/dist/system/runtime/console-capture.js.map +1 -0
- package/dist/system/runtime/dml-executor.d.ts +41 -0
- package/dist/system/runtime/dml-executor.d.ts.map +1 -0
- package/dist/system/runtime/dml-executor.js +137 -0
- package/dist/system/runtime/dml-executor.js.map +1 -0
- package/dist/system/runtime/runtime-tools.d.ts +16 -0
- package/dist/system/runtime/runtime-tools.d.ts.map +1 -0
- package/dist/system/runtime/runtime-tools.js +146 -0
- package/dist/system/runtime/runtime-tools.js.map +1 -0
- package/dist/system/runtime/shell-manager.d.ts +20 -0
- package/dist/system/runtime/shell-manager.d.ts.map +1 -0
- package/dist/system/runtime/shell-manager.js +133 -0
- package/dist/system/runtime/shell-manager.js.map +1 -0
- package/dist/system/runtime/skill-creator.d.ts +33 -0
- package/dist/system/runtime/skill-creator.d.ts.map +1 -0
- package/dist/system/runtime/skill-creator.js +583 -0
- package/dist/system/runtime/skill-creator.js.map +1 -0
- package/dist/system/runtime/token-usage.d.ts +16 -0
- package/dist/system/runtime/token-usage.d.ts.map +1 -0
- package/dist/system/runtime/token-usage.js +45 -0
- package/dist/system/runtime/token-usage.js.map +1 -0
- package/dist/tools.js +1 -1
- package/dist/tools.js.map +1 -1
- package/package.json +4 -3
- package/src/prolog-src/deepclause_mi.pl +25 -4
- package/src/prolog-src/deepclause_strings.pl +22 -2
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { access, readFile } from 'fs/promises';
|
|
2
|
+
import { dirname, join } from 'path';
|
|
3
|
+
import { fileURLToPath } from 'url';
|
|
4
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
5
|
+
const __dirname = dirname(__filename);
|
|
6
|
+
const SKILLS_DIR = join(__dirname, 'skills');
|
|
7
|
+
const DOCS_DIR = join(__dirname, 'docs');
|
|
8
|
+
const SYSTEM_OVERRIDE_DIR = '.deepclause/system';
|
|
9
|
+
/**
 * Map a system skill name to its packaged `.dml` asset file name.
 *
 * @param {'conductor' | 'skill-creator'} name - System skill identifier.
 * @returns {string} File name of the skill's DML asset.
 * @throws {Error} If `name` is not a recognized system skill.
 */
function getSystemSkillFileName(name) {
    switch (name) {
        case 'conductor':
            return 'conductor.dml';
        case 'skill-creator':
            return 'skill-creator.dml';
        default:
            // Fail loudly here instead of returning undefined, which would
            // otherwise surface later as an obscure path-join error.
            throw new Error(`Unknown system skill: ${name}`);
    }
}
|
|
17
|
+
/**
 * Build the absolute path of a packaged system skill asset.
 *
 * @param {'conductor' | 'skill-creator'} name - System skill identifier.
 * @returns {string} Absolute path to the skill's `.dml` file inside the package.
 */
export function getSystemSkillAssetPath(name) {
    const fileName = getSystemSkillFileName(name);
    return join(SKILLS_DIR, fileName);
}
|
|
20
|
+
/**
 * Build the absolute path of the packaged system prompt document for a skill.
 *
 * @param {'conductor' | 'skill-creator'} name - System skill identifier.
 * @returns {string} Absolute path to the prompt markdown file inside the package.
 * @throws {Error} If `name` is not a recognized system skill.
 */
export function getSystemPromptAssetPath(name) {
    switch (name) {
        case 'conductor':
            return join(DOCS_DIR, 'CONDUCTOR_PROMPT.md');
        case 'skill-creator':
            return join(DOCS_DIR, 'DML_COMPILER_PROMPT.md');
        default:
            // Without this branch an unknown name yields undefined, which
            // callers would only notice as a confusing readFile failure.
            throw new Error(`Unknown system prompt: ${name}`);
    }
}
|
|
28
|
+
/**
 * Pick the effective skill asset path: a workspace override under
 * `.deepclause/system` wins when it exists and is accessible; otherwise
 * the packaged asset is used.
 *
 * @param {'conductor' | 'skill-creator'} name - System skill identifier.
 * @param {string} [workspaceRoot] - Optional workspace root to check for overrides.
 * @returns {Promise<string>} Path to the skill file to read.
 */
async function resolveSystemSkillAssetPath(name, workspaceRoot) {
    const packagedPath = getSystemSkillAssetPath(name);
    if (!workspaceRoot) {
        return packagedPath;
    }
    const overridePath = join(workspaceRoot, SYSTEM_OVERRIDE_DIR, getSystemSkillFileName(name));
    try {
        await access(overridePath);
    }
    catch {
        // No accessible workspace override - use the packaged asset.
        return packagedPath;
    }
    return overridePath;
}
|
|
41
|
+
/**
 * Read the DML source of a system skill, honoring any workspace override.
 *
 * @param {'conductor' | 'skill-creator'} name - System skill identifier.
 * @param {{ workspaceRoot?: string }} [options] - Optional workspace root used
 *   to look for a `.deepclause/system` override.
 * @returns {Promise<string>} Skill source as UTF-8 text.
 */
export async function readSystemSkillAsset(name, options = {}) {
    const assetPath = await resolveSystemSkillAssetPath(name, options.workspaceRoot);
    return readFile(assetPath, 'utf8');
}
|
|
44
|
+
/**
 * Read the packaged system prompt document for a skill.
 *
 * @param {'conductor' | 'skill-creator'} name - System skill identifier.
 * @returns {Promise<string>} Prompt contents as UTF-8 text.
 */
export async function readSystemPromptAsset(name) {
    const promptPath = getSystemPromptAssetPath(name);
    return readFile(promptPath, 'utf8');
}
|
|
47
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/system/assets/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,QAAQ,EAAE,MAAM,aAAa,CAAC;AAC/C,OAAO,EAAE,OAAO,EAAE,IAAI,EAAE,MAAM,MAAM,CAAC;AACrC,OAAO,EAAE,aAAa,EAAE,MAAM,KAAK,CAAC;AAKpC,MAAM,UAAU,GAAG,aAAa,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;AAClD,MAAM,SAAS,GAAG,OAAO,CAAC,UAAU,CAAC,CAAC;AACtC,MAAM,UAAU,GAAG,IAAI,CAAC,SAAS,EAAE,QAAQ,CAAC,CAAC;AAC7C,MAAM,QAAQ,GAAG,IAAI,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;AACzC,MAAM,mBAAmB,GAAG,oBAAoB,CAAC;AAEjD,SAAS,sBAAsB,CAAC,IAA0B;IACxD,QAAQ,IAAI,EAAE,CAAC;QACb,KAAK,WAAW;YACd,OAAO,eAAe,CAAC;QACzB,KAAK,eAAe;YAClB,OAAO,mBAAmB,CAAC;IAC/B,CAAC;AACH,CAAC;AAED,MAAM,UAAU,uBAAuB,CAAC,IAA0B;IAChE,OAAO,IAAI,CAAC,UAAU,EAAE,sBAAsB,CAAC,IAAI,CAAC,CAAC,CAAC;AACxD,CAAC;AAED,MAAM,UAAU,wBAAwB,CAAC,IAA2B;IAClE,QAAQ,IAAI,EAAE,CAAC;QACb,KAAK,WAAW;YACd,OAAO,IAAI,CAAC,QAAQ,EAAE,qBAAqB,CAAC,CAAC;QAC/C,KAAK,eAAe;YAClB,OAAO,IAAI,CAAC,QAAQ,EAAE,wBAAwB,CAAC,CAAC;IACpD,CAAC;AACH,CAAC;AAED,KAAK,UAAU,2BAA2B,CACxC,IAA0B,EAC1B,aAAsB;IAEtB,IAAI,aAAa,EAAE,CAAC;QAClB,MAAM,YAAY,GAAG,IAAI,CAAC,aAAa,EAAE,mBAAmB,EAAE,sBAAsB,CAAC,IAAI,CAAC,CAAC,CAAC;QAC5F,IAAI,CAAC;YACH,MAAM,MAAM,CAAC,YAAY,CAAC,CAAC;YAC3B,OAAO,YAAY,CAAC;QACtB,CAAC;QAAC,MAAM,CAAC;YACP,qEAAqE;QACvE,CAAC;IACH,CAAC;IAED,OAAO,uBAAuB,CAAC,IAAI,CAAC,CAAC;AACvC,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,oBAAoB,CACxC,IAA0B,EAC1B,UAAsC,EAAE;IAExC,OAAO,QAAQ,CAAC,MAAM,2BAA2B,CAAC,IAAI,EAAE,OAAO,CAAC,aAAa,CAAC,EAAE,MAAM,CAAC,CAAC;AAC1F,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,qBAAqB,CAAC,IAA2B;IACrE,OAAO,QAAQ,CAAC,wBAAwB,CAAC,IAAI,CAAC,EAAE,MAAM,CAAC,CAAC;AAC1D,CAAC"}
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
% ============================================================================
|
|
2
|
+
% Conductor (full tool set)
|
|
3
|
+
% ============================================================================
|
|
4
|
+
%
|
|
5
|
+
% Has all tools: skills, bash, web, skill creation, etc.
|
|
6
|
+
% Runs autonomously to complete the task described in its instructions.
|
|
7
|
+
%
|
|
8
|
+
% ============================================================================
|
|
9
|
+
|
|
10
|
+
% --- Tool predicates (full tool set) ---
|
|
11
|
+
|
|
12
|
+
tool(run_skill(Slug, Args, Result), "Run an existing skill. Slug is the skill slug (e.g. 'deep-research'). Args is a JSON array of string arguments. Returns the skill result.") :-
|
|
13
|
+
|
|
14
|
+
exec(run_skill(slug: Slug, args: Args), Result).
|
|
15
|
+
|
|
16
|
+
tool(search_web(Query, Results), "Search the web for current information. Returns search results.") :-
|
|
17
|
+
exec(web_search(query: Query), Results).
|
|
18
|
+
|
|
19
|
+
tool(search_news(Query, Results), "Search recent news articles. Returns news results.") :-
|
|
20
|
+
exec(news_search(query: Query), Results).
|
|
21
|
+
|
|
22
|
+
tool(fetch_url(Url, Content), "Fetch the content of a web page by URL. Returns the page body as text.") :-
|
|
23
|
+
exec(url_fetch(url: Url), Result),
|
|
24
|
+
get_dict(body, Result, Content).
|
|
25
|
+
|
|
26
|
+
tool(download_file(Url, FilePath, Size), "Download a file from a URL and save it to disk. Returns the file path and size.") :-
|
|
27
|
+
exec(url_fetch(url: Url, save_to: FilePath), Result),
|
|
28
|
+
get_dict(file_path, Result, FilePath),
|
|
29
|
+
get_dict(size, Result, Size).
|
|
30
|
+
|
|
31
|
+
tool(run_bash(Command, Output), "Run a shell command in the workspace.") :-
|
|
32
|
+
exec(bash(command: Command), Output).
|
|
33
|
+
|
|
34
|
+
tool(create_new_skill(Spec, Result), "Create and deploy a new reusable skill from a natural language spec.") :-
|
|
35
|
+
exec(create_skill(spec: Spec), Result).
|
|
36
|
+
|
|
37
|
+
tool(ask_user(Prompt, Response), "Ask the user a clarifying question and get their response.") :-
|
|
38
|
+
exec(ask_user(prompt: Prompt), Result),
|
|
39
|
+
get_dict(user_response, Result, Response).
|
|
40
|
+
|
|
41
|
+
tool(inform_user(Message, Result), "Send a status update or progress report to the user.") :-
|
|
42
|
+
output(Message),
|
|
43
|
+
Result = "ok".
|
|
44
|
+
|
|
45
|
+
tool(save_memory(Content, Result), "Update task memory with technical learnings from this execution: what tools/approaches worked, what failed, useful commands, patterns discovered. Pass the COMPLETE updated memory in markdown (old + new learnings). This is technical memory - things that help future tasks.") :-
|
|
46
|
+
exec(update_memory(content: Content), Result).
|
|
47
|
+
|
|
48
|
+
% --- Main entry point ---
|
|
49
|
+
|
|
50
|
+
agent_main(UserMessage) :-
|
|
51
|
+
param(system_prompt, "Conductor system prompt", SysPrompt),
|
|
52
|
+
system(SysPrompt),
|
|
53
|
+
system("When using bash or similar tools, avoid dumping large outputs directly into the conversation context. Write large or uncertain outputs to temporary files first, then inspect them with focused commands such as wc, grep, head, tail, sed, or similar narrow follow-up commands so the context stays small and relevant."),
|
|
54
|
+
format(string(TaskPrompt), "Task instructions: ~w\n\nComplete this task autonomously using your tools.\n\nIf you can complete the task, do so and report the result.\nIf you are blocked by a task-specific, tool-specific, external-service, environment, permission, dependency, or data issue that is not an LLM/provider/API/transport failure, do not go silent. Diagnose the blocker and explain clearly to the user why the task could not be completed. Do not call save_memory from this main task.\nIf the problem appears to be an LLM/provider/API/transport failure, do not invent a speculative diagnosis.\n\nReturn a good final answer in all non-LLM cases, including when the task is blocked.", [UserMessage]),
|
|
55
|
+
task(TaskPrompt, string(Answer)),
|
|
56
|
+
update_session_memory(UserMessage, Answer),
|
|
57
|
+
answer(Answer).
|
|
58
|
+
|
|
59
|
+
% Fallback for technical failures where the main task itself cannot run to completion
|
|
60
|
+
agent_main(_) :-
|
|
61
|
+
answer("Task failed. Please try again.").
|
|
62
|
+
|
|
63
|
+
% Memory update: extract technical learnings and persist to task-memory.md
|
|
64
|
+
update_session_memory(UserMessage, Answer) :-
|
|
65
|
+
format(string(MemoryPrompt), "You just completed:\nTask: ~w\nResult: ~w\n\nReview the task memory in your context (technical learnings). If this execution produced useful technical insights - what worked, what failed, useful commands, error resolutions, or reusable lessons from a non-LLM task failure - call save_memory with the COMPLETE updated memory. Only use the save_memory tool and no other tools. If nothing technically noteworthy, reply 'ok'.", [UserMessage, Answer]),
|
|
66
|
+
with_tools([save_memory], (
|
|
67
|
+
task(MemoryPrompt, string(_))
|
|
68
|
+
)),
|
|
69
|
+
!.
|
|
70
|
+
update_session_memory(_, _).
|
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
% ============================================================================
|
|
2
|
+
% _skill-creator v4 - Creates, tests, and deploys DML skills.
|
|
3
|
+
%
|
|
4
|
+
% Four-phase pipeline matching the DML Compiler Prompt role definition:
|
|
5
|
+
% Phase 1 - Understand & Research
|
|
6
|
+
% Clarify requirements, research dependencies, understand APIs.
|
|
7
|
+
% Phase 2 - Environment Setup
|
|
8
|
+
% Install packages, create/test helper scripts via bash.
|
|
9
|
+
% Phase 3 - DML Implementation
|
|
10
|
+
% Write DML, validate syntax, run tests - iterate until passing.
|
|
11
|
+
% Phase 4 - Deploy
|
|
12
|
+
% Ask user for permission, then deploy.
|
|
13
|
+
%
|
|
14
|
+
% System tools: validate_dml, test_dml, deploy_skill, write_file, bash
|
|
15
|
+
% Proxied tools: web_search, news_search, url_fetch
|
|
16
|
+
% Auto-augmented: ask_user
|
|
17
|
+
%
|
|
18
|
+
% agent_main(SpecMarkdown) - create new skill
|
|
19
|
+
% agent_main(SpecMarkdown, EditSlug) - edit existing skill
|
|
20
|
+
% ============================================================================
|
|
21
|
+
|
|
22
|
+
% -- Tools available to the LLM during task() calls --------------------------
|
|
23
|
+
|
|
24
|
+
tool(search(Query, Results),
|
|
25
|
+
"Search the web for information. Returns titles, URLs, and snippets.") :-
|
|
26
|
+
exec(web_search(query: Query), Results).
|
|
27
|
+
|
|
28
|
+
tool(search_news(Query, Results),
|
|
29
|
+
"Search for recent news articles.") :-
|
|
30
|
+
exec(news_search(query: Query), Results).
|
|
31
|
+
|
|
32
|
+
tool(fetch(Url, Content),
|
|
33
|
+
"Fetch a URL and return its body. Use for documentation pages, README files, API references.") :-
|
|
34
|
+
exec(url_fetch(url: Url), Result),
|
|
35
|
+
get_dict(body, Result, Content).
|
|
36
|
+
|
|
37
|
+
tool(shell(Command, Result),
|
|
38
|
+
"Run a shell command on the VM. Returns dict with: success (boolean), stdout (string), stderr (string), exitCode (integer), summary (string). Use for package installation, environment checks, and helper script creation/testing. Always check the 'success' field.") :-
|
|
39
|
+
exec(bash(command: Command), Result).
|
|
40
|
+
|
|
41
|
+
tool(write(Path, Content, Result),
|
|
42
|
+
"Write (or overwrite) a file in the workspace. Content is passed as a JSON string so there are NO shell escaping issues - ALWAYS use this instead of bash heredocs for writing DML and other code files. Returns dict with: success (boolean), path (string), bytes (number).") :-
|
|
43
|
+
exec(write_file(path: Path, content: Content), Result).
|
|
44
|
+
|
|
45
|
+
tool(validate(DmlFile, Result),
|
|
46
|
+
"Validate DML code from a .dml file using the Prolog parser. You MUST write the DML to a file first with write(), then pass the file path. Returns dict with: valid (boolean), errors (list), warnings (list).") :-
|
|
47
|
+
exec(validate_dml(dml_file: DmlFile), Result).
|
|
48
|
+
|
|
49
|
+
tool(test_run(DmlFile, Input, Result),
|
|
50
|
+
"Run a DML program from a .dml file with test input(s) to verify it works. You MUST write the DML to a file first with write(), then pass the file path. Input can be a single string (for agent_main/1) or a JSON array string like '[\"arg1\", \"arg2\"]' for multi-arg agent_main. Returns dict with: success (boolean), status (string: ok/error/completed_no_answer/failed_silently), answer (string), outputs (list), errors (list), trace (list).") :-
|
|
51
|
+
exec(test_dml(dml_file: DmlFile, test_args: Input), Result).
|
|
52
|
+
|
|
53
|
+
tool(deploy(DmlFile, Spec, MetaJSON, Result),
|
|
54
|
+
"Publish a new skill to the local runtime catalog. IMPORTANT ARG ORDER: deploy(<dml file path>, <original spec markdown>, <metadata json>). The SECOND argument is the original specification text. The THIRD argument is the metadata JSON string with keys: slug, name, description, trigger_phrases (array of 3-5 phrases). Never pass a version number or boolean flag here. Returns dict with: ok (boolean), slug (string), version (number), or error (string).") :-
|
|
55
|
+
exec(deploy_skill(dml_file: DmlFile, spec_markdown: Spec, metadata_json: MetaJSON), Result).
|
|
56
|
+
|
|
57
|
+
tool(deploy_update(DmlFile, Spec, MetaJSON, Slug, Result),
|
|
58
|
+
"Update an existing skill. IMPORTANT ARG ORDER: deploy_update(<dml file path>, <updated spec markdown>, <metadata json>, <existing slug>). The THIRD argument is metadata JSON; the FOURTH argument is the slug override. Never pass a version number or boolean flag. Returns dict with: ok (boolean), slug (string), version (number), or error (string).") :-
|
|
59
|
+
exec(deploy_skill(dml_file: DmlFile, spec_markdown: Spec, metadata_json: MetaJSON, slug_override: Slug), Result).
|
|
60
|
+
|
|
61
|
+
tool(ask_user(Prompt, Response), "Ask the user a clarifying question or for confirmation and get their response.") :-
|
|
62
|
+
exec(ask_user(prompt: Prompt), Result),
|
|
63
|
+
get_dict(user_response, Result, Response).
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
% -- Create new skill --------------------------------------------------------
|
|
67
|
+
|
|
68
|
+
agent_main(SpecMarkdown) :-
|
|
69
|
+
param(system_prompt, "System prompt with DML reference", SysPrompt),
|
|
70
|
+
system(SysPrompt),
|
|
71
|
+
output("📋 Starting skill creation..."),
|
|
72
|
+
|
|
73
|
+
% -- Phase 1: Understand & Research --------------------------------------
|
|
74
|
+
output("Phase 1/4: Understanding requirements and researching dependencies..."),
|
|
75
|
+
format(string(ResearchTask),
|
|
76
|
+
"You are starting to build a DML skill from the specification below.\n\nYour job in this phase:\n1. Read the specification carefully. If anything is ambiguous or missing, use ask_user to clarify before proceeding.\n2. Identify all external dependencies (Python packages, Node modules, system tools, APIs, CLI tools).\n3. For each dependency, use search or fetch to look up installation instructions, API docs, or usage examples if you are not already familiar with them.\n4. Summarise: what the skill does, its inputs/outputs, all dependencies found, and any tricky parts.\n\nSpecification:\n---\n~w\n---\n\nStore your findings in ResearchSummary.",
|
|
77
|
+
[SpecMarkdown]),
|
|
78
|
+
task(ResearchTask, string(ResearchSummary)),
|
|
79
|
+
output(ResearchSummary),
|
|
80
|
+
|
|
81
|
+
% -- Phase 2: Environment Setup -------------------------------------------
|
|
82
|
+
output("Phase 2/4: Setting up the environment..."),
|
|
83
|
+
format(string(EnvTask),
|
|
84
|
+
"Set up the execution environment for the skill you are about to build.\n\nBased on your research:\n~w\n\nYour job in this phase:\n1. Use shell() to check what is already installed (python --version, node --version, which <tool>, pip show <pkg>, etc.).\n2. Install ALL missing packages and tools: pip install, apt-get install (always use sudo and -y), npm install -g.\n3. If the skill needs helper scripts (Python, Shell, etc.), create and smoke-test them now with shell().\n4. When you encounter errors during setup, diagnose the root cause (version mismatch, missing system lib, permissions, ...) and resolve it.\n5. Prefer the simplest approach with the fewest new dependencies when the spec does not specify.\n6. Do NOT install anything inside the DML skill itself - the skill must assume everything is pre-installed.\n\nStore a short environment summary (what was installed, what exists) in EnvSummary.",
|
|
85
|
+
[ResearchSummary]),
|
|
86
|
+
task(EnvTask, string(EnvSummary)),
|
|
87
|
+
output(EnvSummary),
|
|
88
|
+
|
|
89
|
+
% -- Phase 3: Implement, Validate, Test ----------------------------------
|
|
90
|
+
output("Phase 3/4: Writing DML, validating, and testing..."),
|
|
91
|
+
format(string(ImplTask),
|
|
92
|
+
"Write, validate, and test the DML skill.\n\nBackground:\n- Research: ~w\n- Environment: ~w\n- Original specification: ~w\n\nYour job in this phase:\n1. Write a complete DML implementation plan (tool definitions, agent_main structure, fallback clause).\n2. Write the full DML code to a .dml file using write(). NEVER use bash heredocs for DML - they mangle backslashes and special Prolog characters. NEVER pass DML as an inline string to validate or test_run.\n3. Call validate('my-skill.dml') - if there are syntax errors, fix the code using write() and re-validate until valid.\n4. Call test_run('my-skill.dml', ...) with a realistic test input - if it fails, analyse the error/trace, fix the code with write(), and re-test until passing.\n5. Keep iterating on validate -> test until the skill works correctly.\n\nStore the final .dml file path in FinalDML.",
|
|
93
|
+
[ResearchSummary, EnvSummary, SpecMarkdown]),
|
|
94
|
+
task(ImplTask, string(FinalDML)),
|
|
95
|
+
output("✅ DML implementation ready."),
|
|
96
|
+
|
|
97
|
+
% -- Phase 4: Deploy ------------------------------------------------------
|
|
98
|
+
( param(auto_deploy, "Automatic publish mode", true)
|
|
99
|
+
-> output("Phase 4/4: Publishing skill..."),
|
|
100
|
+
param(deployment_metadata_json, "Exact metadata JSON for automatic publishing", DeploymentMetaJSON),
|
|
101
|
+
format(string(DeployTask),
|
|
102
|
+
"You are ready to publish the skill automatically. Here is the final DML file path:\n\n~w\n\nOriginal specification:\n~w\n\nUse EXACTLY this metadata JSON string and do not modify it:\n\n~w\n\nYour job:\n1. Do NOT call ask_user. Automatic publish mode already serves as deployment approval.\n2. Call deploy() exactly once with the DML file path, the original specification, and the exact metadata JSON string shown above.\n3. Do not add, remove, or rename metadata keys.\n4. After a successful publish, end your response with a structured summary in EXACTLY this format:\n\nDEPLOY_RESULT:\n- name: <skill name>\n- slug: <skill slug>\n- trigger_phrases: <comma-separated list>\n- test_status: passed\n- deploy_status: success",
|
|
103
|
+
[FinalDML, SpecMarkdown, DeploymentMetaJSON]),
|
|
104
|
+
system("Automatic publish mode is enabled. Do not ask the user for permission. Use deployment_metadata_json exactly as provided."),
|
|
105
|
+
system("When you publish, use EXACTLY this argument order: deploy(FinalDMLPath, OriginalSpecMarkdown, MetadataJSONString). MetadataJSONString must be deployment_metadata_json without edits."),
|
|
106
|
+
task(DeployTask, string(DeployResult))
|
|
107
|
+
; output("Phase 4/4: Requesting deployment permission..."),
|
|
108
|
+
format(string(DeployTask),
|
|
109
|
+
"You are ready to deploy the skill. Here is the final DML file path:\n\n~w\n\nOriginal specification:\n~w\n\nYour job:\n1. Use ask_user to present a short summary of the skill (name, slug, what it does, trigger phrases you intend to use) and ask the user for permission to deploy.\n2. If the user approves (yes / ok / go ahead / etc.), call deploy() with the DML file path, the original specification, and a metadata JSON string containing: slug, name, description, trigger_phrases (3-5 phrases).\n3. If the user declines, answer with a message that the skill is ready but was not deployed, and that they can ask to deploy it later.\n4. After a successful deploy, end your response with a structured summary in EXACTLY this format:\n\nDEPLOY_RESULT:\n- name: <skill name>\n- slug: <skill slug>\n- trigger_phrases: <comma-separated list>\n- test_status: passed\n- deploy_status: success",
|
|
110
|
+
[FinalDML, SpecMarkdown]),
|
|
111
|
+
system("When you deploy, use EXACTLY this argument order: deploy(FinalDMLPath, OriginalSpecMarkdown, MetadataJSONString). Do not pass a version number, edit flag, or boolean in place of MetadataJSONString."),
|
|
112
|
+
task(DeployTask, string(DeployResult))
|
|
113
|
+
),
|
|
114
|
+
|
|
115
|
+
format(string(Summary), "✅ Skill creation complete!\n\n~w\n\nYou can trigger this skill using one of the trigger phrases shown above.", [DeployResult]),
|
|
116
|
+
answer(Summary).
|
|
117
|
+
|
|
118
|
+
% -- Fallback for create -----------------------------------------------------
|
|
119
|
+
|
|
120
|
+
agent_main(SpecMarkdown) :-
|
|
121
|
+
format(string(Msg), "❌ Skill creation failed.\n\nThe skill could not be compiled, tested, or deployed from the specification provided. This can happen when:\n- The specification is ambiguous or incomplete\n- Required APIs or tools are not available\n- The DML code could not pass validation or testing after multiple attempts\n\nPlease try:\n1. Simplifying the specification\n2. Being more explicit about inputs, outputs, and logic\n3. Breaking into smaller, focused skills\n\nOriginal spec (first 200 chars): ~w", [SpecMarkdown]),
|
|
122
|
+
answer(Msg).
|
|
123
|
+
|
|
124
|
+
% -- Edit existing skill -----------------------------------------------------
|
|
125
|
+
|
|
126
|
+
agent_main(SpecMarkdown, EditSlug) :-
|
|
127
|
+
param(system_prompt, "System prompt with DML reference", SysPrompt),
|
|
128
|
+
system(SysPrompt),
|
|
129
|
+
format(string(Ctx), "You are editing the existing skill '~w'.", [EditSlug]),
|
|
130
|
+
system(Ctx),
|
|
131
|
+
output("📋 Starting skill update..."),
|
|
132
|
+
|
|
133
|
+
% -- Phase 1: Understand changes ------------------------------------------
|
|
134
|
+
output("Phase 1/4: Understanding required changes and dependencies..."),
|
|
135
|
+
format(string(ResearchTask),
|
|
136
|
+
"You are updating the existing DML skill '~w' from the specification below.\n\nYour job in this phase:\n1. Read the updated specification. If anything is ambiguous, use ask_user to clarify.\n2. Identify what is changing and any new dependencies introduced by the changes.\n3. Research new dependencies if needed (search, fetch).\n4. Summarise: what is changing, new dependencies if any, and any tricky parts.\n\nUpdated specification:\n---\n~w\n---\n\nStore your findings in ResearchSummary.",
|
|
137
|
+
[EditSlug, SpecMarkdown]),
|
|
138
|
+
task(ResearchTask, string(ResearchSummary)),
|
|
139
|
+
output(ResearchSummary),
|
|
140
|
+
|
|
141
|
+
% -- Phase 2: Environment Setup -------------------------------------------
|
|
142
|
+
output("Phase 2/4: Updating the environment if needed..."),
|
|
143
|
+
format(string(EnvTask),
|
|
144
|
+
"Review and update the execution environment for the skill changes.\n\nBased on your analysis:\n~w\n\nYour job:\n1. Check whether any new packages or tools need to be installed for the changes.\n2. Install anything missing via shell().\n3. Create or update any helper scripts if needed.\n4. If no environment changes are needed, confirm that with a brief note.\n\nStore an environment summary in EnvSummary.",
|
|
145
|
+
[ResearchSummary]),
|
|
146
|
+
task(EnvTask, string(EnvSummary)),
|
|
147
|
+
output(EnvSummary),
|
|
148
|
+
|
|
149
|
+
% -- Phase 3: Implement, Validate, Test ----------------------------------
|
|
150
|
+
output("Phase 3/4: Writing updated DML, validating, and testing..."),
|
|
151
|
+
format(string(ImplTask),
|
|
152
|
+
"Write, validate, and test the updated DML skill.\n\nBackground:\n- Changes summary: ~w\n- Environment: ~w\n- Updated specification: ~w\n\nYour job:\n1. Write the complete updated DML code to a .dml file using write(). NEVER use bash heredocs for DML - they mangle backslashes and special Prolog characters.\n2. Call validate('my-skill.dml') - fix any syntax errors by rewriting the file with write().\n3. Call test_run('my-skill.dml', ...) - fix any runtime failures by rewriting with write().\n4. Iterate until both validation and testing pass.\n\nStore the final .dml file path in FinalDML.",
|
|
153
|
+
[ResearchSummary, EnvSummary, SpecMarkdown]),
|
|
154
|
+
task(ImplTask, string(FinalDML)),
|
|
155
|
+
output("✅ Updated DML implementation ready."),
|
|
156
|
+
|
|
157
|
+
% -- Phase 4: Deploy -------------------------------------------------------
|
|
158
|
+
( param(auto_deploy, "Automatic publish mode", true)
|
|
159
|
+
-> output("Phase 4/4: Publishing update..."),
|
|
160
|
+
param(deployment_metadata_json, "Exact metadata JSON for automatic publishing", DeploymentMetaJSON),
|
|
161
|
+
format(string(DeployTask),
|
|
162
|
+
"You are ready to publish the updated skill '~w' automatically. Here is the final DML file path:\n\n~w\n\nUpdated specification:\n~w\n\nUse EXACTLY this metadata JSON string and do not modify it:\n\n~w\n\nYour job:\n1. Do NOT call ask_user. Automatic publish mode already serves as deployment approval.\n2. Call deploy_update() exactly once with the DML file path, updated specification, the exact metadata JSON shown above, and slug override '~w'.\n3. Do not add, remove, or rename metadata keys.\n4. After a successful publish, end your response with a structured summary in EXACTLY this format:\n\nDEPLOY_RESULT:\n- name: <skill name>\n- slug: ~w\n- changes: <brief summary of what changed>\n- test_status: passed\n- deploy_status: success",
|
|
163
|
+
[EditSlug, FinalDML, SpecMarkdown, DeploymentMetaJSON, EditSlug, EditSlug]),
|
|
164
|
+
system("Automatic publish mode is enabled. Do not ask the user for permission. Use deployment_metadata_json exactly as provided."),
|
|
165
|
+
system("When you publish an update, use EXACTLY this argument order: deploy_update(FinalDMLPath, UpdatedSpecMarkdown, MetadataJSONString, ExistingSlug). MetadataJSONString must be deployment_metadata_json without edits."),
|
|
166
|
+
task(DeployTask, string(DeployResult))
|
|
167
|
+
; output("Phase 4/4: Requesting deployment permission..."),
|
|
168
|
+
format(string(DeployTask),
|
|
169
|
+
"You are ready to deploy the updated skill '~w'. Here is the final DML file path:\n\n~w\n\nUpdated specification:\n~w\n\nYour job:\n1. Use ask_user to summarise what changed and ask the user for permission to deploy.\n2. If approved, call deploy_update() with the DML file path, spec, a metadata JSON (name, description, trigger_phrases), and slug override '~w'.\n3. If declined, answer that the update is ready but not deployed, and they can ask to deploy later.\n4. After a successful deploy, end your response with a structured summary in EXACTLY this format:\n\nDEPLOY_RESULT:\n- name: <skill name>\n- slug: ~w\n- changes: <brief summary of what changed>\n- test_status: passed\n- deploy_status: success",
|
|
170
|
+
[EditSlug, FinalDML, SpecMarkdown, EditSlug, EditSlug]),
|
|
171
|
+
system("When you deploy an update, use EXACTLY this argument order: deploy_update(FinalDMLPath, UpdatedSpecMarkdown, MetadataJSONString, ExistingSlug). Do not pass a version number or boolean flag."),
|
|
172
|
+
task(DeployTask, string(DeployResult))
|
|
173
|
+
),
|
|
174
|
+
|
|
175
|
+
format(string(Summary), "✅ Skill '~w' updated and deployed!\n\n~w", [EditSlug, DeployResult]),
|
|
176
|
+
answer(Summary).
|
|
177
|
+
|
|
178
|
+
% -- Fallback for edit --------------------------------------------------------
|
|
179
|
+
|
|
180
|
+
agent_main(SpecMarkdown, EditSlug) :-
|
|
181
|
+
format(string(Msg), "❌ Skill update failed for '~w'.\n\nThe skill could not be updated from the specification provided. Please try:\n1. Simplifying the changes\n2. Providing the full updated specification\n3. Checking if the skill slug '~w' exists\n\nSpec (first 200 chars): ~w", [EditSlug, EditSlug, SpecMarkdown]),
|
|
182
|
+
answer(Msg).
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/** Built-in LLM provider identifiers supported at runtime. */
export type Provider = 'openai' | 'anthropic' | 'google' | 'openrouter';
/** Configuration slot a model can be assigned to. */
export type ModelSlot = 'gateway' | 'run' | 'compile';
/** Optional connection settings for a single provider. */
export interface ProviderConfig {
  apiKey?: string;
  baseUrl?: string;
}
/** Per-slot model ids and temperatures, plus optional provider settings. */
export interface SlotModelConfig {
  models: Record<ModelSlot, string>;
  temperatures: Record<ModelSlot, number>;
  providers?: Partial<Record<Provider, ProviderConfig>>;
}
/** Structured parts of a parsed model id. */
export interface ParsedModelId {
  /** Normalized `provider:model` id string. */
  id: string;
  provider: Provider;
  model: string;
  /** Present only for `custom:<name>:<model>` ids. */
  customProviderName?: string;
}
/** A ParsedModelId resolved against a slot's configuration. */
export interface ResolvedModelConfig extends ParsedModelId {
  slot: ModelSlot;
  temperature: number;
  baseUrl?: string;
  apiKey?: string;
}
/** Default model id per slot, used when the config has no entry. */
export declare const DEFAULT_MODEL_IDS: Record<ModelSlot, string>;
/** Default sampling temperature per slot. */
export declare const DEFAULT_TEMPERATURES: Record<ModelSlot, number>;
/** Canonicalize a model id into `provider:model` form; throws on empty input. */
export declare function normalizeModelId(modelId: string): string;
/** Format a model id for output; currently identical to normalizeModelId. */
export declare function formatModelId(modelId: string): string;
/** Parse a model id into structured parts; throws on invalid ids. */
export declare function parseModelId(modelId: string): ParsedModelId;
/** Resolve the effective model config for a slot (overrides > config > defaults). */
export declare function resolveModelSlotConfig(config: SlotModelConfig, slot: ModelSlot, overrides?: {
  modelId?: string;
  temperature?: number;
}): ResolvedModelConfig;
/** Build a normalized model-id override from optional model/provider inputs. */
export declare function buildModelOverride(model?: string, provider?: Provider): string | undefined;
|
|
34
|
+
//# sourceMappingURL=model-slots.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"model-slots.d.ts","sourceRoot":"","sources":["../../../src/system/config/model-slots.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,QAAQ,GAAG,QAAQ,GAAG,WAAW,GAAG,QAAQ,GAAG,YAAY,CAAC;AACxE,MAAM,MAAM,SAAS,GAAG,SAAS,GAAG,KAAK,GAAG,SAAS,CAAC;AAEtD,MAAM,WAAW,cAAc;IAC7B,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC;CAClB;AAED,MAAM,WAAW,eAAe;IAC9B,MAAM,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;IAClC,YAAY,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAAC,CAAC;IACxC,SAAS,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,QAAQ,EAAE,cAAc,CAAC,CAAC,CAAC;CACvD;AAED,MAAM,WAAW,aAAa;IAC5B,EAAE,EAAE,MAAM,CAAC;IACX,QAAQ,EAAE,QAAQ,CAAC;IACnB,KAAK,EAAE,MAAM,CAAC;IACd,kBAAkB,CAAC,EAAE,MAAM,CAAC;CAC7B;AAED,MAAM,WAAW,mBAAoB,SAAQ,aAAa;IACxD,IAAI,EAAE,SAAS,CAAC;IAChB,WAAW,EAAE,MAAM,CAAC;IACpB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,eAAO,MAAM,iBAAiB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAIvD,CAAC;AAEF,eAAO,MAAM,oBAAoB,EAAE,MAAM,CAAC,SAAS,EAAE,MAAM,CAI1D,CAAC;AAwBF,wBAAgB,gBAAgB,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,CAqCxD;AAED,wBAAgB,aAAa,CAAC,OAAO,EAAE,MAAM,GAAG,MAAM,CAErD;AAED,wBAAgB,YAAY,CAAC,OAAO,EAAE,MAAM,GAAG,aAAa,CA0B3D;AAUD,wBAAgB,sBAAsB,CACpC,MAAM,EAAE,eAAe,EACvB,IAAI,EAAE,SAAS,EACf,SAAS,GAAE;IAAE,OAAO,CAAC,EAAE,MAAM,CAAC;IAAC,WAAW,CAAC,EAAE,MAAM,CAAA;CAAO,GACzD,mBAAmB,CAcrB;AAED,wBAAgB,kBAAkB,CAAC,KAAK,CAAC,EAAE,MAAM,EAAE,QAAQ,CAAC,EAAE,QAAQ,GAAG,MAAM,GAAG,SAAS,CAQ1F"}
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
// Model id used for each slot when neither overrides nor config provide one.
export const DEFAULT_MODEL_IDS = {
    gateway: 'openai:gpt-4o',
    run: 'openai:gpt-4o',
    compile: 'openai:gpt-4o',
};
// Fallback sampling temperature per slot; compile runs cooler than the others.
export const DEFAULT_TEMPERATURES = {
    gateway: 0.7,
    run: 0.7,
    compile: 0.4,
};
// Provider prefixes recognized by normalizeModelId/parseModelId.
const RUNTIME_PROVIDERS = new Set(['openai', 'anthropic', 'google', 'openrouter']);
|
|
12
|
+
/**
 * Guess the provider for a bare model name (one without a provider prefix)
 * by matching well-known model-name prefixes. Anything unrecognized is
 * attributed to 'openrouter'.
 */
function inferProvider(model) {
    const prefixToProvider = [
        ['gpt-', 'openai'],
        ['o1', 'openai'],
        ['o3', 'openai'],
        ['claude-', 'anthropic'],
        ['gemini-', 'google'],
    ];
    for (const [prefix, provider] of prefixToProvider) {
        if (model.startsWith(prefix)) {
            return provider;
        }
    }
    return 'openrouter';
}
|
|
24
|
+
/**
 * Normalize an OpenRouter model reference toward vendor/model form.
 * A remainder that already contains '/' is returned untouched; otherwise
 * only the first ':' separator (if any) is rewritten to '/'.
 */
function normalizeOpenRouterModel(remainder) {
    if (!remainder.includes('/')) {
        const firstColon = remainder.indexOf(':');
        if (firstColon !== -1) {
            return `${remainder.slice(0, firstColon)}/${remainder.slice(firstColon + 1)}`;
        }
    }
    return remainder;
}
|
|
30
|
+
/**
 * Canonicalize a model id into `provider:model` form.
 *
 * Accepted shapes:
 *  - `custom:<rest>` — returned verbatim (case-sensitive fast path); a
 *    mixed-case `Custom:<rest>` prefix is lower-cased to `custom:<rest>`.
 *  - `<provider>:<model>` or `<provider>/<model>` for a known provider
 *    prefix (lower-cased); openrouter remainders are slash-normalized.
 *  - A bare model name, whose provider is inferred from its prefix.
 *
 * @throws {Error} when the id is empty after trimming.
 */
export function normalizeModelId(modelId) {
    const trimmed = modelId.trim();
    if (trimmed === '') {
        throw new Error('Invalid model id: value cannot be empty');
    }
    if (trimmed.startsWith('custom:')) {
        return trimmed;
    }
    const colonAt = trimmed.indexOf(':');
    if (colonAt > 0) {
        const head = trimmed.slice(0, colonAt).toLowerCase();
        const tail = trimmed.slice(colonAt + 1);
        if (head === 'custom') {
            return `custom:${tail}`;
        }
        if (RUNTIME_PROVIDERS.has(head)) {
            return head === 'openrouter'
                ? `openrouter:${normalizeOpenRouterModel(tail)}`
                : `${head}:${tail}`;
        }
    }
    const slashAt = trimmed.indexOf('/');
    if (slashAt > 0) {
        const head = trimmed.slice(0, slashAt).toLowerCase();
        const tail = trimmed.slice(slashAt + 1);
        // For a recognized provider the slash becomes the canonical colon;
        // 'openrouter/x' and e.g. 'openai/x' both map to '<head>:<tail>'.
        if (RUNTIME_PROVIDERS.has(head)) {
            return `${head}:${tail}`;
        }
    }
    // No recognizable provider prefix: infer one from the model name.
    return `${inferProvider(trimmed)}:${trimmed}`;
}
|
|
64
|
+
/**
 * Format a model id for output. Currently this is exactly the canonical
 * `provider:model` form produced by normalizeModelId.
 */
export function formatModelId(modelId) {
    const canonical = normalizeModelId(modelId);
    return canonical;
}
|
|
67
|
+
/**
 * Parse a (possibly un-normalized) model id into { id, provider, model }.
 * For `custom:<name>:<model>` ids the provider field is reported as
 * 'openai' (NOTE(review): presumably routed through an OpenAI-compatible
 * client — confirm) and the custom provider name is returned separately.
 *
 * @throws {Error} when the provider prefix is unknown or the model part
 *   is missing.
 */
export function parseModelId(modelId) {
    const id = normalizeModelId(modelId);
    const segments = id.split(':');
    const prefix = segments[0];
    const rest = segments.slice(1);
    if (prefix === 'custom') {
        const customProviderName = rest[0];
        const modelParts = rest.slice(1);
        if (!customProviderName || modelParts.length === 0) {
            throw new Error(`Invalid custom model id: ${modelId}`);
        }
        return {
            id,
            provider: 'openai',
            model: modelParts.join(':'),
            customProviderName,
        };
    }
    if (rest.length === 0 || !RUNTIME_PROVIDERS.has(prefix)) {
        throw new Error(`Invalid model id: ${modelId}`);
    }
    // Re-join so model names containing ':' survive the split above.
    return {
        id,
        provider: prefix,
        model: rest.join(':'),
    };
}
|
|
91
|
+
/**
 * Read apiKey/baseUrl for a custom provider from environment variables
 * named LLM_PROVIDER_<NAME>_API_KEY / LLM_PROVIDER_<NAME>_BASE_URL, where
 * <NAME> is the provider name upper-cased with every run of
 * non-alphanumeric characters collapsed into a single underscore.
 */
function readCustomProviderEnv(customProviderName) {
    const slug = customProviderName.toUpperCase().replace(/[^A-Z0-9]+/g, '_');
    const prefix = `LLM_PROVIDER_${slug}`;
    const apiKey = process.env[`${prefix}_API_KEY`];
    const baseUrl = process.env[`${prefix}_BASE_URL`];
    return { apiKey, baseUrl };
}
|
|
98
|
+
/**
 * Resolve the full model configuration for one slot.
 *
 * Precedence for model id and temperature: explicit overrides, then the
 * per-slot entry in config, then the built-in defaults. Credentials come
 * from custom-provider environment variables first (only for custom ids),
 * falling back to the matching entry in config.providers.
 */
export function resolveModelSlotConfig(config, slot, overrides = {}) {
    const chosenId = overrides.modelId ?? config.models[slot] ?? DEFAULT_MODEL_IDS[slot];
    const parsed = parseModelId(chosenId);
    const fromConfig = config.providers?.[parsed.provider] ?? {};
    const fromEnv = parsed.customProviderName
        ? readCustomProviderEnv(parsed.customProviderName)
        : {};
    const temperature = overrides.temperature ?? config.temperatures[slot] ?? DEFAULT_TEMPERATURES[slot];
    return {
        ...parsed,
        slot,
        temperature,
        apiKey: fromEnv.apiKey ?? fromConfig.apiKey,
        baseUrl: fromEnv.baseUrl ?? fromConfig.baseUrl,
    };
}
|
|
112
|
+
/**
 * Build a normalized model-id override from optional inputs.
 * Returns undefined when no model was given. A bare model name (containing
 * neither ':' nor '/') is combined with the provider, when one was
 * supplied, before normalization.
 */
export function buildModelOverride(model, provider) {
    if (!model) {
        return undefined;
    }
    const isBareName = !(model.includes(':') || model.includes('/'));
    const candidate = provider && isBareName ? `${provider}:${model}` : model;
    return normalizeModelId(candidate);
}
|
|
121
|
+
//# sourceMappingURL=model-slots.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"model-slots.js","sourceRoot":"","sources":["../../../src/system/config/model-slots.ts"],"names":[],"mappings":"AA4BA,MAAM,CAAC,MAAM,iBAAiB,GAA8B;IAC1D,OAAO,EAAE,eAAe;IACxB,GAAG,EAAE,eAAe;IACpB,OAAO,EAAE,eAAe;CACzB,CAAC;AAEF,MAAM,CAAC,MAAM,oBAAoB,GAA8B;IAC7D,OAAO,EAAE,GAAG;IACZ,GAAG,EAAE,GAAG;IACR,OAAO,EAAE,GAAG;CACb,CAAC;AAEF,MAAM,iBAAiB,GAAG,IAAI,GAAG,CAAW,CAAC,QAAQ,EAAE,WAAW,EAAE,QAAQ,EAAE,YAAY,CAAC,CAAC,CAAC;AAE7F,SAAS,aAAa,CAAC,KAAa;IAClC,IAAI,KAAK,CAAC,UAAU,CAAC,MAAM,CAAC,IAAI,KAAK,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,KAAK,CAAC,UAAU,CAAC,IAAI,CAAC,EAAE,CAAC;QACjF,OAAO,QAAQ,CAAC;IAClB,CAAC;IACD,IAAI,KAAK,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE,CAAC;QAChC,OAAO,WAAW,CAAC;IACrB,CAAC;IACD,IAAI,KAAK,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE,CAAC;QAChC,OAAO,QAAQ,CAAC;IAClB,CAAC;IACD,OAAO,YAAY,CAAC;AACtB,CAAC;AAED,SAAS,wBAAwB,CAAC,SAAiB;IACjD,IAAI,SAAS,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;QAC5B,OAAO,SAAS,CAAC;IACnB,CAAC;IACD,OAAO,SAAS,CAAC,OAAO,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC;AACrC,CAAC;AAED,MAAM,UAAU,gBAAgB,CAAC,OAAe;IAC9C,MAAM,OAAO,GAAG,OAAO,CAAC,IAAI,EAAE,CAAC;IAC/B,IAAI,CAAC,OAAO,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,yCAAyC,CAAC,CAAC;IAC7D,CAAC;IAED,IAAI,OAAO,CAAC,UAAU,CAAC,SAAS,CAAC,EAAE,CAAC;QAClC,OAAO,OAAO,CAAC;IACjB,CAAC;IAED,MAAM,UAAU,GAAG,OAAO,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;IACxC,IAAI,UAAU,GAAG,CAAC,EAAE,CAAC;QACnB,MAAM,MAAM,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,WAAW,EAAE,CAAC;QAC1D,MAAM,SAAS,GAAG,OAAO,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;QAChD,IAAI,MAAM,KAAK,QAAQ,EAAE,CAAC;YACxB,OAAO,UAAU,SAAS,EAAE,CAAC;QAC/B,CAAC;QACD,IAAI,iBAAiB,CAAC,GAAG,CAAC,MAAkB,CAAC,EAAE,CAAC;YAC9C,IAAI,MAAM,KAAK,YAAY,EAAE,CAAC;gBAC5B,OAAO,cAAc,wBAAwB,CAAC,SAAS,CAAC,EAAE,CAAC;YAC7D,CAAC;YACD,OAAO,GAAG,MAAM,IAAI,SAAS,EAAE,CAAC;QAClC,CAAC;IACH,CAAC;IAED,MAAM,UAAU,GAAG,OAAO,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;IACxC,IAAI,UAAU,GAAG,CAAC,EAAE,CAAC;QACnB,MAAM,MAAM,GAAG,OAAO,CAAC,KAAK,CAAC,CAAC,EAAE,UAAU,CAAC,CAAC,WAAW,EAAE,CAAC;QAC1D,MAAM,SAAS,GAAG,OAAO,CAAC,KAAK,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;QAChD
,IAAI,iBAAiB,CAAC,GAAG,CAAC,MAAkB,CAAC,EAAE,CAAC;YAC9C,OAAO,MAAM,KAAK,YAAY;gBAC5B,CAAC,CAAC,cAAc,SAAS,EAAE;gBAC3B,CAAC,CAAC,GAAG,MAAM,IAAI,SAAS,EAAE,CAAC;QAC/B,CAAC;IACH,CAAC;IAED,OAAO,GAAG,aAAa,CAAC,OAAO,CAAC,IAAI,OAAO,EAAE,CAAC;AAChD,CAAC;AAED,MAAM,UAAU,aAAa,CAAC,OAAe;IAC3C,OAAO,gBAAgB,CAAC,OAAO,CAAC,CAAC;AACnC,CAAC;AAED,MAAM,UAAU,YAAY,CAAC,OAAe;IAC1C,MAAM,EAAE,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;IACrC,MAAM,CAAC,MAAM,EAAE,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;IAExC,IAAI,MAAM,KAAK,QAAQ,EAAE,CAAC;QACxB,MAAM,CAAC,kBAAkB,EAAE,GAAG,UAAU,CAAC,GAAG,IAAI,CAAC;QACjD,IAAI,CAAC,kBAAkB,IAAI,UAAU,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YACnD,MAAM,IAAI,KAAK,CAAC,4BAA4B,OAAO,EAAE,CAAC,CAAC;QACzD,CAAC;QACD,OAAO;YACL,EAAE;YACF,QAAQ,EAAE,QAAQ;YAClB,KAAK,EAAE,UAAU,CAAC,IAAI,CAAC,GAAG,CAAC;YAC3B,kBAAkB;SACnB,CAAC;IACJ,CAAC;IAED,IAAI,CAAC,iBAAiB,CAAC,GAAG,CAAC,MAAkB,CAAC,IAAI,IAAI,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;QACpE,MAAM,IAAI,KAAK,CAAC,qBAAqB,OAAO,EAAE,CAAC,CAAC;IAClD,CAAC;IAED,OAAO;QACL,EAAE;QACF,QAAQ,EAAE,MAAkB;QAC5B,KAAK,EAAE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC;KACtB,CAAC;AACJ,CAAC;AAED,SAAS,qBAAqB,CAAC,kBAA0B;IACvD,MAAM,SAAS,GAAG,gBAAgB,kBAAkB,CAAC,WAAW,EAAE,CAAC,OAAO,CAAC,aAAa,EAAE,GAAG,CAAC,EAAE,CAAC;IACjG,OAAO;QACL,MAAM,EAAE,OAAO,CAAC,GAAG,CAAC,GAAG,SAAS,UAAU,CAAC;QAC3C,OAAO,EAAE,OAAO,CAAC,GAAG,CAAC,GAAG,SAAS,WAAW,CAAC;KAC9C,CAAC;AACJ,CAAC;AAED,MAAM,UAAU,sBAAsB,CACpC,MAAuB,EACvB,IAAe,EACf,YAAwD,EAAE;IAE1D,MAAM,MAAM,GAAG,YAAY,CAAC,SAAS,CAAC,OAAO,IAAI,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,iBAAiB,CAAC,IAAI,CAAC,CAAC,CAAC;IACjG,MAAM,sBAAsB,GAAG,MAAM,CAAC,SAAS,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC,IAAI,EAAE,CAAC;IACzE,MAAM,oBAAoB,GAAG,MAAM,CAAC,kBAAkB;QACpD,CAAC,CAAC,qBAAqB,CAAC,MAAM,CAAC,kBAAkB,CAAC;QAClD,CAAC,CAAC,EAAE,CAAC;IAEP,OAAO;QACL,GAAG,MAAM;QACT,IAAI;QACJ,WAAW,EAAE,SAAS,CAAC,WAAW,IAAI,MAAM,CAAC,YAAY,CAAC,IAAI,CAAC,IAAI,oBAAoB,CAAC,IAAI,CAAC;QAC7F,MAAM,EAAE,oBAAoB,CAAC,MAAM,IAAI,sBAAsB,CAAC,MAAM;QACpE,OAAO,EAAE,oBAAoB,CAAC,OAAO,IAAI,sBAAsB,CAAC,OAAO;KACxE,CAAC;AACJ,CAAC;AAED,MAAM,UAAU,kB
AAkB,CAAC,KAAc,EAAE,QAAmB;IACpE,IAAI,CAAC,KAAK,EAAE,CAAC;QACX,OAAO,SAAS,CAAC;IACnB,CAAC;IACD,IAAI,QAAQ,IAAI,CAAC,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC,EAAE,CAAC;QAC7D,OAAO,gBAAgB,CAAC,GAAG,QAAQ,IAAI,KAAK,EAAE,CAAC,CAAC;IAClD,CAAC;IACD,OAAO,gBAAgB,CAAC,KAAK,CAAC,CAAC;AACjC,CAAC"}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/** Outcome of a shell command executed inside the sandbox VM. */
export interface ShellExecResult {
  /** True exactly when exitCode is 0. */
  success: boolean;
  stdout: string;
  stderr: string;
  exitCode: number;
  /** One-line human-readable outcome description. */
  summary: string;
}
/**
 * Lazily starts a sandbox VM with the workspace mounted and reuses it for
 * shell commands until dispose() is called.
 */
export declare class AgentVMManager {
  private readonly workspacePath;
  private readonly network;
  private vm;
  readonly kind: "sandbox";
  constructor(workspacePath: string, network: boolean);
  /** Run a command in the VM; cancellable via the optional AbortSignal. */
  exec(command: string, signal?: AbortSignal): Promise<ShellExecResult>;
  /** Stop and release the VM if one is running. */
  dispose(): Promise<void>;
  private getVM;
}
|
|
18
|
+
//# sourceMappingURL=agentvm-manager.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"agentvm-manager.d.ts","sourceRoot":"","sources":["../../../src/system/runtime/agentvm-manager.ts"],"names":[],"mappings":"AAIA,MAAM,WAAW,eAAe;IAC9B,OAAO,EAAE,OAAO,CAAC;IACjB,MAAM,EAAE,MAAM,CAAC;IACf,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,EAAE,MAAM,CAAC;CACjB;AAED,qBAAa,cAAc;IAKvB,OAAO,CAAC,QAAQ,CAAC,aAAa;IAC9B,OAAO,CAAC,QAAQ,CAAC,OAAO;IAL1B,OAAO,CAAC,EAAE,CAAwB;IAClC,QAAQ,CAAC,IAAI,EAAG,SAAS,CAAU;gBAGhB,aAAa,EAAE,MAAM,EACrB,OAAO,EAAE,OAAO;IAG7B,IAAI,CAAC,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,WAAW,GAAG,OAAO,CAAC,eAAe,CAAC;IAgErE,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;YAOhB,KAAK;CAepB"}
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
// Cached constructor from the dynamically imported 'deepclause-agentvm'
// module; populated on the first getVM() call.
let AgentVMClass = null;
/**
 * Lazily starts a sandbox VM with the workspace mounted at /workspace and
 * reuses it for subsequent shell commands until dispose() is called.
 */
export class AgentVMManager {
    workspacePath;
    network;
    // Live VM instance, or null when not started / after dispose().
    vm = null;
    kind = 'sandbox';
    constructor(workspacePath, network) {
        this.workspacePath = workspacePath;
        this.network = network;
    }
    /**
     * Execute a shell command in the VM.
     *
     * With an AbortSignal: an abort tears down the VM (best-effort dispose)
     * and rejects with an AbortError; a command result arriving after the
     * abort is discarded rather than resolved.
     *
     * Returns a ShellExecResult where success mirrors exitCode === 0 and
     * summary is a fixed success message, the stderr text, or a generic
     * failure line carrying the exit code.
     */
    async exec(command, signal) {
        if (signal?.aborted) {
            throw abortError(signal.reason);
        }
        const vm = await this.getVM();
        const result = signal
            ? await new Promise((resolve, reject) => {
                // 'settled' guards against double-settling: both the abort
                // listener and the exec promise may try to finish this promise.
                let settled = false;
                const finishResolve = (value) => {
                    if (settled) {
                        return;
                    }
                    settled = true;
                    cleanup();
                    resolve(value);
                };
                const finishReject = (error) => {
                    if (settled) {
                        return;
                    }
                    settled = true;
                    cleanup();
                    reject(error);
                };
                const cleanup = () => {
                    signal.removeEventListener('abort', onAbort);
                };
                const onAbort = () => {
                    // Stop the VM so the in-flight command cannot keep running;
                    // dispose errors are deliberately swallowed here.
                    void this.dispose().catch(() => { });
                    finishReject(abortError(signal.reason));
                };
                signal.addEventListener('abort', onAbort, { once: true });
                void vm.exec(command)
                    .then((value) => {
                    // If the abort raced us, report the abort, not the result.
                    if (signal.aborted) {
                        finishReject(abortError(signal.reason));
                        return;
                    }
                    finishResolve(value);
                })
                    .catch((error) => {
                    // Prefer the abort error over whatever the torn-down
                    // VM reported.
                    if (signal.aborted) {
                        finishReject(abortError(signal.reason));
                        return;
                    }
                    finishReject(error);
                });
            })
            : await vm.exec(command);
        // Missing result fields default to empty output and a zero exit code.
        const stdout = result.stdout ?? '';
        const stderr = result.stderr ?? '';
        const exitCode = result.exitCode ?? 0;
        return {
            success: exitCode === 0,
            stdout,
            stderr,
            exitCode,
            summary: exitCode === 0 ? 'Command completed successfully' : (stderr || `Command failed with exit code ${exitCode}`),
        };
    }
    /** Stop the VM (if running) and clear the cached instance. */
    async dispose() {
        if (this.vm) {
            await this.vm.stop();
            this.vm = null;
        }
    }
    /** Import the AgentVM class on first use, then start (or reuse) the VM. */
    async getVM() {
        if (!AgentVMClass) {
            const mod = await import('deepclause-agentvm');
            AgentVMClass = mod.AgentVM;
        }
        if (!this.vm) {
            this.vm = new AgentVMClass({
                network: this.network,
                mounts: { '/workspace': this.workspacePath },
            });
            await this.vm.start();
            // NOTE(review): assumes the VM shell session persists state across
            // exec() calls so this cd applies to later commands — confirm.
            await this.vm.exec('cd /workspace');
        }
        return this.vm;
    }
}
|
|
93
|
+
/**
 * Normalize an abort reason into an Error: Error instances pass through
 * unchanged; any other reason yields a generic Error named 'AbortError'.
 */
function abortError(reason) {
    if (reason instanceof Error) {
        return reason;
    }
    const error = new Error('This operation was aborted');
    error.name = 'AbortError';
    return error;
}
|
|
98
|
+
//# sourceMappingURL=agentvm-manager.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"agentvm-manager.js","sourceRoot":"","sources":["../../../src/system/runtime/agentvm-manager.ts"],"names":[],"mappings":"AAEA,IAAI,YAAY,GAA+F,IAAI,CAAC;AAUpH,MAAM,OAAO,cAAc;IAKN;IACA;IALX,EAAE,GAAmB,IAAI,CAAC;IACzB,IAAI,GAAG,SAAkB,CAAC;IAEnC,YACmB,aAAqB,EACrB,OAAgB;QADhB,kBAAa,GAAb,aAAa,CAAQ;QACrB,YAAO,GAAP,OAAO,CAAS;IAChC,CAAC;IAEJ,KAAK,CAAC,IAAI,CAAC,OAAe,EAAE,MAAoB;QAC9C,IAAI,MAAM,EAAE,OAAO,EAAE,CAAC;YACpB,MAAM,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QAClC,CAAC;QAED,MAAM,EAAE,GAAG,MAAM,IAAI,CAAC,KAAK,EAAE,CAAC;QAC9B,MAAM,MAAM,GAAG,MAAM;YACnB,CAAC,CAAC,MAAM,IAAI,OAAO,CAAuC,CAAC,OAAO,EAAE,MAAM,EAAE,EAAE;gBAC1E,IAAI,OAAO,GAAG,KAAK,CAAC;gBACpB,MAAM,aAAa,GAAG,CAAC,KAA2C,EAAE,EAAE;oBACpE,IAAI,OAAO,EAAE,CAAC;wBACZ,OAAO;oBACT,CAAC;oBACD,OAAO,GAAG,IAAI,CAAC;oBACf,OAAO,EAAE,CAAC;oBACV,OAAO,CAAC,KAAK,CAAC,CAAC;gBACjB,CAAC,CAAC;gBACF,MAAM,YAAY,GAAG,CAAC,KAAc,EAAE,EAAE;oBACtC,IAAI,OAAO,EAAE,CAAC;wBACZ,OAAO;oBACT,CAAC;oBACD,OAAO,GAAG,IAAI,CAAC;oBACf,OAAO,EAAE,CAAC;oBACV,MAAM,CAAC,KAAK,CAAC,CAAC;gBAChB,CAAC,CAAC;gBACF,MAAM,OAAO,GAAG,GAAG,EAAE;oBACnB,MAAM,CAAC,mBAAmB,CAAC,OAAO,EAAE,OAAO,CAAC,CAAC;gBAC/C,CAAC,CAAC;gBACF,MAAM,OAAO,GAAG,GAAG,EAAE;oBACnB,KAAK,IAAI,CAAC,OAAO,EAAE,CAAC,KAAK,CAAC,GAAG,EAAE,GAAE,CAAC,CAAC,CAAC;oBACpC,YAAY,CAAC,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC;gBAC1C,CAAC,CAAC;gBAEF,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC,CAAC;gBAC1D,KAAK,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC;qBAClB,IAAI,CAAC,CAAC,KAAK,EAAE,EAAE;oBACd,IAAI,MAAM,CAAC,OAAO,EAAE,CAAC;wBACnB,YAAY,CAAC,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC;wBACxC,OAAO;oBACT,CAAC;oBACD,aAAa,CAAC,KAAK,CAAC,CAAC;gBACvB,CAAC,CAAC;qBACD,KAAK,CAAC,CAAC,KAAK,EAAE,EAAE;oBACf,IAAI,MAAM,CAAC,OAAO,EAAE,CAAC;wBACnB,YAAY,CAAC,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC;wBACxC,OAAO;oBACT,CAAC;oBACD,YAAY,CAAC,KAAK,CAAC,CAAC;gBACtB,CAAC,CAAC,CAAC;YACP,CAAC,CAAC;YACJ,CAAC,CAAC,MAAM,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;QAE3B,MAAM,MAAM,GAAG,MAAM,CAAC,MAAM,IAAI,EAAE,CAAC;QACnC,MAAM,MAAM,GAAG,MAAM,CAAC,MA
AM,IAAI,EAAE,CAAC;QACnC,MAAM,QAAQ,GAAG,MAAM,CAAC,QAAQ,IAAI,CAAC,CAAC;QACtC,OAAO;YACL,OAAO,EAAE,QAAQ,KAAK,CAAC;YACvB,MAAM;YACN,MAAM;YACN,QAAQ;YACR,OAAO,EAAE,QAAQ,KAAK,CAAC,CAAC,CAAC,CAAC,gCAAgC,CAAC,CAAC,CAAC,CAAC,MAAM,IAAI,iCAAiC,QAAQ,EAAE,CAAC;SACrH,CAAC;IACJ,CAAC;IAED,KAAK,CAAC,OAAO;QACX,IAAI,IAAI,CAAC,EAAE,EAAE,CAAC;YACZ,MAAM,IAAI,CAAC,EAAE,CAAC,IAAI,EAAE,CAAC;YACrB,IAAI,CAAC,EAAE,GAAG,IAAI,CAAC;QACjB,CAAC;IACH,CAAC;IAEO,KAAK,CAAC,KAAK;QACjB,IAAI,CAAC,YAAY,EAAE,CAAC;YAClB,MAAM,GAAG,GAAG,MAAM,MAAM,CAAC,oBAAoB,CAAC,CAAC;YAC/C,YAAY,GAAG,GAAG,CAAC,OAAO,CAAC;QAC7B,CAAC;QACD,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC;YACb,IAAI,CAAC,EAAE,GAAG,IAAI,YAAa,CAAC;gBAC1B,OAAO,EAAE,IAAI,CAAC,OAAO;gBACrB,MAAM,EAAE,EAAE,YAAY,EAAE,IAAI,CAAC,aAAa,EAAE;aAC7C,CAAC,CAAC;YACH,MAAM,IAAI,CAAC,EAAE,CAAC,KAAK,EAAE,CAAC;YACtB,MAAM,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,eAAe,CAAC,CAAC;QACtC,CAAC;QACD,OAAO,IAAI,CAAC,EAAE,CAAC;IACjB,CAAC;CACF;AAED,SAAS,UAAU,CAAC,MAAe;IACjC,OAAO,MAAM,YAAY,KAAK;QAC5B,CAAC,CAAC,MAAM;QACR,CAAC,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,KAAK,CAAC,4BAA4B,CAAC,EAAE,EAAE,IAAI,EAAE,YAAY,EAAE,CAAC,CAAC;AACrF,CAAC"}
|