pocket-agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +140 -0
- package/dist/adapters/createLLMEvaluator.d.ts +19 -0
- package/dist/adapters/createLLMEvaluator.d.ts.map +1 -0
- package/dist/adapters/createLLMEvaluator.js +150 -0
- package/dist/adapters/createLLMEvaluator.js.map +1 -0
- package/dist/adapters/models/anthropic.d.ts +15 -0
- package/dist/adapters/models/anthropic.d.ts.map +1 -0
- package/dist/adapters/models/anthropic.js +45 -0
- package/dist/adapters/models/anthropic.js.map +1 -0
- package/dist/adapters/models/gemini.d.ts +16 -0
- package/dist/adapters/models/gemini.d.ts.map +1 -0
- package/dist/adapters/models/gemini.js +58 -0
- package/dist/adapters/models/gemini.js.map +1 -0
- package/dist/adapters/models/lmStudioStreaming.d.ts +21 -0
- package/dist/adapters/models/lmStudioStreaming.d.ts.map +1 -0
- package/dist/adapters/models/lmStudioStreaming.js +90 -0
- package/dist/adapters/models/lmStudioStreaming.js.map +1 -0
- package/dist/adapters/models/lmstudio.d.ts +20 -0
- package/dist/adapters/models/lmstudio.d.ts.map +1 -0
- package/dist/adapters/models/lmstudio.js +59 -0
- package/dist/adapters/models/lmstudio.js.map +1 -0
- package/dist/adapters/models/ollama.d.ts +22 -0
- package/dist/adapters/models/ollama.d.ts.map +1 -0
- package/dist/adapters/models/ollama.js +61 -0
- package/dist/adapters/models/ollama.js.map +1 -0
- package/dist/adapters/models/ollamaStreaming.d.ts +30 -0
- package/dist/adapters/models/ollamaStreaming.d.ts.map +1 -0
- package/dist/adapters/models/ollamaStreaming.js +88 -0
- package/dist/adapters/models/ollamaStreaming.js.map +1 -0
- package/dist/adapters/models/openai.d.ts +17 -0
- package/dist/adapters/models/openai.d.ts.map +1 -0
- package/dist/adapters/models/openai.js +50 -0
- package/dist/adapters/models/openai.js.map +1 -0
- package/dist/defaults/defaultExecutor.d.ts +20 -0
- package/dist/defaults/defaultExecutor.d.ts.map +1 -0
- package/dist/defaults/defaultExecutor.js +67 -0
- package/dist/defaults/defaultExecutor.js.map +1 -0
- package/dist/defaults/llmPlanner.d.ts +20 -0
- package/dist/defaults/llmPlanner.d.ts.map +1 -0
- package/dist/defaults/llmPlanner.js +151 -0
- package/dist/defaults/llmPlanner.js.map +1 -0
- package/dist/defaults/planBuilder.d.ts +48 -0
- package/dist/defaults/planBuilder.d.ts.map +1 -0
- package/dist/defaults/planBuilder.js +105 -0
- package/dist/defaults/planBuilder.js.map +1 -0
- package/dist/defaults/singleStepPlanner.d.ts +17 -0
- package/dist/defaults/singleStepPlanner.d.ts.map +1 -0
- package/dist/defaults/singleStepPlanner.js +52 -0
- package/dist/defaults/singleStepPlanner.js.map +1 -0
- package/dist/evaluator/StepEvaluator.d.ts +9 -0
- package/dist/evaluator/StepEvaluator.d.ts.map +1 -0
- package/dist/evaluator/StepEvaluator.js +5 -0
- package/dist/evaluator/StepEvaluator.js.map +1 -0
- package/dist/events/EventBus.d.ts +13 -0
- package/dist/events/EventBus.d.ts.map +1 -0
- package/dist/events/EventBus.js +34 -0
- package/dist/events/EventBus.js.map +1 -0
- package/dist/executor/StepExecutor.d.ts +9 -0
- package/dist/executor/StepExecutor.d.ts.map +1 -0
- package/dist/executor/StepExecutor.js +5 -0
- package/dist/executor/StepExecutor.js.map +1 -0
- package/dist/index.d.ts +48 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +22 -0
- package/dist/index.js.map +1 -0
- package/dist/models/ModelAdapter.d.ts +5 -0
- package/dist/models/ModelAdapter.d.ts.map +1 -0
- package/dist/models/ModelAdapter.js +5 -0
- package/dist/models/ModelAdapter.js.map +1 -0
- package/dist/planner/PlanValidator.d.ts +16 -0
- package/dist/planner/PlanValidator.d.ts.map +1 -0
- package/dist/planner/PlanValidator.js +44 -0
- package/dist/planner/PlanValidator.js.map +1 -0
- package/dist/planner/Planner.d.ts +10 -0
- package/dist/planner/Planner.d.ts.map +1 -0
- package/dist/planner/Planner.js +5 -0
- package/dist/planner/Planner.js.map +1 -0
- package/dist/quickStart.d.ts +49 -0
- package/dist/quickStart.d.ts.map +1 -0
- package/dist/quickStart.js +77 -0
- package/dist/quickStart.js.map +1 -0
- package/dist/runner/AgentRunner.d.ts +29 -0
- package/dist/runner/AgentRunner.d.ts.map +1 -0
- package/dist/runner/AgentRunner.js +223 -0
- package/dist/runner/AgentRunner.js.map +1 -0
- package/dist/runner/RunStateManager.d.ts +21 -0
- package/dist/runner/RunStateManager.d.ts.map +1 -0
- package/dist/runner/RunStateManager.js +96 -0
- package/dist/runner/RunStateManager.js.map +1 -0
- package/dist/runner/RunningExecution.d.ts +14 -0
- package/dist/runner/RunningExecution.d.ts.map +1 -0
- package/dist/runner/RunningExecution.js +5 -0
- package/dist/runner/RunningExecution.js.map +1 -0
- package/dist/runner/StepScheduler.d.ts +12 -0
- package/dist/runner/StepScheduler.d.ts.map +1 -0
- package/dist/runner/StepScheduler.js +42 -0
- package/dist/runner/StepScheduler.js.map +1 -0
- package/dist/runner/createAgentRunner.d.ts +32 -0
- package/dist/runner/createAgentRunner.d.ts.map +1 -0
- package/dist/runner/createAgentRunner.js +57 -0
- package/dist/runner/createAgentRunner.js.map +1 -0
- package/dist/runner/resolveInputs.d.ts +7 -0
- package/dist/runner/resolveInputs.d.ts.map +1 -0
- package/dist/runner/resolveInputs.js +28 -0
- package/dist/runner/resolveInputs.js.map +1 -0
- package/dist/tools/ToolAdapter.d.ts +5 -0
- package/dist/tools/ToolAdapter.d.ts.map +1 -0
- package/dist/tools/ToolAdapter.js +5 -0
- package/dist/tools/ToolAdapter.js.map +1 -0
- package/dist/types/errors.d.ts +28 -0
- package/dist/types/errors.d.ts.map +1 -0
- package/dist/types/errors.js +37 -0
- package/dist/types/errors.js.map +1 -0
- package/dist/types/evaluator.d.ts +14 -0
- package/dist/types/evaluator.d.ts.map +1 -0
- package/dist/types/evaluator.js +5 -0
- package/dist/types/evaluator.js.map +1 -0
- package/dist/types/events.d.ts +79 -0
- package/dist/types/events.d.ts.map +1 -0
- package/dist/types/events.js +17 -0
- package/dist/types/events.js.map +1 -0
- package/dist/types/executor.d.ts +18 -0
- package/dist/types/executor.d.ts.map +1 -0
- package/dist/types/executor.js +5 -0
- package/dist/types/executor.js.map +1 -0
- package/dist/types/models.d.ts +18 -0
- package/dist/types/models.d.ts.map +1 -0
- package/dist/types/models.js +6 -0
- package/dist/types/models.js.map +1 -0
- package/dist/types/plan.d.ts +37 -0
- package/dist/types/plan.d.ts.map +1 -0
- package/dist/types/plan.js +5 -0
- package/dist/types/plan.js.map +1 -0
- package/dist/types/planner.d.ts +24 -0
- package/dist/types/planner.d.ts.map +1 -0
- package/dist/types/planner.js +5 -0
- package/dist/types/planner.js.map +1 -0
- package/dist/types/run.d.ts +48 -0
- package/dist/types/run.d.ts.map +1 -0
- package/dist/types/run.js +14 -0
- package/dist/types/run.js.map +1 -0
- package/dist/types/step.d.ts +52 -0
- package/dist/types/step.d.ts.map +1 -0
- package/dist/types/step.js +5 -0
- package/dist/types/step.js.map +1 -0
- package/dist/types/tools.d.ts +21 -0
- package/dist/types/tools.d.ts.map +1 -0
- package/dist/types/tools.js +5 -0
- package/dist/types/tools.js.map +1 -0
- package/dist/utils/dag.d.ts +18 -0
- package/dist/utils/dag.d.ts.map +1 -0
- package/dist/utils/dag.js +77 -0
- package/dist/utils/dag.js.map +1 -0
- package/dist/utils/ids.d.ts +6 -0
- package/dist/utils/ids.d.ts.map +1 -0
- package/dist/utils/ids.js +11 -0
- package/dist/utils/ids.js.map +1 -0
- package/dist/utils/schema.d.ts +15 -0
- package/dist/utils/schema.d.ts.map +1 -0
- package/dist/utils/schema.js +11 -0
- package/dist/utils/schema.js.map +1 -0
- package/dist/utils/time.d.ts +6 -0
- package/dist/utils/time.d.ts.map +1 -0
- package/dist/utils/time.js +10 -0
- package/dist/utils/time.js.map +1 -0
- package/package.json +50 -0
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
/**
 * LM Studio model adapter. Supports OpenAI-compatible API and optional native
 * streaming (POST /api/v1/chat with stream: true) to separate reasoning from message.
 */
import { createOpenAIModelAdapter } from "./openai.js";
import { lmStudioStreamingChat, getLMStudioBaseUrl } from "./lmStudioStreaming.js";

const FALLBACK_BASE_URL = "http://localhost:1234/v1";
const FALLBACK_MODEL = "local";

// Environment-derived defaults, re-read on every call so env changes take effect.
const envDefaults = () => ({
    baseURL: process.env.OPENAI_BASE_URL ?? process.env.OPENAI_API_URL ?? FALLBACK_BASE_URL,
    model: process.env.OPENAI_MODEL ?? process.env.MODEL ?? FALLBACK_MODEL,
    maxTokens: process.env.OPENAI_MAX_TOKENS
        ? parseInt(process.env.OPENAI_MAX_TOKENS, 10)
        : 4096,
    useNativeStreaming: process.env.USE_LM_STUDIO_STREAMING === "1" ||
        process.env.USE_LM_STUDIO_STREAMING === "true",
    apiKey: process.env.OPENAI_API_KEY ?? "local",
});

// Collapse a generate() input into a single prompt string: prefer the plain
// prompt, otherwise flatten the messages array into "role: content" lines.
const toPrompt = (input) => {
    if (typeof input.prompt === "string") {
        return input.prompt;
    }
    if (Array.isArray(input.messages) && input.messages.length) {
        return input.messages.map((m) => `${m.role}: ${m.content}`).join("\n");
    }
    return "";
};

/**
 * Creates a ModelAdapter for LM Studio. When useNativeStreaming is true, generate()
 * uses LM Studio's native streaming API (reasoning and message separated).
 * Otherwise uses the OpenAI-compatible endpoint. Install the `openai` package for non-streaming.
 */
export function createLmStudioModelAdapter(config = {}) {
    const settings = { ...envDefaults(), ...config };
    const baseUrl = settings.baseURL ?? FALLBACK_BASE_URL;
    const lmBase = getLMStudioBaseUrl(baseUrl);

    if (settings.useNativeStreaming === true && lmBase) {
        return {
            async generate(input) {
                // Guard against NaN / non-positive values from env parsing.
                const tokenCap = Number.isFinite(settings.maxTokens) && settings.maxTokens > 0
                    ? settings.maxTokens
                    : 4096;
                const result = await lmStudioStreamingChat({
                    baseUrl: lmBase,
                    model: settings.model ?? FALLBACK_MODEL,
                    userMessage: toPrompt(input),
                    maxOutputTokens: tokenCap,
                    apiKey: settings.apiKey,
                });
                return { content: result.content, raw: { reasoning: result.reasoning } };
            },
        };
    }

    // Non-streaming path: delegate to the OpenAI-compatible adapter.
    return createOpenAIModelAdapter({
        apiKey: settings.apiKey ?? "local",
        baseURL: baseUrl,
        model: settings.model ?? FALLBACK_MODEL,
        maxTokens: settings.maxTokens,
    });
}
//# sourceMappingURL=lmstudio.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"lmstudio.js","sourceRoot":"","sources":["../../../src/adapters/models/lmstudio.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAIH,OAAO,EAAE,wBAAwB,EAAE,MAAM,aAAa,CAAC;AAEvD,OAAO,EAAE,qBAAqB,EAAE,kBAAkB,EAAE,MAAM,wBAAwB,CAAC;AAWnF,MAAM,cAAc,GAAG,0BAA0B,CAAC;AAClD,MAAM,YAAY,GAAG,OAAO,CAAC;AAE7B,MAAM,aAAa,GAAG,GAAwB,EAAE,CAAC,CAAC;IAChD,OAAO,EAAE,OAAO,CAAC,GAAG,CAAC,eAAe,IAAI,OAAO,CAAC,GAAG,CAAC,cAAc,IAAI,cAAc;IACpF,KAAK,EAAE,OAAO,CAAC,GAAG,CAAC,YAAY,IAAI,OAAO,CAAC,GAAG,CAAC,KAAK,IAAI,YAAY;IACpE,SAAS,EAAE,OAAO,CAAC,GAAG,CAAC,iBAAiB;QACtC,CAAC,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,CAAC,iBAAiB,EAAE,EAAE,CAAC;QAC7C,CAAC,CAAC,IAAI;IACR,kBAAkB,EAChB,OAAO,CAAC,GAAG,CAAC,uBAAuB,KAAK,GAAG;QAC3C,OAAO,CAAC,GAAG,CAAC,uBAAuB,KAAK,MAAM;IAChD,MAAM,EAAE,OAAO,CAAC,GAAG,CAAC,cAAc,IAAI,OAAO;CAC9C,CAAC,CAAC;AAEH;;;;GAIG;AACH,MAAM,UAAU,0BAA0B,CAAC,SAA8B,EAAE;IACzE,MAAM,IAAI,GAAG,EAAE,GAAG,aAAa,EAAE,EAAE,GAAG,MAAM,EAAE,CAAC;IAC/C,MAAM,YAAY,GAAG,IAAI,CAAC,kBAAkB,KAAK,IAAI,CAAC;IACtD,MAAM,OAAO,GAAG,IAAI,CAAC,OAAO,IAAI,cAAc,CAAC;IAC/C,MAAM,MAAM,GAAG,kBAAkB,CAAC,OAAO,CAAC,CAAC;IAE3C,IAAI,YAAY,IAAI,MAAM,EAAE,CAAC;QAC3B,OAAO;YACL,KAAK,CAAC,QAAQ,CAAC,KAAyB;gBACtC,MAAM,MAAM,GACV,OAAO,KAAK,CAAC,MAAM,KAAK,QAAQ;oBAC9B,CAAC,CAAC,KAAK,CAAC,MAAM;oBACd,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,KAAK,CAAC,QAAQ,CAAC,MAAM;wBACtD,CAAC,CAAE,KAAK,CAAC,QAAgD;6BACpD,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,OAAO,EAAE,CAAC;6BACrC,IAAI,CAAC,IAAI,CAAC;wBACf,CAAC,CAAC,EAAE,CAAC;gBACX,MAAM,SAAS,GACb,MAAM,CAAC,QAAQ,CAAC,IAAI,CAAC,SAAS,CAAC,IAAI,IAAI,CAAC,SAAU,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,SAAU,CAAC,CAAC,CAAC,IAAI,CAAC;gBAClF,MAAM,MAAM,GAAG,MAAM,qBAAqB,CAAC;oBACzC,OAAO,EAAE,MAAM;oBACf,KAAK,EAAE,IAAI,CAAC,KAAK,IAAI,YAAY;oBACjC,WAAW,EAAE,MAAM;oBACnB,eAAe,EAAE,SAAS;oBAC1B,MAAM,EAAE,IAAI,CAAC,MAAM;iBACpB,CAAC,CAAC;gBACH,OAAO,EAAE,OAAO,EAAE,MAAM,CAAC,OAAO,EAAE,GAAG,EAAE,EAAE,SAAS,EAAE,MAAM,CAAC,SAAS,EAAE,EAAE,CAAC;YAC3E,CAAC;SACF,CAAC;IACJ,CAAC;IAED,MAAM,YAAY,GAAsB;QACtC,M
AAM,EAAE,IAAI,CAAC,MAAM,IAAI,OAAO;QAC9B,OAAO,EAAE,OAAO;QAChB,KAAK,EAAE,IAAI,CAAC,KAAK,IAAI,YAAY;QACjC,SAAS,EAAE,IAAI,CAAC,SAAS;KAC1B,CAAC;IACF,OAAO,wBAAwB,CAAC,YAAY,CAAC,CAAC;AAChD,CAAC"}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
/**
 * Ollama model adapter. Uses OpenAI-compatible API at http://localhost:11434/v1 by default.
 * Optional native streaming (POST /api/chat with stream: true) for content + thinking.
 * No API key required for local Ollama.
 * @see https://docs.ollama.com/capabilities/streaming
 */
import type { ModelAdapter } from "../../types/models.js";
/** Configuration for {@link createOllamaModelAdapter}; every field is optional. */
export interface OllamaModelConfig {
    /** OpenAI-compatible base URL; defaults to OPENAI_BASE_URL / OPENAI_API_URL env or http://localhost:11434/v1. */
    baseURL?: string;
    /** Model name; defaults to OPENAI_MODEL / MODEL env or "llama3". */
    model?: string;
    /** Maximum tokens to generate (non-streaming path); defaults to OPENAI_MAX_TOKENS env or 4096. */
    maxTokens?: number;
    /** Use native Ollama /api/chat streaming (content + thinking). Default from OLLAMA_USE_STREAMING env. */
    useNativeStreaming?: boolean;
}
/**
 * Creates a ModelAdapter for Ollama. When useNativeStreaming is true, generate() uses
 * native /api/chat streaming (content and optional thinking). Otherwise uses the
 * OpenAI-compatible endpoint. Uses OPENAI_BASE_URL (default http://localhost:11434/v1),
 * OPENAI_MODEL (default llama3). Install the `openai` package for non-streaming.
 */
export declare function createOllamaModelAdapter(config?: OllamaModelConfig): ModelAdapter;
//# sourceMappingURL=ollama.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ollama.d.ts","sourceRoot":"","sources":["../../../src/adapters/models/ollama.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAM1D,MAAM,WAAW,iBAAiB;IAChC,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,yGAAyG;IACzG,kBAAkB,CAAC,EAAE,OAAO,CAAC;CAC9B;AAgBD;;;;;GAKG;AACH,wBAAgB,wBAAwB,CAAC,MAAM,GAAE,iBAAsB,GAAG,YAAY,CAqCrF"}
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
/**
 * Ollama model adapter. Uses OpenAI-compatible API at http://localhost:11434/v1 by default.
 * Optional native streaming (POST /api/chat with stream: true) for content + thinking.
 * No API key required for local Ollama.
 * @see https://docs.ollama.com/capabilities/streaming
 */
import { createOpenAIModelAdapter } from "./openai.js";
import { ollamaStreamingChat, getOllamaBaseUrl } from "./ollamaStreaming.js";

const FALLBACK_BASE_URL = "http://localhost:11434/v1";
const FALLBACK_MODEL = "llama3";

// Environment-derived defaults, recomputed per call so env changes are picked up.
const envDefaults = () => ({
    apiKey: "local",
    baseURL: process.env.OPENAI_BASE_URL ?? process.env.OPENAI_API_URL ?? FALLBACK_BASE_URL,
    model: process.env.OPENAI_MODEL ?? process.env.MODEL ?? FALLBACK_MODEL,
    maxTokens: process.env.OPENAI_MAX_TOKENS
        ? parseInt(process.env.OPENAI_MAX_TOKENS, 10)
        : 4096,
    useNativeStreaming: process.env.OLLAMA_USE_STREAMING === "1" || process.env.OLLAMA_USE_STREAMING === "true",
});

// Flatten a generate() input into one message string; prefer the plain prompt,
// otherwise join the messages array as "role: content" lines.
const flattenInput = (input) => {
    if (typeof input.prompt === "string") {
        return input.prompt;
    }
    if (Array.isArray(input.messages) && input.messages.length) {
        return input.messages.map((m) => `${m.role}: ${m.content}`).join("\n");
    }
    return "";
};

/**
 * Creates a ModelAdapter for Ollama. When useNativeStreaming is true, generate() uses
 * native /api/chat streaming (content and optional thinking). Otherwise uses the
 * OpenAI-compatible endpoint. Uses OPENAI_BASE_URL (default http://localhost:11434/v1),
 * OPENAI_MODEL (default llama3). Install the `openai` package for non-streaming.
 */
export function createOllamaModelAdapter(config = {}) {
    const settings = { ...envDefaults(), ...config };
    const baseURL = settings.baseURL ?? FALLBACK_BASE_URL;

    if (settings.useNativeStreaming === true) {
        // Native streaming talks to the bare server root, not the /v1 endpoint.
        const serverRoot = getOllamaBaseUrl(baseURL);
        return {
            async generate(input) {
                const reply = await ollamaStreamingChat({
                    baseUrl: serverRoot,
                    model: settings.model ?? FALLBACK_MODEL,
                    message: flattenInput(input),
                });
                return {
                    content: reply.content,
                    raw: reply.reasoning != null ? { reasoning: reply.reasoning } : undefined,
                };
            },
        };
    }

    // Non-streaming path: delegate to the OpenAI-compatible adapter.
    return createOpenAIModelAdapter({
        apiKey: "local",
        baseURL,
        model: settings.model ?? FALLBACK_MODEL,
        maxTokens: settings.maxTokens,
    });
}
//# sourceMappingURL=ollama.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ollama.js","sourceRoot":"","sources":["../../../src/adapters/models/ollama.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAIH,OAAO,EAAE,wBAAwB,EAAE,MAAM,aAAa,CAAC;AAEvD,OAAO,EAAE,mBAAmB,EAAE,gBAAgB,EAAE,MAAM,sBAAsB,CAAC;AAU7E,MAAM,cAAc,GAAG,2BAA2B,CAAC;AACnD,MAAM,YAAY,GAAG,QAAQ,CAAC;AAE9B,MAAM,aAAa,GAAG,GAAyD,EAAE,CAAC,CAAC;IACjF,MAAM,EAAE,OAAO;IACf,OAAO,EAAE,OAAO,CAAC,GAAG,CAAC,eAAe,IAAI,OAAO,CAAC,GAAG,CAAC,cAAc,IAAI,cAAc;IACpF,KAAK,EAAE,OAAO,CAAC,GAAG,CAAC,YAAY,IAAI,OAAO,CAAC,GAAG,CAAC,KAAK,IAAI,YAAY;IACpE,SAAS,EAAE,OAAO,CAAC,GAAG,CAAC,iBAAiB;QACtC,CAAC,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,CAAC,iBAAiB,EAAE,EAAE,CAAC;QAC7C,CAAC,CAAC,IAAI;IACR,kBAAkB,EAChB,OAAO,CAAC,GAAG,CAAC,oBAAoB,KAAK,GAAG,IAAI,OAAO,CAAC,GAAG,CAAC,oBAAoB,KAAK,MAAM;CAC1F,CAAC,CAAC;AAEH;;;;;GAKG;AACH,MAAM,UAAU,wBAAwB,CAAC,SAA4B,EAAE;IACrE,MAAM,IAAI,GAAG,EAAE,GAAG,aAAa,EAAE,EAAE,GAAG,MAAM,EAAE,CAAC;IAC/C,MAAM,YAAY,GAAG,IAAI,CAAC,kBAAkB,KAAK,IAAI,CAAC;IACtD,MAAM,OAAO,GAAG,IAAI,CAAC,OAAO,IAAI,cAAc,CAAC;IAC/C,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,CAAC,CAAC;IAE7C,IAAI,YAAY,EAAE,CAAC;QACjB,OAAO;YACL,KAAK,CAAC,QAAQ,CAAC,KAAyB;gBACtC,MAAM,OAAO,GACX,OAAO,KAAK,CAAC,MAAM,KAAK,QAAQ;oBAC9B,CAAC,CAAC,KAAK,CAAC,MAAM;oBACd,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,KAAK,CAAC,QAAQ,CAAC,MAAM;wBACtD,CAAC,CAAE,KAAK,CAAC,QAAgD;6BACpD,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,OAAO,EAAE,CAAC;6BACrC,IAAI,CAAC,IAAI,CAAC;wBACf,CAAC,CAAC,EAAE,CAAC;gBACX,MAAM,MAAM,GAAG,MAAM,mBAAmB,CAAC;oBACvC,OAAO,EAAE,UAAU;oBACnB,KAAK,EAAE,IAAI,CAAC,KAAK,IAAI,YAAY;oBACjC,OAAO;iBACR,CAAC,CAAC;gBACH,OAAO;oBACL,OAAO,EAAE,MAAM,CAAC,OAAO;oBACvB,GAAG,EAAE,MAAM,CAAC,SAAS,IAAI,IAAI,CAAC,CAAC,CAAC,EAAE,SAAS,EAAE,MAAM,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,SAAS;iBAC5E,CAAC;YACJ,CAAC;SACF,CAAC;IACJ,CAAC;IAED,MAAM,UAAU,GAAsB;QACpC,MAAM,EAAE,OAAO;QACf,OAAO;QACP,KAAK,EAAE,IAAI,CAAC,KAAK,IAAI,YAAY;QACjC,SAAS,EAAE,IAAI,CAAC,SAAS;KAC1B,CAAC;IACF,OAAO,wBAAwB,CAAC,UAAU,CAAC,CAAC;AAC9C,CAAC"}
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
 * Ollama native streaming: POST /api/chat with stream: true (default).
 * Accumulates content and optional thinking from NDJSON chunks.
 * @see https://docs.ollama.com/capabilities/streaming
 * @see https://docs.ollama.com/api/chat
 */
/** Options for {@link ollamaStreamingChat}. */
export interface OllamaStreamingOptions {
    /** Base URL of Ollama server (e.g. http://localhost:11434), without /v1 or /api. */
    baseUrl: string;
    /** Model name to run (e.g. "llama3"). */
    model: string;
    /** User message content. */
    message: string;
    /** Optional system prompt. */
    system?: string;
}
/** Accumulated result of one streamed chat completion. */
export interface OllamaStreamingResult {
    /** Concatenated (and trimmed) message content from all streamed chunks. */
    content: string;
    /** Present for thinking-capable models; reasoning trace. */
    reasoning?: string;
}
/**
 * Returns the Ollama server base URL (no /v1). Use this when calling native /api/chat.
 * Falls back to http://localhost:11434 when the input is empty or undefined.
 */
export declare function getOllamaBaseUrl(openAIBaseUrl: string | undefined): string;
/**
 * Call Ollama /api/chat with stream: true; accumulate content and optional thinking from chunks.
 * Response is application/x-ndjson: each line is a JSON object with message.content, message.thinking?, done.
 */
export declare function ollamaStreamingChat(options: OllamaStreamingOptions): Promise<OllamaStreamingResult>;
//# sourceMappingURL=ollamaStreaming.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ollamaStreaming.d.ts","sourceRoot":"","sources":["../../../src/adapters/models/ollamaStreaming.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAEH,MAAM,WAAW,sBAAsB;IACrC,oFAAoF;IACpF,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,4BAA4B;IAC5B,OAAO,EAAE,MAAM,CAAC;IAChB,8BAA8B;IAC9B,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,qBAAqB;IACpC,OAAO,EAAE,MAAM,CAAC;IAChB,4DAA4D;IAC5D,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,wBAAgB,gBAAgB,CAAC,aAAa,EAAE,MAAM,GAAG,SAAS,GAAG,MAAM,CAI1E;AAED;;;GAGG;AACH,wBAAsB,mBAAmB,CACvC,OAAO,EAAE,sBAAsB,GAC9B,OAAO,CAAC,qBAAqB,CAAC,CAyEhC"}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
/**
 * Ollama native streaming: POST /api/chat with stream: true (default).
 * Accumulates content and optional thinking from NDJSON chunks.
 * @see https://docs.ollama.com/capabilities/streaming
 * @see https://docs.ollama.com/api/chat
 */
/**
 * Returns the Ollama server base URL (no /v1). Use this when calling native /api/chat.
 */
export function getOllamaBaseUrl(openAIBaseUrl) {
    const fallback = "http://localhost:11434";
    if (!openAIBaseUrl) {
        return fallback;
    }
    // Peel a trailing "/v1" and then a trailing "/api" segment (in that order),
    // so e.g. "http://host/api/v1" collapses to the bare server root.
    const stripped = openAIBaseUrl
        .replace(/\/v1\/?$/, "")
        .replace(/\/api\/?$/, "")
        .trim();
    return stripped ? stripped : fallback;
}
|
|
16
|
+
/**
 * Call Ollama /api/chat with stream: true; accumulate content and optional thinking from chunks.
 * Response is application/x-ndjson: each line is a JSON object with message.content, message.thinking?, done.
 *
 * @param {{ baseUrl: string, model: string, message: string, system?: string }} options
 * @returns {Promise<{ content: string, reasoning?: string }>} joined content (trimmed)
 *   plus the joined thinking trace when the model emitted one.
 * @throws {Error} when the HTTP request fails or the response has no body.
 */
export async function ollamaStreamingChat(options) {
    const { baseUrl, model, message, system } = options;
    const url = `${baseUrl.replace(/\/$/, "")}/api/chat`;
    const messages = system != null && system !== ""
        ? [
            { role: "system", content: system },
            { role: "user", content: message },
        ]
        : [{ role: "user", content: message }];
    const res = await fetch(url, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model, messages, stream: true }),
    });
    if (!res.ok || !res.body) {
        throw new Error(`Ollama chat failed: ${res.status} ${res.statusText}`);
    }
    const contentChunks = [];
    const thinkingChunks = [];
    // Parse one NDJSON line and collect its message fields. Shared by the main
    // loop and the trailing buffer (the original duplicated this logic verbatim).
    const absorbLine = (line) => {
        const trimmed = line.trim();
        if (!trimmed)
            return;
        try {
            const msg = JSON.parse(trimmed).message;
            if (msg?.thinking != null && typeof msg.thinking === "string") {
                thinkingChunks.push(msg.thinking);
            }
            if (msg?.content != null && typeof msg.content === "string") {
                contentChunks.push(msg.content);
            }
        }
        catch {
            // skip malformed line
        }
    };
    const decoder = new TextDecoder();
    let buffer = "";
    for await (const chunk of res.body) {
        buffer += decoder.decode(chunk, { stream: true });
        const lines = buffer.split("\n");
        buffer = lines.pop() ?? "";
        for (const line of lines) {
            absorbLine(line);
        }
    }
    // Flush the decoder (fix: a multi-byte UTF-8 sequence split across the final
    // chunk boundary was previously dropped), then parse any trailing partial line.
    buffer += decoder.decode();
    absorbLine(buffer);
    const content = contentChunks.join("").trim();
    const reasoning = thinkingChunks.length > 0 ? thinkingChunks.join("") : undefined;
    return { content, reasoning };
}
//# sourceMappingURL=ollamaStreaming.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"ollamaStreaming.js","sourceRoot":"","sources":["../../../src/adapters/models/ollamaStreaming.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AAkBH;;GAEG;AACH,MAAM,UAAU,gBAAgB,CAAC,aAAiC;IAChE,IAAI,CAAC,aAAa;QAAE,OAAO,wBAAwB,CAAC;IACpD,MAAM,CAAC,GAAG,aAAa,CAAC,OAAO,CAAC,UAAU,EAAE,EAAE,CAAC,CAAC,OAAO,CAAC,WAAW,EAAE,EAAE,CAAC,CAAC,IAAI,EAAE,CAAC;IAChF,OAAO,CAAC,IAAI,wBAAwB,CAAC;AACvC,CAAC;AAED;;;GAGG;AACH,MAAM,CAAC,KAAK,UAAU,mBAAmB,CACvC,OAA+B;IAE/B,MAAM,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC;IACpD,MAAM,GAAG,GAAG,GAAG,OAAO,CAAC,OAAO,CAAC,KAAK,EAAE,EAAE,CAAC,WAAW,CAAC;IACrD,MAAM,IAAI,GAA4B;QACpC,KAAK;QACL,QAAQ,EAAE,CAAC,EAAE,IAAI,EAAE,MAAe,EAAE,OAAO,EAAE,OAAO,EAAE,CAAC;QACvD,MAAM,EAAE,IAAI;KACb,CAAC;IACF,IAAI,MAAM,IAAI,IAAI,IAAI,MAAM,KAAK,EAAE,EAAE,CAAC;QACpC,IAAI,CAAC,QAAQ,GAAG;YACd,EAAE,IAAI,EAAE,QAAiB,EAAE,OAAO,EAAE,MAAM,EAAE;YAC5C,EAAE,IAAI,EAAE,MAAe,EAAE,OAAO,EAAE,OAAO,EAAE;SAC5C,CAAC;IACJ,CAAC;IAED,MAAM,GAAG,GAAG,MAAM,KAAK,CAAC,GAAG,EAAE;QAC3B,MAAM,EAAE,MAAM;QACd,OAAO,EAAE,EAAE,cAAc,EAAE,kBAAkB,EAAE;QAC/C,IAAI,EAAE,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC;KAC3B,CAAC,CAAC;IACH,IAAI,CAAC,GAAG,CAAC,EAAE,IAAI,CAAC,GAAG,CAAC,IAAI,EAAE,CAAC;QACzB,MAAM,IAAI,KAAK,CAAC,uBAAuB,GAAG,CAAC,MAAM,IAAI,GAAG,CAAC,UAAU,EAAE,CAAC,CAAC;IACzE,CAAC;IAED,MAAM,aAAa,GAAa,EAAE,CAAC;IACnC,MAAM,cAAc,GAAa,EAAE,CAAC;IACpC,MAAM,OAAO,GAAG,IAAI,WAAW,EAAE,CAAC;IAClC,IAAI,MAAM,GAAG,EAAE,CAAC;IAEhB,IAAI,KAAK,EAAE,MAAM,KAAK,IAAI,GAAG,CAAC,IAAiC,EAAE,CAAC;QAChE,MAAM,IAAI,OAAO,CAAC,MAAM,CAAC,KAAK,EAAE,EAAE,MAAM,EAAE,IAAI,EAAE,CAAC,CAAC;QAClD,MAAM,KAAK,GAAG,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,CAAC;QACjC,MAAM,GAAG,KAAK,CAAC,GAAG,EAAE,IAAI,EAAE,CAAC;QAC3B,KAAK,MAAM,IAAI,IAAI,KAAK,EAAE,CAAC;YACzB,MAAM,OAAO,GAAG,IAAI,CAAC,IAAI,EAAE,CAAC;YAC5B,IAAI,CAAC,OAAO;gBAAE,SAAS;YACvB,IAAI,CAAC;gBACH,MAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,OAAO,CAG9B,CAAC;gBACF,MAAM,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC;gBACzB,IAAI,GAAG,EAAE,QAAQ,IAAI,IAAI,IAAI,OAAO,GAAG,CAAC,QAAQ,KAAK,QAAQ,EAAE,CAAC;oBAC9D,cAAc,CAAC,IAAI,CAAC,G
AAG,CAAC,QAAQ,CAAC,CAAC;gBACpC,CAAC;gBACD,IAAI,GAAG,EAAE,OAAO,IAAI,IAAI,IAAI,OAAO,GAAG,CAAC,OAAO,KAAK,QAAQ,EAAE,CAAC;oBAC5D,aAAa,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;gBAClC,CAAC;YACH,CAAC;YAAC,MAAM,CAAC;gBACP,sBAAsB;YACxB,CAAC;QACH,CAAC;IACH,CAAC;IACD,IAAI,MAAM,CAAC,IAAI,EAAE,EAAE,CAAC;QAClB,IAAI,CAAC;YACH,MAAM,IAAI,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,CAAC,IAAI,EAAE,CAEpC,CAAC;YACF,MAAM,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC;YACzB,IAAI,GAAG,EAAE,QAAQ,IAAI,IAAI,IAAI,OAAO,GAAG,CAAC,QAAQ,KAAK,QAAQ,EAAE,CAAC;gBAC9D,cAAc,CAAC,IAAI,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC;YACpC,CAAC;YACD,IAAI,GAAG,EAAE,OAAO,IAAI,IAAI,IAAI,OAAO,GAAG,CAAC,OAAO,KAAK,QAAQ,EAAE,CAAC;gBAC5D,aAAa,CAAC,IAAI,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC;YAClC,CAAC;QACH,CAAC;QAAC,MAAM,CAAC;YACP,SAAS;QACX,CAAC;IACH,CAAC;IAED,MAAM,OAAO,GAAG,aAAa,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,IAAI,EAAE,CAAC;IAC9C,MAAM,SAAS,GAAG,cAAc,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,cAAc,CAAC,IAAI,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;IAClF,OAAO,EAAE,OAAO,EAAE,SAAS,EAAE,CAAC;AAChC,CAAC"}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
 * OpenAI (and OpenAI-compatible) model adapter. Uses OPENAI_API_KEY, OPENAI_BASE_URL,
 * OPENAI_MODEL, OPENAI_MAX_TOKENS from env when not provided.
 */
import type { ModelAdapter } from "../../types/models.js";
/** Configuration for {@link createOpenAIModelAdapter}; every field falls back to env. */
export interface OpenAIModelConfig {
    /** API key; defaults to OPENAI_API_KEY env ("local" placeholder is used when only baseURL is set). */
    apiKey?: string;
    /** Base URL for OpenAI-compatible endpoints; defaults to OPENAI_BASE_URL / OPENAI_API_URL env. */
    baseURL?: string;
    /** Model name; defaults to OPENAI_MODEL / MODEL env or "gpt-4o-mini". */
    model?: string;
    /** Maximum tokens per completion; defaults to OPENAI_MAX_TOKENS env or 4096. */
    maxTokens?: number;
}
/**
 * Creates a ModelAdapter for OpenAI or any OpenAI-compatible API (Ollama, LM Studio, etc.).
 * Install the `openai` package: npm install openai
 * @throws Error when neither an API key nor a baseURL is available.
 */
export declare function createOpenAIModelAdapter(config?: OpenAIModelConfig): ModelAdapter;
//# sourceMappingURL=openai.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../../../src/adapters/models/openai.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAE1D,MAAM,WAAW,iBAAiB;IAChC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAWD;;;GAGG;AACH,wBAAgB,wBAAwB,CAAC,MAAM,GAAE,iBAAsB,GAAG,YAAY,CAsCrF"}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
/**
 * OpenAI (and OpenAI-compatible) model adapter. Uses OPENAI_API_KEY, OPENAI_BASE_URL,
 * OPENAI_MODEL, OPENAI_MAX_TOKENS from env when not provided.
 */
// Environment-derived defaults, recomputed per call so env changes are picked up.
const envDefaults = () => ({
    apiKey: process.env.OPENAI_API_KEY,
    baseURL: process.env.OPENAI_BASE_URL ?? process.env.OPENAI_API_URL,
    model: process.env.OPENAI_MODEL ?? process.env.MODEL ?? "gpt-4o-mini",
    maxTokens: process.env.OPENAI_MAX_TOKENS
        ? parseInt(process.env.OPENAI_MAX_TOKENS, 10)
        : 4096,
});

// Collapse a generate() input into one prompt string: prefer the plain prompt,
// otherwise join the messages array as "role: content" lines.
const toPromptText = (input) => {
    if (typeof input.prompt === "string") {
        return input.prompt;
    }
    if (Array.isArray(input.messages) && input.messages.length) {
        return input.messages.map((m) => `${m.role}: ${m.content}`).join("\n");
    }
    return "";
};

/**
 * Creates a ModelAdapter for OpenAI or any OpenAI-compatible API (Ollama, LM Studio, etc.).
 * Install the `openai` package: npm install openai
 */
export function createOpenAIModelAdapter(config = {}) {
    const settings = { ...envDefaults(), ...config };
    // Local OpenAI-compatible servers accept any key; "local" is a placeholder.
    const apiKey = settings.apiKey ?? (settings.baseURL ? "local" : undefined);
    if (!apiKey && !settings.baseURL) {
        throw new Error("OpenAI adapter requires OPENAI_API_KEY (or baseURL for local endpoints). Set OPENAI_API_KEY in env or pass apiKey in config.");
    }
    return {
        async generate(input) {
            // Lazy dynamic import so the dependency is only needed on this path.
            const openai = await import("openai").catch(() => {
                throw new Error("OpenAI adapter requires the 'openai' package. Install it: npm install openai");
            });
            const client = new openai.default({
                apiKey: apiKey ?? "local",
                baseURL: settings.baseURL,
            });
            // Guard against NaN / non-positive values from env parsing.
            const tokenCap = Number.isFinite(settings.maxTokens) && settings.maxTokens > 0
                ? settings.maxTokens
                : 4096;
            const completion = await client.chat.completions.create({
                model: settings.model ?? "gpt-4o-mini",
                messages: [{ role: "user", content: toPromptText(input) }],
                max_tokens: tokenCap,
            });
            return { content: completion.choices[0]?.message?.content ?? "", raw: completion };
        },
    };
}
//# sourceMappingURL=openai.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"openai.js","sourceRoot":"","sources":["../../../src/adapters/models/openai.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAWH,MAAM,aAAa,GAAG,GAAsB,EAAE,CAAC,CAAC;IAC9C,MAAM,EAAE,OAAO,CAAC,GAAG,CAAC,cAAc;IAClC,OAAO,EAAE,OAAO,CAAC,GAAG,CAAC,eAAe,IAAI,OAAO,CAAC,GAAG,CAAC,cAAc;IAClE,KAAK,EAAE,OAAO,CAAC,GAAG,CAAC,YAAY,IAAI,OAAO,CAAC,GAAG,CAAC,KAAK,IAAI,aAAa;IACrE,SAAS,EAAE,OAAO,CAAC,GAAG,CAAC,iBAAiB;QACtC,CAAC,CAAC,QAAQ,CAAC,OAAO,CAAC,GAAG,CAAC,iBAAiB,EAAE,EAAE,CAAC;QAC7C,CAAC,CAAC,IAAI;CACT,CAAC,CAAC;AAEH;;;GAGG;AACH,MAAM,UAAU,wBAAwB,CAAC,SAA4B,EAAE;IACrE,MAAM,IAAI,GAAG,EAAE,GAAG,aAAa,EAAE,EAAE,GAAG,MAAM,EAAE,CAAC;IAC/C,MAAM,MAAM,GAAG,IAAI,CAAC,MAAM,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC;IACnE,IAAI,CAAC,MAAM,IAAI,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC;QAC7B,MAAM,IAAI,KAAK,CACb,8HAA8H,CAC/H,CAAC;IACJ,CAAC;IACD,OAAO;QACL,KAAK,CAAC,QAAQ,CAAC,KAAK;YAClB,MAAM,MAAM,GAAG,MAAM,MAAM,CAAC,QAAQ,CAAC,CAAC,KAAK,CAAC,GAAG,EAAE;gBAC/C,MAAM,IAAI,KAAK,CACb,8EAA8E,CAC/E,CAAC;YACJ,CAAC,CAAC,CAAC;YACH,MAAM,MAAM,GAAG,IAAI,MAAM,CAAC,OAAO,CAAC;gBAChC,MAAM,EAAE,MAAM,IAAI,OAAO;gBACzB,OAAO,EAAE,IAAI,CAAC,OAAO;aACtB,CAAC,CAAC;YACH,MAAM,MAAM,GACV,OAAO,KAAK,CAAC,MAAM,KAAK,QAAQ;gBAC9B,CAAC,CAAC,KAAK,CAAC,MAAM;gBACd,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,KAAK,CAAC,QAAQ,CAAC,MAAM;oBACtD,CAAC,CAAE,KAAK,CAAC,QAAgD;yBACpD,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,GAAG,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,OAAO,EAAE,CAAC;yBACrC,IAAI,CAAC,IAAI,CAAC;oBACf,CAAC,CAAC,EAAE,CAAC;YACX,MAAM,SAAS,GACb,MAAM,CAAC,QAAQ,CAAC,IAAI,CAAC,SAAS,CAAC,IAAI,IAAI,CAAC,SAAU,GAAG,CAAC,CAAC,CAAC,CAAC,IAAI,CAAC,SAAU,CAAC,CAAC,CAAC,IAAI,CAAC;YAClF,MAAM,UAAU,GAAG,MAAM,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,MAAM,CAAC;gBACtD,KAAK,EAAE,IAAI,CAAC,KAAK,IAAI,aAAa;gBAClC,QAAQ,EAAE,CAAC,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,CAAC;gBAC7C,UAAU,EAAE,SAAS;aACtB,CAAC,CAAC;YACH,MAAM,OAAO,GAAG,UAAU,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,OAAO,EAAE,OAAO,IAAI,EAAE,CAAC;YAC9D,OAAO,EAAE,OAAO,EAAE,GAAG,EAAE,UAAU,EAAE,CAAC;
QACtC,CAAC;KACF,CAAC;AACJ,CAAC"}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
/**
 * Default executor that runs each step by calling the model with the step objective
 * and resolved inputs. No tool-calling; use for transform-only plans (e.g. quick start).
 */
import type { StepExecutor } from "../executor/StepExecutor.js";
import type { ModelAdapter } from "../types/models.js";
export interface DefaultExecutorOptions {
    /** Model used to generate step output. Required. */
    model: ModelAdapter;
    /** Optional instruction prepended to every prompt (e.g. "Respond concisely."). */
    systemPrompt?: string;
}
/**
 * Creates an executor that runs each step by prompting the model with the step's
 * objective and resolved inputs, then mapping the model output to the step's first
 * output key (or "output" when the step declares none). No tools; use for simple
 * transform steps. For tool-calling steps, use a custom executor or see the
 * run-with-openai example.
 */
export declare function createDefaultExecutor(options: DefaultExecutorOptions): StepExecutor;
//# sourceMappingURL=defaultExecutor.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"defaultExecutor.d.ts","sourceRoot":"","sources":["../../src/defaults/defaultExecutor.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,6BAA6B,CAAC;AAGhE,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AAGvD,MAAM,WAAW,sBAAsB;IACrC,oDAAoD;IACpD,KAAK,EAAE,YAAY,CAAC;IACpB,iFAAiF;IACjF,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB;AAED;;;;;GAKG;AACH,wBAAgB,qBAAqB,CAAC,OAAO,EAAE,sBAAsB,GAAG,YAAY,CAsDnF"}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Default executor that runs each step by calling the model with the step objective
|
|
3
|
+
* and resolved inputs. No tool-calling; use for transform-only plans (e.g. quick start).
|
|
4
|
+
*/
|
|
5
|
+
import { nowISO } from "../utils/time.js";
|
|
6
|
+
/**
 * Creates an executor that runs each step by prompting the model with the step's
 * objective and resolved inputs, then mapping the model output to the step's first
 * output key (or "output" when none is declared). No tools; use for simple
 * transform steps. For tool-calling steps, use a custom executor or see the
 * run-with-openai example.
 */
export function createDefaultExecutor(options) {
    const { model, systemPrompt } = options;
    // Milliseconds elapsed between two ISO-8601 timestamps.
    const elapsed = (from, to) => new Date(to).getTime() - new Date(from).getTime();
    return {
        async execute(input) {
            const startedAt = nowISO();
            const { step, attempt } = input;
            // The model's answer is mapped onto the step's first declared output key.
            const outputKey = step.outputs?.[0] ?? "output";
            if (!model) {
                return {
                    stepId: step.id,
                    attempt,
                    status: "error",
                    error: { code: "NO_MODEL", message: "No model adapter provided." },
                    startedAt,
                    completedAt: nowISO(),
                    durationMs: 0,
                };
            }
            try {
                const serializedContext = JSON.stringify(input.resolvedInputs, null, 2);
                // The optional system prompt is prepended, separated by a blank line.
                const header = systemPrompt ? systemPrompt + "\n\n" : "";
                const prompt = header +
                    `Step: ${step.name}\nObjective: ${step.objective}\n\nContext from previous steps and run:\n${serializedContext}\n\nProduce a single result for the output "${outputKey}". Write only the answer content, no preamble.`;
                const result = await model.generate({ prompt });
                const answer = (result.content ?? "").trim();
                const completedAt = nowISO();
                return {
                    stepId: step.id,
                    attempt,
                    status: "success",
                    structuredOutput: { [outputKey]: answer },
                    rawOutput: answer,
                    startedAt,
                    completedAt,
                    durationMs: elapsed(startedAt, completedAt),
                };
            }
            catch (err) {
                const completedAt = nowISO();
                return {
                    stepId: step.id,
                    attempt,
                    status: "error",
                    error: {
                        code: "EXEC_ERROR",
                        message: err instanceof Error ? err.message : String(err),
                    },
                    startedAt,
                    completedAt,
                    durationMs: elapsed(startedAt, completedAt),
                };
            }
        },
    };
}
|
|
67
|
+
//# sourceMappingURL=defaultExecutor.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"defaultExecutor.js","sourceRoot":"","sources":["../../src/defaults/defaultExecutor.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAMH,OAAO,EAAE,MAAM,EAAE,MAAM,kBAAkB,CAAC;AAS1C;;;;;GAKG;AACH,MAAM,UAAU,qBAAqB,CAAC,OAA+B;IACnE,MAAM,EAAE,KAAK,EAAE,YAAY,EAAE,GAAG,OAAO,CAAC;IACxC,OAAO;QACL,KAAK,CAAC,OAAO,CAAC,KAAyB;YACrC,MAAM,SAAS,GAAG,MAAM,EAAE,CAAC;YAC3B,MAAM,MAAM,GAAG,KAAK,CAAC,IAAI,CAAC,OAAO,EAAE,CAAC,CAAC,CAAC,IAAI,QAAQ,CAAC;YACnD,IAAI,CAAC,KAAK,EAAE,CAAC;gBACX,MAAM,WAAW,GAAG,MAAM,EAAE,CAAC;gBAC7B,OAAO;oBACL,MAAM,EAAE,KAAK,CAAC,IAAI,CAAC,EAAE;oBACrB,OAAO,EAAE,KAAK,CAAC,OAAO;oBACtB,MAAM,EAAE,OAAO;oBACf,KAAK,EAAE,EAAE,IAAI,EAAE,UAAU,EAAE,OAAO,EAAE,4BAA4B,EAAE;oBAClE,SAAS;oBACT,WAAW;oBACX,UAAU,EAAE,CAAC;iBACd,CAAC;YACJ,CAAC;YACD,IAAI,CAAC;gBACH,MAAM,UAAU,GAAG,IAAI,CAAC,SAAS,CAAC,KAAK,CAAC,cAAc,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC;gBACjE,MAAM,MAAM,GACV,CAAC,YAAY,CAAC,CAAC,CAAC,YAAY,GAAG,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;oBAC3C,SAAS,KAAK,CAAC,IAAI,CAAC,IAAI,gBAAgB,KAAK,CAAC,IAAI,CAAC,SAAS,6CAA6C,UAAU,+CAA+C,MAAM,gDAAgD,CAAC;gBAC3N,MAAM,GAAG,GAAG,MAAM,KAAK,CAAC,QAAQ,CAAC,EAAE,MAAM,EAAE,CAAC,CAAC;gBAC7C,MAAM,OAAO,GAAG,CAAC,GAAG,CAAC,OAAO,IAAI,EAAE,CAAC,CAAC,IAAI,EAAE,CAAC;gBAC3C,MAAM,WAAW,GAAG,MAAM,EAAE,CAAC;gBAC7B,MAAM,UAAU,GAAG,IAAI,IAAI,CAAC,WAAW,CAAC,CAAC,OAAO,EAAE,GAAG,IAAI,IAAI,CAAC,SAAS,CAAC,CAAC,OAAO,EAAE,CAAC;gBACnF,OAAO;oBACL,MAAM,EAAE,KAAK,CAAC,IAAI,CAAC,EAAE;oBACrB,OAAO,EAAE,KAAK,CAAC,OAAO;oBACtB,MAAM,EAAE,SAAS;oBACjB,gBAAgB,EAAE,EAAE,CAAC,MAAM,CAAC,EAAE,OAAO,EAAE;oBACvC,SAAS,EAAE,OAAO;oBAClB,SAAS;oBACT,WAAW;oBACX,UAAU;iBACX,CAAC;YACJ,CAAC;YAAC,OAAO,GAAG,EAAE,CAAC;gBACb,MAAM,WAAW,GAAG,MAAM,EAAE,CAAC;gBAC7B,OAAO;oBACL,MAAM,EAAE,KAAK,CAAC,IAAI,CAAC,EAAE;oBACrB,OAAO,EAAE,KAAK,CAAC,OAAO;oBACtB,MAAM,EAAE,OAAO;oBACf,KAAK,EAAE;wBACL,IAAI,EAAE,YAAY;wBAClB,OAAO,EAAE,GAAG,YAAY,KAAK,CAAC,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,GAAG,CAAC;qBAC1D;oBACD,SAAS;oBACT,WAAW;oBACX,UAAU,EAAE,IAAI,IAAI,CAAC,WAAW,CAAC,CAAC,OAAO,EAAE,GAAG,IAAI,IAAI,CAAC,SAAS,CAAC,CAAC,OAA
O,EAAE;iBAC5E,CAAC;YACJ,CAAC;QACH,CAAC;KACF,CAAC;AACJ,CAAC"}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
/**
 * Planner that uses an LLM to generate steps from the goal and context.
 * Lets you run goal-only (no predefined steps); the model defines the plan.
 */
import type { Planner } from "../planner/Planner.js";
import type { ModelAdapter } from "../types/models.js";
export interface LLMPlannerOptions {
    /**
     * If true, prompt asks for transform-only steps (no tools). Default true.
     * NOTE(review): this flag does not appear to be read by the prompt builder
     * in the current implementation — confirm intended behavior.
     */
    transformOnly?: boolean;
    /** Max steps to allow; steps beyond this are dropped from the model's plan. Default 10. */
    maxSteps?: number;
    /** System hint for the plan (e.g. "Keep plans to 2–3 steps."). */
    planHint?: string;
}
/**
 * Creates a planner that calls the model to generate a plan from the goal and context.
 * Use for goal-only runs: no steps defined upfront; the LLM proposes the steps.
 */
export declare function createLLMPlanner(model: ModelAdapter, options?: LLMPlannerOptions): Planner;
//# sourceMappingURL=llmPlanner.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"llmPlanner.d.ts","sourceRoot":"","sources":["../../src/defaults/llmPlanner.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAGH,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,uBAAuB,CAAC;AAErD,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AA2DvD,MAAM,WAAW,iBAAiB;IAChC,8EAA8E;IAC9E,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,sCAAsC;IACtC,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,kEAAkE;IAClE,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB;AA8CD;;;GAGG;AACH,wBAAgB,gBAAgB,CAC9B,KAAK,EAAE,YAAY,EACnB,OAAO,GAAE,iBAAsB,GAC9B,OAAO,CA2DT"}
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Planner that uses an LLM to generate steps from the goal and context.
|
|
3
|
+
* Lets you run goal-only (no predefined steps); the model defines the plan.
|
|
4
|
+
*/
|
|
5
|
+
import { buildPlan } from "./planBuilder.js";
|
|
6
|
+
// Retry policy attached to every generated step: up to 2 attempts, retrying with feedback.
const DEFAULT_RETRY = { maxAttempts: 2, strategy: "retry_with_feedback" };
|
|
7
|
+
/**
 * Extracts step specs from a model response.
 * Accepts a raw JSON object, a ```json fenced block, or (generalization) a bare
 * top-level array of steps. Returns [] for anything unusable — never throws.
 *
 * @param {string} content - Raw model output.
 * @returns {Array<object>} Parsed step specs, possibly empty.
 */
function parseStepsFromContent(content) {
    const trimmed = content.trim();
    // Prefer the contents of a fenced code block if the model wrapped its JSON.
    const block = trimmed.match(/```(?:json)?\s*([\s\S]*?)```/);
    const jsonStr = block ? block[1].trim() : trimmed;
    let data;
    try {
        data = JSON.parse(jsonStr);
    }
    catch {
        return [];
    }
    // Fix: JSON.parse("null") yields null, and `null.steps` would throw.
    // Scalars (string/number/boolean) are rejected for the same reason.
    if (data === null || typeof data !== "object")
        return [];
    // Generalization: tolerate a model emitting the steps array directly.
    const steps = Array.isArray(data) ? data : data.steps;
    if (!Array.isArray(steps) || steps.length === 0)
        return [];
    return steps;
}
|
|
23
|
+
/**
 * Normalizes a raw model-proposed step spec into a full step definition.
 * Missing fields fall back to safe defaults; the id is sanitized (whitespace
 * collapsed to underscores) and capped at 80 characters.
 */
function specToStepDefinition(spec, index) {
    const rawId = spec.id ?? `step_${index}`;
    const id = rawId.replace(/\s+/g, "_").slice(0, 80);
    const outputs = Array.isArray(spec.outputs) && spec.outputs.length > 0
        ? spec.outputs
        : ["output"];
    // Inputs pulled from the run context (e.g. "goal").
    const contextInputs = (spec.inputsFromContext ?? []).map((key) => ({
        source: "runContext",
        key,
    }));
    // Inputs wired from earlier steps' outputs; entries missing either field are dropped.
    const stepInputs = (spec.inputsFromStep ?? [])
        .filter(({ stepId, key }) => stepId && key)
        .map(({ stepId, key }) => ({ source: "stepOutput", stepId, key }));
    return {
        id,
        name: spec.name ?? id,
        type: "transform",
        objective: spec.objective ?? "Complete this step.",
        dependencies: Array.isArray(spec.dependencies) ? spec.dependencies : [],
        allowedTools: [],
        inputs: [...contextInputs, ...stepInputs],
        outputs,
        completionCriteria: outputs.map((k) => `Output "${k}" is produced.`),
        retryPolicy: DEFAULT_RETRY,
    };
}
|
|
50
|
+
// Baseline planner options; caller-supplied options are spread over these.
const DEFAULT_OPTIONS = {
    transformOnly: true, // NOTE(review): not read by buildPlanPrompt in this file — confirm intent.
    maxSteps: 10, // Steps beyond this are dropped from the model's plan.
    planHint: undefined, // When unset, buildPlanPrompt falls back to its built-in sizing hint.
};
|
|
55
|
+
/**
 * Builds the planning prompt sent to the model: the goal, serialized context,
 * optional constraints, tool availability, a sizing hint, and the exact JSON
 * schema the response must follow.
 */
function buildPlanPrompt(input, options) {
    const { goal, context, constraints } = input;
    const hint = options.planHint ?? "Use 1–4 steps. Each step should have a clear objective and output key(s).";
    const toolNames = input.availableTools.map((t) => t.name).join(", ");
    const toolList = input.availableTools.length > 0
        ? `Available tools (use only if needed): ${toolNames}.`
        : "No tools are available; use only reasoning/transform steps.";
    const constraintStr = constraints.length > 0 ? `Constraints: ${constraints.join("; ")}` : "";
    const constraintSection = constraintStr ? constraintStr + "\n\n" : "";
    return `You are a planning assistant. Given a goal and context, output a plan as a single JSON object.

Goal: ${goal}

Context (key-value): ${JSON.stringify(context)}

${constraintSection}${toolList}

${hint}

Respond with ONLY a JSON object in this exact shape (no markdown, no explanation):
\`\`\`json
{
"steps": [
{
"id": "unique_snake_case_id",
"name": "Human-readable step name",
"objective": "What this step must do",
"outputs": ["output_key"],
"dependencies": [],
"inputsFromContext": ["goal"],
"inputsFromStep": []
}
]
}
\`\`\`

Rules: Step ids must be unique. Later steps can list earlier step ids in "dependencies" and reference their outputs in "inputsFromStep" as { "stepId": "id", "key": "output_key" }. Use "inputsFromContext" for keys from the run context (e.g. "goal"). Output only the JSON block.`;
}
|
|
91
|
+
/**
 * Creates a planner that calls the model to generate a plan from the goal and context.
 * Use for goal-only runs: no steps defined upfront; the LLM proposes the steps.
 *
 * @param {object} model - ModelAdapter used to generate (and revise) plans.
 * @param {object} [options] - LLMPlannerOptions: maxSteps, planHint, transformOnly.
 * @returns {object} Planner exposing createPlan and replan.
 */
export function createLLMPlanner(model, options = {}) {
    const opts = { ...DEFAULT_OPTIONS, ...options };
    // Prompt the model, parse its response, and normalize at most opts.maxSteps
    // step definitions. Returns [] when the response is unusable. Extracted to
    // remove the parse/slice/map pipeline previously duplicated in createPlan
    // and replan.
    async function generateSteps(prompt) {
        const out = await model.generate({ prompt });
        const content = (out.content ?? "").trim();
        return parseStepsFromContent(content)
            .slice(0, opts.maxSteps)
            .map((s, i) => specToStepDefinition(s, i));
    }
    return {
        async createPlan(input) {
            const steps = await generateSteps(buildPlanPrompt(input, opts));
            if (steps.length === 0) {
                // Model gave nothing usable: fall back to a single transform step
                // whose objective is the goal itself.
                const fallback = {
                    id: "answer",
                    name: "Answer",
                    type: "transform",
                    objective: input.goal,
                    dependencies: [],
                    allowedTools: [],
                    inputs: [{ source: "runContext", key: "goal" }],
                    outputs: ["answer"],
                    completionCriteria: ["Answer is produced."],
                    retryPolicy: DEFAULT_RETRY,
                };
                return buildPlan(input.goal, [fallback]);
            }
            return buildPlan(input.goal, steps);
        },
        async replan(input) {
            const prompt = buildPlanPrompt({
                goal: input.goal,
                context: input.context,
                constraints: input.constraints,
                availableTools: input.availableTools,
                options: input.options,
            }, opts) +
                `\n\nPrevious plan failed at step "${input.failedStepId}". Suggest a revised plan (you may simplify or add recovery steps). Same JSON format.`;
            const steps = await generateSteps(prompt);
            if (steps.length === 0) {
                // Unusable replan response: keep the current plan, just bump the version.
                return {
                    ...input.currentPlan,
                    version: input.currentPlan.version + 1,
                };
            }
            // Revised plan keeps the same plan id with an incremented version.
            return buildPlan(input.goal, steps, {
                planId: input.currentPlan.id,
                version: input.currentPlan.version + 1,
            });
        },
    };
}
|
|
151
|
+
//# sourceMappingURL=llmPlanner.js.map
|