@directive-run/ai 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -0
- package/dist/anthropic.cjs +3 -0
- package/dist/anthropic.cjs.map +1 -0
- package/dist/anthropic.d.cts +103 -0
- package/dist/anthropic.d.ts +103 -0
- package/dist/anthropic.js +3 -0
- package/dist/anthropic.js.map +1 -0
- package/dist/index.cjs +78 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +4663 -0
- package/dist/index.d.ts +4663 -0
- package/dist/index.js +78 -0
- package/dist/index.js.map +1 -0
- package/dist/ollama.cjs +3 -0
- package/dist/ollama.cjs.map +1 -0
- package/dist/ollama.d.cts +47 -0
- package/dist/ollama.d.ts +47 -0
- package/dist/ollama.js +3 -0
- package/dist/ollama.js.map +1 -0
- package/dist/openai.cjs +3 -0
- package/dist/openai.cjs.map +1 -0
- package/dist/openai.d.cts +127 -0
- package/dist/openai.d.ts +127 -0
- package/dist/openai.js +3 -0
- package/dist/openai.js.map +1 -0
- package/dist/testing.cjs +14 -0
- package/dist/testing.cjs.map +1 -0
- package/dist/testing.d.cts +345 -0
- package/dist/testing.d.ts +345 -0
- package/dist/testing.js +14 -0
- package/dist/testing.js.map +1 -0
- package/dist/types-BKCdgKC-.d.cts +300 -0
- package/dist/types-BKCdgKC-.d.ts +300 -0
- package/package.json +83 -0
package/dist/ollama.cjs
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
'use strict';

// Base URLs for adapters may only use these protocols.
var ALLOWED_PROTOCOLS = new Set(["http:", "https:"]);

/**
 * Validate an adapter base URL at creation time.
 * Throws a "[Directive]"-prefixed Error when the URL does not parse or
 * uses a protocol other than http/https.
 */
function validateBaseURL(baseURL) {
  try {
    const parsed = new URL(baseURL);
    if (!ALLOWED_PROTOCOLS.has(parsed.protocol)) {
      throw new Error('[Directive] Invalid baseURL protocol "' + parsed.protocol + '" \u2013 only http: and https: are allowed');
    }
  } catch (err) {
    // Re-throw our own protocol error unchanged; wrap URL-parse failures.
    if (err instanceof Error && err.message.startsWith("[Directive]")) {
      throw err;
    }
    throw new Error('[Directive] Invalid baseURL "' + baseURL + '" \u2013 must be a valid URL (e.g. "https://api.openai.com/v1")');
  }
}

/**
 * Build an AgentRunner from buildRequest/parseResponse helpers.
 * Wraps the fetch round-trip with error reporting, token accounting,
 * message bookkeeping and optional lifecycle hooks
 * (onBeforeCall / onAfterCall / onError).
 */
function createRunner(options) {
  const {
    fetch: fetchFn = globalThis.fetch,
    buildRequest,
    parseResponse,
    parseOutput,
    hooks,
  } = options;

  // Default output parser: JSON when possible, otherwise the raw text.
  const parse = parseOutput ?? ((text) => {
    try {
      return JSON.parse(text);
    } catch {
      return text;
    }
  });

  return async (agent, input, runOptions) => {
    const startTime = Date.now();
    hooks?.onBeforeCall?.({ agent, input, timestamp: startTime });

    const messages = [{ role: "user", content: input }];

    try {
      const { url, init } = buildRequest(agent, input, messages);

      // A caller-supplied signal overrides whatever buildRequest set.
      const fetchInit = runOptions?.signal ? { ...init, signal: runOptions.signal } : init;
      const response = await fetchFn(url, fetchInit);

      if (!response.ok) {
        // Best-effort body snippet for diagnostics; never let text() failure mask the status.
        const errBody = await response.text().catch(() => "");
        throw new Error('[Directive] AgentRunner request failed: ' + response.status + ' ' + response.statusText + (errBody ? ' \u2013 ' + errBody.slice(0, 300) : ''));
      }

      const parsed = await parseResponse(response, messages);
      const tokenUsage = {
        inputTokens: parsed.inputTokens ?? 0,
        outputTokens: parsed.outputTokens ?? 0,
      };

      const assistantMessage = { role: "assistant", content: parsed.text };
      const allMessages = [...messages, assistantMessage];

      runOptions?.onMessage?.(assistantMessage);

      const durationMs = Date.now() - startTime;
      hooks?.onAfterCall?.({
        agent,
        input,
        output: parsed.text,
        totalTokens: parsed.totalTokens,
        tokenUsage,
        durationMs,
        timestamp: Date.now(),
      });

      return {
        output: parse(parsed.text),
        messages: allMessages,
        toolCalls: [],
        totalTokens: parsed.totalTokens,
        tokenUsage,
      };
    } catch (err) {
      const durationMs = Date.now() - startTime;
      // onError only fires for real Error instances, matching onAfterCall's contract.
      if (err instanceof Error) {
        hooks?.onError?.({ agent, input, error: err, durationMs, timestamp: Date.now() });
      }
      throw err;
    }
  };
}

/**
 * Create an AgentRunner backed by a local Ollama server (no API key).
 * Defaults: model "llama3", baseURL "http://localhost:11434".
 * Accepts optional fetch override, per-request timeoutMs, and hooks.
 */
function M(options) {
  const config = options === undefined ? {} : options;
  const defaultModel = config.model === undefined ? "llama3" : config.model;
  const baseURL = config.baseURL === undefined ? "http://localhost:11434" : config.baseURL;
  const fetchFn = config.fetch === undefined ? globalThis.fetch : config.fetch;
  const timeoutMs = config.timeoutMs;
  const hooks = config.hooks;

  // Fail fast on bad config rather than on the first request.
  validateBaseURL(baseURL);

  return createRunner({
    fetch: fetchFn,
    hooks: hooks,
    buildRequest: (agent, _input, messages) => {
      // Prepend the system prompt (if any) to the running conversation.
      const chatMessages = [];
      if (agent.instructions) {
        chatMessages.push({ role: "system", content: agent.instructions });
      }
      for (const message of messages) {
        chatMessages.push({ role: message.role, content: message.content });
      }
      return {
        url: baseURL + "/api/chat",
        init: {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({
            model: agent.model ?? defaultModel,
            messages: chatMessages,
            stream: false,
          }),
          // A fresh timeout signal per request, only when configured.
          ...(timeoutMs != null ? { signal: AbortSignal.timeout(timeoutMs) } : {}),
        },
      };
    },
    parseResponse: async (res) => {
      let data;
      try {
        data = await res.json();
      } catch {
        throw new Error('[Directive] Ollama returned non-JSON response. Is Ollama running at ' + baseURL + '? Start it with: ollama serve');
      }
      const text = data.message?.content ?? "";
      const inputTokens = data.prompt_eval_count ?? 0;
      const outputTokens = data.eval_count ?? 0;
      return {
        text: text,
        totalTokens: inputTokens + outputTokens,
        inputTokens: inputTokens,
        outputTokens: outputTokens,
      };
    },
  });
}
|
|
2
|
+
exports.createOllamaRunner=M;//# sourceMappingURL=ollama.cjs.map
|
|
3
|
+
//# sourceMappingURL=ollama.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/helpers.ts","../src/adapters/ollama.ts"],"names":["ALLOWED_PROTOCOLS","validateBaseURL","baseURL","url","err","createRunner","options","fetchFn","buildRequest","parseResponse","parseOutput","hooks","parse","text","agent","input","runOptions","startTime","messages","init","fetchInit","response","errBody","parsed","tokenUsage","assistantMessage","allMessages","durationMs","createOllamaRunner","model","timeoutMs","_input","m","res","data","inputTokens","outputTokens"],"mappings":"aAoDA,IAAMA,CAAAA,CAAoB,IAAI,GAAA,CAAI,CAAC,OAAA,CAAS,QAAQ,CAAC,CAAA,CAM9C,SAASC,CAAAA,CAAgBC,CAAAA,CAAuB,CACrD,GAAI,CACF,IAAMC,CAAAA,CAAM,IAAI,GAAA,CAAID,CAAO,CAAA,CAC3B,GAAI,CAACF,CAAAA,CAAkB,GAAA,CAAIG,CAAAA,CAAI,QAAQ,CAAA,CACrC,MAAM,IAAI,KAAA,CACR,CAAA,sCAAA,EAAyCA,CAAAA,CAAI,QAAQ,CAAA,0CAAA,CACvD,CAEJ,CAAA,MAASC,CAAAA,CAAK,CACZ,MAAIA,CAAAA,YAAe,KAAA,EAASA,CAAAA,CAAI,QAAQ,UAAA,CAAW,aAAa,CAAA,CACxDA,CAAAA,CAGF,IAAI,KAAA,CACR,CAAA,6BAAA,EAAgCF,CAAO,CAAA,+DAAA,CACzC,CACF,CACF,CA4EO,SAASG,CAAAA,CAAaC,EAA2C,CACtE,GAAM,CACJ,KAAA,CAAOC,CAAAA,CAAU,UAAA,CAAW,MAC5B,YAAA,CAAAC,CAAAA,CACA,aAAA,CAAAC,CAAAA,CACA,WAAA,CAAAC,CAAAA,CACA,MAAAC,CACF,CAAA,CAAIL,CAAAA,CAUEM,CAAAA,CAAQF,CAAAA,GARiBG,CAAAA,EAAoB,CACjD,GAAI,CACF,OAAO,IAAA,CAAK,KAAA,CAAMA,CAAI,CACxB,MAAQ,CACN,OAAOA,CACT,CACF,CAAA,CAAA,CAIA,OAAO,MACLC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,GAC0B,CAC1B,IAAMC,CAAAA,CAAY,IAAA,CAAK,KAAI,CAC3BN,CAAAA,EAAO,YAAA,GAAe,CAAE,KAAA,CAAAG,CAAAA,CAAO,KAAA,CAAAC,CAAAA,CAAO,SAAA,CAAWE,CAAU,CAAC,CAAA,CAE5D,IAAMC,CAAAA,CAAsB,CAAC,CAAE,IAAA,CAAM,MAAA,CAAQ,OAAA,CAASH,CAAM,CAAC,EAE7D,GAAI,CACF,GAAM,CAAE,GAAA,CAAAZ,CAAAA,CAAK,KAAAgB,CAAK,CAAA,CAAIX,CAAAA,CAAaM,CAAAA,CAAOC,CAAAA,CAAOG,CAAQ,CAAA,CAEnDE,CAAAA,CAAyBJ,CAAAA,EAAY,MAAA,CACvC,CAAE,GAAGG,CAAAA,CAAM,MAAA,CAAQH,EAAW,MAAO,CAAA,CACrCG,CAAAA,CAEEE,CAAAA,CAAW,MAAMd,CAAAA,CAAQJ,CAAAA,CAAKiB,CAAS,CAAA,CAE7C,GAAI,CAACC,CAAAA,CAAS,EAAA,CAAI,CAChB,IAAMC,CAAAA,CAAU,MAAMD,CAAAA,CAAS,IAAA,EAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACR,CAAA
,wCAAA,EAA2CA,CAAAA,CAAS,MAAM,CAAA,CAAA,EAAIA,CAAAA,CAAS,UAAU,CAAA,EAAGC,CAAAA,CAAU,CAAA,QAAA,EAAMA,EAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,CAAA,CAAA,CAAK,EAAE,EAClI,CACF,CAEA,IAAMC,CAAAA,CAAS,MAAMd,CAAAA,CAAcY,CAAAA,CAAUH,CAAQ,CAAA,CAC/CM,CAAAA,CAAyB,CAC7B,WAAA,CAAaD,CAAAA,CAAO,WAAA,EAAe,EACnC,YAAA,CAAcA,CAAAA,CAAO,YAAA,EAAgB,CACvC,CAAA,CAEME,CAAAA,CAA4B,CAAE,IAAA,CAAM,WAAA,CAAa,OAAA,CAASF,CAAAA,CAAO,IAAK,CAAA,CACtEG,CAAAA,CAAyB,CAAC,GAAGR,CAAAA,CAAUO,CAAgB,CAAA,CAE7DT,CAAAA,EAAY,SAAA,GAAYS,CAAgB,CAAA,CAExC,IAAME,CAAAA,CAAa,IAAA,CAAK,GAAA,EAAI,CAAIV,CAAAA,CAChC,OAAAN,CAAAA,EAAO,WAAA,GAAc,CACnB,KAAA,CAAAG,CAAAA,CACA,KAAA,CAAAC,EACA,MAAA,CAAQQ,CAAAA,CAAO,IAAA,CACf,WAAA,CAAaA,CAAAA,CAAO,WAAA,CACpB,WAAAC,CAAAA,CACA,UAAA,CAAAG,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,GAAA,EAClB,CAAC,CAAA,CAEM,CACL,MAAA,CAAQf,CAAAA,CAASW,CAAAA,CAAO,IAAI,EAC5B,QAAA,CAAUG,CAAAA,CACV,SAAA,CAAW,EAAC,CACZ,WAAA,CAAaH,CAAAA,CAAO,WAAA,CACpB,UAAA,CAAAC,CACF,CACF,CAAA,MAASpB,CAAAA,CAAK,CACZ,IAAMuB,CAAAA,CAAa,IAAA,CAAK,GAAA,EAAI,CAAIV,CAAAA,CAChC,MAAIb,CAAAA,YAAe,KAAA,EACjBO,CAAAA,EAAO,OAAA,GAAU,CACf,KAAA,CAAAG,CAAAA,CACA,KAAA,CAAAC,EACA,KAAA,CAAOX,CAAAA,CACP,UAAA,CAAAuB,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,KAClB,CAAC,CAAA,CAGGvB,CACR,CACF,CACF,CChMO,SAASwB,CAAAA,CACftB,CAAAA,CAA+B,EAAC,CAClB,CACd,GAAM,CACL,KAAA,CAAAuB,CAAAA,CAAQ,QAAA,CACR,OAAA,CAAA3B,CAAAA,CAAU,wBAAA,CACV,MAAOK,CAAAA,CAAU,UAAA,CAAW,KAAA,CAC5B,SAAA,CAAAuB,CAAAA,CACA,KAAA,CAAAnB,CACD,CAAA,CAAIL,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,CAAA,CAEhBG,CAAAA,CAAa,CACnB,KAAA,CAAOE,CAAAA,CACP,KAAA,CAAAI,CAAAA,CACA,YAAA,CAAc,CAACG,CAAAA,CAAOiB,CAAAA,CAAQb,CAAAA,IAAc,CAC3C,GAAA,CAAK,CAAA,EAAGhB,CAAO,CAAA,SAAA,CAAA,CACf,KAAM,CACL,MAAA,CAAQ,MAAA,CACR,OAAA,CAAS,CAAE,cAAA,CAAgB,kBAAmB,CAAA,CAC9C,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CACpB,KAAA,CAAOY,EAAM,KAAA,EAASe,CAAAA,CACtB,QAAA,CAAU,CACT,GAAIf,CAAAA,CAAM,YAAA,CACP,CAAC,CAAE,IAAA,CAAM,QAAA,CAAU,OAAA,CAASA,CAAAA,CAAM,YAAa,CAAC,CAAA,CAChD,EAAC,CACJ,GAAGI,CAAAA,CAAS,GAAA,CAAKc,CAAAA,GAAO,CAAE,IAAA,CAAMA,CAAAA,CAAE,IAAA,CAAM,OAAA,C
AASA,CAAAA,CAAE,OAAQ,EAAE,CAC9D,CAAA,CACA,MAAA,CAAQ,KACT,CAAC,CAAA,CACD,GAAIF,CAAAA,EAAa,IAAA,CAAO,CAAE,MAAA,CAAQ,WAAA,CAAY,OAAA,CAAQA,CAAS,CAAE,CAAA,CAAI,EACtE,CACD,CAAA,CAAA,CACA,aAAA,CAAe,MAAOG,CAAAA,EAAQ,CAC7B,IAAIC,CAAAA,CACJ,GAAI,CACHA,EAAO,MAAMD,CAAAA,CAAI,IAAA,GAClB,CAAA,KAAQ,CACP,MAAM,IAAI,KAAA,CACT,CAAA,oEAAA,EAAuE/B,CAAO,CAAA,6BAAA,CAC/E,CACD,CACA,IAAMW,CAAAA,CAAQqB,CAAAA,CAAK,OAAA,EAAqC,OAAA,EAAqB,EAAA,CACvEC,CAAAA,CAAeD,CAAAA,CAAK,iBAAA,EAAgC,CAAA,CACpDE,CAAAA,CAAgBF,CAAAA,CAAK,UAAA,EAAyB,CAAA,CAEpD,OAAO,CACN,IAAA,CAAArB,CAAAA,CACA,WAAA,CAAasB,CAAAA,CAAcC,CAAAA,CAC3B,WAAA,CAAAD,CAAAA,CACA,YAAA,CAAAC,CACD,CACD,CACD,CAAC,CACF","file":"ollama.cjs","sourcesContent":["/**\n * Helper functions for AI adapter — createRunner, estimateCost, state queries, validation.\n */\n\nimport type {\n AdapterHooks,\n AgentLike,\n AgentRunner,\n RunResult,\n RunOptions,\n Message,\n TokenUsage,\n AgentState,\n ApprovalState,\n} from \"./types.js\";\n\n// ============================================================================\n// State Query Helpers\n// ============================================================================\n\n/** Check if agent is currently running. */\nexport function isAgentRunning(state: AgentState): boolean {\n return state.status === \"running\";\n}\n\n/** Check if there are pending approvals. 
*/\nexport function hasPendingApprovals(state: ApprovalState): boolean {\n return state.pending.length > 0;\n}\n\n// ============================================================================\n// Cost Estimation\n// ============================================================================\n\n/**\n * Get total cost estimate based on token usage.\n *\n * @param tokenUsage - Total token count\n * @param ratePerMillionTokens - Cost per million tokens (required, no default to avoid stale pricing)\n * @returns Estimated cost in dollars\n */\nexport function estimateCost(\n tokenUsage: number,\n ratePerMillionTokens: number\n): number {\n return (tokenUsage / 1_000_000) * ratePerMillionTokens;\n}\n\n// ============================================================================\n// Validation Helpers\n// ============================================================================\n\nconst ALLOWED_PROTOCOLS = new Set([\"http:\", \"https:\"]);\n\n/**\n * Validate that a baseURL uses http or https.\n * Throws immediately at adapter creation time (not at call time) to catch config errors early.\n */\nexport function validateBaseURL(baseURL: string): void {\n try {\n const url = new URL(baseURL);\n if (!ALLOWED_PROTOCOLS.has(url.protocol)) {\n throw new Error(\n `[Directive] Invalid baseURL protocol \"${url.protocol}\" – only http: and https: are allowed`,\n );\n }\n } catch (err) {\n if (err instanceof Error && err.message.startsWith(\"[Directive]\")) {\n throw err;\n }\n\n throw new Error(\n `[Directive] Invalid baseURL \"${baseURL}\" – must be a valid URL (e.g. 
\"https://api.openai.com/v1\")`,\n );\n }\n}\n\n// ============================================================================\n// createRunner Helper\n// ============================================================================\n\n/** Parsed response from an LLM provider */\nexport interface ParsedResponse {\n text: string;\n totalTokens: number;\n /** Input token count, when available from the provider */\n inputTokens?: number;\n /** Output token count, when available from the provider */\n outputTokens?: number;\n}\n\n/** Options for creating an AgentRunner from buildRequest/parseResponse */\nexport interface CreateRunnerOptions {\n fetch?: typeof globalThis.fetch;\n buildRequest: (\n agent: AgentLike,\n input: string,\n messages: Message[]\n ) => { url: string; init: RequestInit };\n parseResponse: (\n response: Response,\n messages: Message[]\n ) => Promise<ParsedResponse>;\n parseOutput?: <T>(text: string) => T;\n /** Lifecycle hooks for tracing, logging, and metrics */\n hooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner from buildRequest/parseResponse helpers.\n * Reduces ~50 lines of fetch boilerplate to ~20 lines of configuration.\n *\n * Supports lifecycle hooks for observability:\n * - `onBeforeCall` fires before each API request\n * - `onAfterCall` fires after a successful response (includes token breakdown)\n * - `onError` fires when the request fails\n *\n * @example\n * ```typescript\n * const runClaude = createRunner({\n * buildRequest: (agent, input) => ({\n * url: \"/api/claude\",\n * init: {\n * method: \"POST\",\n * headers: { \"Content-Type\": \"application/json\" },\n * body: JSON.stringify({\n * model: agent.model ?? \"claude-haiku-4-5-20251001\",\n * system: agent.instructions ?? \"\",\n * messages: [{ role: \"user\", content: input }],\n * }),\n * },\n * }),\n * parseResponse: async (res) => {\n * const data = await res.json();\n * const inputTokens = data.usage?.input_tokens ?? 
0;\n * const outputTokens = data.usage?.output_tokens ?? 0;\n * return {\n * text: data.content?.[0]?.text ?? \"\",\n * totalTokens: inputTokens + outputTokens,\n * inputTokens,\n * outputTokens,\n * };\n * },\n * hooks: {\n * onAfterCall: ({ durationMs, tokenUsage }) => {\n * console.log(`LLM call: ${durationMs}ms, ${tokenUsage.inputTokens}in/${tokenUsage.outputTokens}out`);\n * },\n * },\n * });\n * ```\n */\nexport function createRunner(options: CreateRunnerOptions): AgentRunner {\n const {\n fetch: fetchFn = globalThis.fetch,\n buildRequest,\n parseResponse,\n parseOutput,\n hooks,\n } = options;\n\n const defaultParseOutput = <T>(text: string): T => {\n try {\n return JSON.parse(text) as T;\n } catch {\n return text as unknown as T;\n }\n };\n\n const parse = parseOutput ?? defaultParseOutput;\n\n return async <T = unknown>(\n agent: AgentLike,\n input: string,\n runOptions?: RunOptions\n ): Promise<RunResult<T>> => {\n const startTime = Date.now();\n hooks?.onBeforeCall?.({ agent, input, timestamp: startTime });\n\n const messages: Message[] = [{ role: \"user\", content: input }];\n\n try {\n const { url, init } = buildRequest(agent, input, messages);\n\n const fetchInit: RequestInit = runOptions?.signal\n ? { ...init, signal: runOptions.signal }\n : init;\n\n const response = await fetchFn(url, fetchInit);\n\n if (!response.ok) {\n const errBody = await response.text().catch(() => \"\");\n\n throw new Error(\n `[Directive] AgentRunner request failed: ${response.status} ${response.statusText}${errBody ? ` – ${errBody.slice(0, 300)}` : \"\"}`,\n );\n }\n\n const parsed = await parseResponse(response, messages);\n const tokenUsage: TokenUsage = {\n inputTokens: parsed.inputTokens ?? 0,\n outputTokens: parsed.outputTokens ?? 
0,\n };\n\n const assistantMessage: Message = { role: \"assistant\", content: parsed.text };\n const allMessages: Message[] = [...messages, assistantMessage];\n\n runOptions?.onMessage?.(assistantMessage);\n\n const durationMs = Date.now() - startTime;\n hooks?.onAfterCall?.({\n agent,\n input,\n output: parsed.text,\n totalTokens: parsed.totalTokens,\n tokenUsage,\n durationMs,\n timestamp: Date.now(),\n });\n\n return {\n output: parse<T>(parsed.text),\n messages: allMessages,\n toolCalls: [],\n totalTokens: parsed.totalTokens,\n tokenUsage,\n };\n } catch (err) {\n const durationMs = Date.now() - startTime;\n if (err instanceof Error) {\n hooks?.onError?.({\n agent,\n input,\n error: err,\n durationMs,\n timestamp: Date.now(),\n });\n }\n\n throw err;\n }\n };\n}\n","/**\n * @directive-run/ai/ollama\n *\n * Ollama adapter for Directive AI. Provides runners for local\n * Ollama inference. No API key required.\n *\n * Requires Ollama to be running locally. Start it with: `ollama serve`\n *\n * @example\n * ```typescript\n * import { createOllamaRunner } from '@directive-run/ai/ollama';\n *\n * const runner = createOllamaRunner({ model: 'llama3' });\n * ```\n */\n\nimport { createRunner, validateBaseURL } from \"../helpers.js\";\nimport type { AdapterHooks, AgentRunner } from \"../types.js\";\n\n// ============================================================================\n// Ollama Runner\n// ============================================================================\n\n/** Options for createOllamaRunner */\nexport interface OllamaRunnerOptions {\n\tmodel?: string;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** @default undefined */\n\ttimeoutMs?: number;\n\t/** Lifecycle hooks for tracing, logging, and metrics */\n\thooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner for local Ollama inference.\n *\n * Ollama runs locally – no API key or cloud service needed. 
Default model\n * is `llama3`, default base URL is `http://localhost:11434`.\n *\n * Returns `tokenUsage` with input/output breakdown for cost tracking\n * (useful for monitoring local resource usage).\n *\n * @example\n * ```typescript\n * const runner = createOllamaRunner({ model: \"llama3\" });\n * const stack = createAgentStack({ runner, agents: { ... } });\n * ```\n */\nexport function createOllamaRunner(\n\toptions: OllamaRunnerOptions = {},\n): AgentRunner {\n\tconst {\n\t\tmodel = \"llama3\",\n\t\tbaseURL = \"http://localhost:11434\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\ttimeoutMs,\n\t\thooks,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\treturn createRunner({\n\t\tfetch: fetchFn,\n\t\thooks,\n\t\tbuildRequest: (agent, _input, messages) => ({\n\t\t\turl: `${baseURL}/api/chat`,\n\t\t\tinit: {\n\t\t\t\tmethod: \"POST\",\n\t\t\t\theaders: { \"Content-Type\": \"application/json\" },\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\tmodel: agent.model ?? model,\n\t\t\t\t\tmessages: [\n\t\t\t\t\t\t...(agent.instructions\n\t\t\t\t\t\t\t? [{ role: \"system\", content: agent.instructions }]\n\t\t\t\t\t\t\t: []),\n\t\t\t\t\t\t...messages.map((m) => ({ role: m.role, content: m.content })),\n\t\t\t\t\t],\n\t\t\t\t\tstream: false,\n\t\t\t\t}),\n\t\t\t\t...(timeoutMs != null ? { signal: AbortSignal.timeout(timeoutMs) } : {}),\n\t\t\t},\n\t\t}),\n\t\tparseResponse: async (res) => {\n\t\t\tlet data: Record<string, unknown>;\n\t\t\ttry {\n\t\t\t\tdata = await res.json();\n\t\t\t} catch {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`[Directive] Ollama returned non-JSON response. Is Ollama running at ${baseURL}? Start it with: ollama serve`,\n\t\t\t\t);\n\t\t\t}\n\t\t\tconst text = (data.message as Record<string, unknown>)?.content as string ?? \"\";\n\t\t\tconst inputTokens = (data.prompt_eval_count as number) ?? 0;\n\t\t\tconst outputTokens = (data.eval_count as number) ?? 
0;\n\n\t\t\treturn {\n\t\t\t\ttext,\n\t\t\t\ttotalTokens: inputTokens + outputTokens,\n\t\t\t\tinputTokens,\n\t\t\t\toutputTokens,\n\t\t\t};\n\t\t},\n\t});\n}\n"]}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { f as AdapterHooks, c as AgentRunner } from './types-BKCdgKC-.cjs';
|
|
2
|
+
import '@directive-run/core';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* @directive-run/ai/ollama
|
|
6
|
+
*
|
|
7
|
+
* Ollama adapter for Directive AI. Provides runners for local
|
|
8
|
+
* Ollama inference. No API key required.
|
|
9
|
+
*
|
|
10
|
+
* Requires Ollama to be running locally. Start it with: `ollama serve`
|
|
11
|
+
*
|
|
12
|
+
* @example
|
|
13
|
+
* ```typescript
|
|
14
|
+
* import { createOllamaRunner } from '@directive-run/ai/ollama';
|
|
15
|
+
*
|
|
16
|
+
* const runner = createOllamaRunner({ model: 'llama3' });
|
|
17
|
+
* ```
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
/** Options for createOllamaRunner */
|
|
21
|
+
interface OllamaRunnerOptions {
|
|
22
|
+
model?: string;
|
|
23
|
+
baseURL?: string;
|
|
24
|
+
fetch?: typeof globalThis.fetch;
|
|
25
|
+
/** @default undefined */
|
|
26
|
+
timeoutMs?: number;
|
|
27
|
+
/** Lifecycle hooks for tracing, logging, and metrics */
|
|
28
|
+
hooks?: AdapterHooks;
|
|
29
|
+
}
|
|
30
|
+
/**
|
|
31
|
+
* Create an AgentRunner for local Ollama inference.
|
|
32
|
+
*
|
|
33
|
+
* Ollama runs locally – no API key or cloud service needed. Default model
|
|
34
|
+
* is `llama3`, default base URL is `http://localhost:11434`.
|
|
35
|
+
*
|
|
36
|
+
* Returns `tokenUsage` with input/output breakdown for cost tracking
|
|
37
|
+
* (useful for monitoring local resource usage).
|
|
38
|
+
*
|
|
39
|
+
* @example
|
|
40
|
+
* ```typescript
|
|
41
|
+
* const runner = createOllamaRunner({ model: "llama3" });
|
|
42
|
+
* const stack = createAgentStack({ runner, agents: { ... } });
|
|
43
|
+
* ```
|
|
44
|
+
*/
|
|
45
|
+
declare function createOllamaRunner(options?: OllamaRunnerOptions): AgentRunner;
|
|
46
|
+
|
|
47
|
+
export { type OllamaRunnerOptions, createOllamaRunner };
|
package/dist/ollama.d.ts
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { f as AdapterHooks, c as AgentRunner } from './types-BKCdgKC-.js';
|
|
2
|
+
import '@directive-run/core';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* @directive-run/ai/ollama
|
|
6
|
+
*
|
|
7
|
+
* Ollama adapter for Directive AI. Provides runners for local
|
|
8
|
+
* Ollama inference. No API key required.
|
|
9
|
+
*
|
|
10
|
+
* Requires Ollama to be running locally. Start it with: `ollama serve`
|
|
11
|
+
*
|
|
12
|
+
* @example
|
|
13
|
+
* ```typescript
|
|
14
|
+
* import { createOllamaRunner } from '@directive-run/ai/ollama';
|
|
15
|
+
*
|
|
16
|
+
* const runner = createOllamaRunner({ model: 'llama3' });
|
|
17
|
+
* ```
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
/** Options for createOllamaRunner */
|
|
21
|
+
interface OllamaRunnerOptions {
|
|
22
|
+
model?: string;
|
|
23
|
+
baseURL?: string;
|
|
24
|
+
fetch?: typeof globalThis.fetch;
|
|
25
|
+
/** @default undefined */
|
|
26
|
+
timeoutMs?: number;
|
|
27
|
+
/** Lifecycle hooks for tracing, logging, and metrics */
|
|
28
|
+
hooks?: AdapterHooks;
|
|
29
|
+
}
|
|
30
|
+
/**
|
|
31
|
+
* Create an AgentRunner for local Ollama inference.
|
|
32
|
+
*
|
|
33
|
+
* Ollama runs locally – no API key or cloud service needed. Default model
|
|
34
|
+
* is `llama3`, default base URL is `http://localhost:11434`.
|
|
35
|
+
*
|
|
36
|
+
* Returns `tokenUsage` with input/output breakdown for cost tracking
|
|
37
|
+
* (useful for monitoring local resource usage).
|
|
38
|
+
*
|
|
39
|
+
* @example
|
|
40
|
+
* ```typescript
|
|
41
|
+
* const runner = createOllamaRunner({ model: "llama3" });
|
|
42
|
+
* const stack = createAgentStack({ runner, agents: { ... } });
|
|
43
|
+
* ```
|
|
44
|
+
*/
|
|
45
|
+
declare function createOllamaRunner(options?: OllamaRunnerOptions): AgentRunner;
|
|
46
|
+
|
|
47
|
+
export { type OllamaRunnerOptions, createOllamaRunner };
|
package/dist/ollama.js
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
// Base URLs for adapters may only use these protocols.
var ALLOWED_PROTOCOLS = new Set(["http:", "https:"]);

/**
 * Validate an adapter base URL at creation time.
 * Throws a "[Directive]"-prefixed Error when the URL does not parse or
 * uses a protocol other than http/https.
 */
function validateBaseURL(baseURL) {
  try {
    const parsed = new URL(baseURL);
    if (!ALLOWED_PROTOCOLS.has(parsed.protocol)) {
      throw new Error('[Directive] Invalid baseURL protocol "' + parsed.protocol + '" \u2013 only http: and https: are allowed');
    }
  } catch (err) {
    // Re-throw our own protocol error unchanged; wrap URL-parse failures.
    if (err instanceof Error && err.message.startsWith("[Directive]")) {
      throw err;
    }
    throw new Error('[Directive] Invalid baseURL "' + baseURL + '" \u2013 must be a valid URL (e.g. "https://api.openai.com/v1")');
  }
}

/**
 * Build an AgentRunner from buildRequest/parseResponse helpers.
 * Wraps the fetch round-trip with error reporting, token accounting,
 * message bookkeeping and optional lifecycle hooks
 * (onBeforeCall / onAfterCall / onError).
 */
function createRunner(options) {
  const {
    fetch: fetchFn = globalThis.fetch,
    buildRequest,
    parseResponse,
    parseOutput,
    hooks,
  } = options;

  // Default output parser: JSON when possible, otherwise the raw text.
  const parse = parseOutput ?? ((text) => {
    try {
      return JSON.parse(text);
    } catch {
      return text;
    }
  });

  return async (agent, input, runOptions) => {
    const startTime = Date.now();
    hooks?.onBeforeCall?.({ agent, input, timestamp: startTime });

    const messages = [{ role: "user", content: input }];

    try {
      const { url, init } = buildRequest(agent, input, messages);

      // A caller-supplied signal overrides whatever buildRequest set.
      const fetchInit = runOptions?.signal ? { ...init, signal: runOptions.signal } : init;
      const response = await fetchFn(url, fetchInit);

      if (!response.ok) {
        // Best-effort body snippet for diagnostics; never let text() failure mask the status.
        const errBody = await response.text().catch(() => "");
        throw new Error('[Directive] AgentRunner request failed: ' + response.status + ' ' + response.statusText + (errBody ? ' \u2013 ' + errBody.slice(0, 300) : ''));
      }

      const parsed = await parseResponse(response, messages);
      const tokenUsage = {
        inputTokens: parsed.inputTokens ?? 0,
        outputTokens: parsed.outputTokens ?? 0,
      };

      const assistantMessage = { role: "assistant", content: parsed.text };
      const allMessages = [...messages, assistantMessage];

      runOptions?.onMessage?.(assistantMessage);

      const durationMs = Date.now() - startTime;
      hooks?.onAfterCall?.({
        agent,
        input,
        output: parsed.text,
        totalTokens: parsed.totalTokens,
        tokenUsage,
        durationMs,
        timestamp: Date.now(),
      });

      return {
        output: parse(parsed.text),
        messages: allMessages,
        toolCalls: [],
        totalTokens: parsed.totalTokens,
        tokenUsage,
      };
    } catch (err) {
      const durationMs = Date.now() - startTime;
      // onError only fires for real Error instances, matching onAfterCall's contract.
      if (err instanceof Error) {
        hooks?.onError?.({ agent, input, error: err, durationMs, timestamp: Date.now() });
      }
      throw err;
    }
  };
}

/**
 * Create an AgentRunner backed by a local Ollama server (no API key).
 * Defaults: model "llama3", baseURL "http://localhost:11434".
 * Accepts optional fetch override, per-request timeoutMs, and hooks.
 */
function M(options) {
  const config = options === undefined ? {} : options;
  const defaultModel = config.model === undefined ? "llama3" : config.model;
  const baseURL = config.baseURL === undefined ? "http://localhost:11434" : config.baseURL;
  const fetchFn = config.fetch === undefined ? globalThis.fetch : config.fetch;
  const timeoutMs = config.timeoutMs;
  const hooks = config.hooks;

  // Fail fast on bad config rather than on the first request.
  validateBaseURL(baseURL);

  return createRunner({
    fetch: fetchFn,
    hooks: hooks,
    buildRequest: (agent, _input, messages) => {
      // Prepend the system prompt (if any) to the running conversation.
      const chatMessages = [];
      if (agent.instructions) {
        chatMessages.push({ role: "system", content: agent.instructions });
      }
      for (const message of messages) {
        chatMessages.push({ role: message.role, content: message.content });
      }
      return {
        url: baseURL + "/api/chat",
        init: {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({
            model: agent.model ?? defaultModel,
            messages: chatMessages,
            stream: false,
          }),
          // A fresh timeout signal per request, only when configured.
          ...(timeoutMs != null ? { signal: AbortSignal.timeout(timeoutMs) } : {}),
        },
      };
    },
    parseResponse: async (res) => {
      let data;
      try {
        data = await res.json();
      } catch {
        throw new Error('[Directive] Ollama returned non-JSON response. Is Ollama running at ' + baseURL + '? Start it with: ollama serve');
      }
      const text = data.message?.content ?? "";
      const inputTokens = data.prompt_eval_count ?? 0;
      const outputTokens = data.eval_count ?? 0;
      return {
        text: text,
        totalTokens: inputTokens + outputTokens,
        inputTokens: inputTokens,
        outputTokens: outputTokens,
      };
    },
  });
}
|
|
2
|
+
export{M as createOllamaRunner};//# sourceMappingURL=ollama.js.map
|
|
3
|
+
//# sourceMappingURL=ollama.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/helpers.ts","../src/adapters/ollama.ts"],"names":["ALLOWED_PROTOCOLS","validateBaseURL","baseURL","url","err","createRunner","options","fetchFn","buildRequest","parseResponse","parseOutput","hooks","parse","text","agent","input","runOptions","startTime","messages","init","fetchInit","response","errBody","parsed","tokenUsage","assistantMessage","allMessages","durationMs","createOllamaRunner","model","timeoutMs","_input","m","res","data","inputTokens","outputTokens"],"mappings":"AAoDA,IAAMA,CAAAA,CAAoB,IAAI,GAAA,CAAI,CAAC,OAAA,CAAS,QAAQ,CAAC,CAAA,CAM9C,SAASC,CAAAA,CAAgBC,CAAAA,CAAuB,CACrD,GAAI,CACF,IAAMC,CAAAA,CAAM,IAAI,GAAA,CAAID,CAAO,CAAA,CAC3B,GAAI,CAACF,CAAAA,CAAkB,GAAA,CAAIG,CAAAA,CAAI,QAAQ,CAAA,CACrC,MAAM,IAAI,KAAA,CACR,CAAA,sCAAA,EAAyCA,CAAAA,CAAI,QAAQ,CAAA,0CAAA,CACvD,CAEJ,CAAA,MAASC,CAAAA,CAAK,CACZ,MAAIA,CAAAA,YAAe,KAAA,EAASA,CAAAA,CAAI,QAAQ,UAAA,CAAW,aAAa,CAAA,CACxDA,CAAAA,CAGF,IAAI,KAAA,CACR,CAAA,6BAAA,EAAgCF,CAAO,CAAA,+DAAA,CACzC,CACF,CACF,CA4EO,SAASG,CAAAA,CAAaC,EAA2C,CACtE,GAAM,CACJ,KAAA,CAAOC,CAAAA,CAAU,UAAA,CAAW,MAC5B,YAAA,CAAAC,CAAAA,CACA,aAAA,CAAAC,CAAAA,CACA,WAAA,CAAAC,CAAAA,CACA,MAAAC,CACF,CAAA,CAAIL,CAAAA,CAUEM,CAAAA,CAAQF,CAAAA,GARiBG,CAAAA,EAAoB,CACjD,GAAI,CACF,OAAO,IAAA,CAAK,KAAA,CAAMA,CAAI,CACxB,MAAQ,CACN,OAAOA,CACT,CACF,CAAA,CAAA,CAIA,OAAO,MACLC,CAAAA,CACAC,CAAAA,CACAC,CAAAA,GAC0B,CAC1B,IAAMC,CAAAA,CAAY,IAAA,CAAK,KAAI,CAC3BN,CAAAA,EAAO,YAAA,GAAe,CAAE,KAAA,CAAAG,CAAAA,CAAO,KAAA,CAAAC,CAAAA,CAAO,SAAA,CAAWE,CAAU,CAAC,CAAA,CAE5D,IAAMC,CAAAA,CAAsB,CAAC,CAAE,IAAA,CAAM,MAAA,CAAQ,OAAA,CAASH,CAAM,CAAC,EAE7D,GAAI,CACF,GAAM,CAAE,GAAA,CAAAZ,CAAAA,CAAK,KAAAgB,CAAK,CAAA,CAAIX,CAAAA,CAAaM,CAAAA,CAAOC,CAAAA,CAAOG,CAAQ,CAAA,CAEnDE,CAAAA,CAAyBJ,CAAAA,EAAY,MAAA,CACvC,CAAE,GAAGG,CAAAA,CAAM,MAAA,CAAQH,EAAW,MAAO,CAAA,CACrCG,CAAAA,CAEEE,CAAAA,CAAW,MAAMd,CAAAA,CAAQJ,CAAAA,CAAKiB,CAAS,CAAA,CAE7C,GAAI,CAACC,CAAAA,CAAS,EAAA,CAAI,CAChB,IAAMC,CAAAA,CAAU,MAAMD,CAAAA,CAAS,IAAA,EAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACR,CAAA
,wCAAA,EAA2CA,CAAAA,CAAS,MAAM,CAAA,CAAA,EAAIA,CAAAA,CAAS,UAAU,CAAA,EAAGC,CAAAA,CAAU,CAAA,QAAA,EAAMA,EAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,CAAA,CAAA,CAAK,EAAE,EAClI,CACF,CAEA,IAAMC,CAAAA,CAAS,MAAMd,CAAAA,CAAcY,CAAAA,CAAUH,CAAQ,CAAA,CAC/CM,CAAAA,CAAyB,CAC7B,WAAA,CAAaD,CAAAA,CAAO,WAAA,EAAe,EACnC,YAAA,CAAcA,CAAAA,CAAO,YAAA,EAAgB,CACvC,CAAA,CAEME,CAAAA,CAA4B,CAAE,IAAA,CAAM,WAAA,CAAa,OAAA,CAASF,CAAAA,CAAO,IAAK,CAAA,CACtEG,CAAAA,CAAyB,CAAC,GAAGR,CAAAA,CAAUO,CAAgB,CAAA,CAE7DT,CAAAA,EAAY,SAAA,GAAYS,CAAgB,CAAA,CAExC,IAAME,CAAAA,CAAa,IAAA,CAAK,GAAA,EAAI,CAAIV,CAAAA,CAChC,OAAAN,CAAAA,EAAO,WAAA,GAAc,CACnB,KAAA,CAAAG,CAAAA,CACA,KAAA,CAAAC,EACA,MAAA,CAAQQ,CAAAA,CAAO,IAAA,CACf,WAAA,CAAaA,CAAAA,CAAO,WAAA,CACpB,WAAAC,CAAAA,CACA,UAAA,CAAAG,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,GAAA,EAClB,CAAC,CAAA,CAEM,CACL,MAAA,CAAQf,CAAAA,CAASW,CAAAA,CAAO,IAAI,EAC5B,QAAA,CAAUG,CAAAA,CACV,SAAA,CAAW,EAAC,CACZ,WAAA,CAAaH,CAAAA,CAAO,WAAA,CACpB,UAAA,CAAAC,CACF,CACF,CAAA,MAASpB,CAAAA,CAAK,CACZ,IAAMuB,CAAAA,CAAa,IAAA,CAAK,GAAA,EAAI,CAAIV,CAAAA,CAChC,MAAIb,CAAAA,YAAe,KAAA,EACjBO,CAAAA,EAAO,OAAA,GAAU,CACf,KAAA,CAAAG,CAAAA,CACA,KAAA,CAAAC,EACA,KAAA,CAAOX,CAAAA,CACP,UAAA,CAAAuB,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,KAClB,CAAC,CAAA,CAGGvB,CACR,CACF,CACF,CChMO,SAASwB,CAAAA,CACftB,CAAAA,CAA+B,EAAC,CAClB,CACd,GAAM,CACL,KAAA,CAAAuB,CAAAA,CAAQ,QAAA,CACR,OAAA,CAAA3B,CAAAA,CAAU,wBAAA,CACV,MAAOK,CAAAA,CAAU,UAAA,CAAW,KAAA,CAC5B,SAAA,CAAAuB,CAAAA,CACA,KAAA,CAAAnB,CACD,CAAA,CAAIL,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,CAAA,CAEhBG,CAAAA,CAAa,CACnB,KAAA,CAAOE,CAAAA,CACP,KAAA,CAAAI,CAAAA,CACA,YAAA,CAAc,CAACG,CAAAA,CAAOiB,CAAAA,CAAQb,CAAAA,IAAc,CAC3C,GAAA,CAAK,CAAA,EAAGhB,CAAO,CAAA,SAAA,CAAA,CACf,KAAM,CACL,MAAA,CAAQ,MAAA,CACR,OAAA,CAAS,CAAE,cAAA,CAAgB,kBAAmB,CAAA,CAC9C,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CACpB,KAAA,CAAOY,EAAM,KAAA,EAASe,CAAAA,CACtB,QAAA,CAAU,CACT,GAAIf,CAAAA,CAAM,YAAA,CACP,CAAC,CAAE,IAAA,CAAM,QAAA,CAAU,OAAA,CAASA,CAAAA,CAAM,YAAa,CAAC,CAAA,CAChD,EAAC,CACJ,GAAGI,CAAAA,CAAS,GAAA,CAAKc,CAAAA,GAAO,CAAE,IAAA,CAAMA,CAAAA,CAAE,IAAA,CAAM,OAAA,C
AASA,CAAAA,CAAE,OAAQ,EAAE,CAC9D,CAAA,CACA,MAAA,CAAQ,KACT,CAAC,CAAA,CACD,GAAIF,CAAAA,EAAa,IAAA,CAAO,CAAE,MAAA,CAAQ,WAAA,CAAY,OAAA,CAAQA,CAAS,CAAE,CAAA,CAAI,EACtE,CACD,CAAA,CAAA,CACA,aAAA,CAAe,MAAOG,CAAAA,EAAQ,CAC7B,IAAIC,CAAAA,CACJ,GAAI,CACHA,EAAO,MAAMD,CAAAA,CAAI,IAAA,GAClB,CAAA,KAAQ,CACP,MAAM,IAAI,KAAA,CACT,CAAA,oEAAA,EAAuE/B,CAAO,CAAA,6BAAA,CAC/E,CACD,CACA,IAAMW,CAAAA,CAAQqB,CAAAA,CAAK,OAAA,EAAqC,OAAA,EAAqB,EAAA,CACvEC,CAAAA,CAAeD,CAAAA,CAAK,iBAAA,EAAgC,CAAA,CACpDE,CAAAA,CAAgBF,CAAAA,CAAK,UAAA,EAAyB,CAAA,CAEpD,OAAO,CACN,IAAA,CAAArB,CAAAA,CACA,WAAA,CAAasB,CAAAA,CAAcC,CAAAA,CAC3B,WAAA,CAAAD,CAAAA,CACA,YAAA,CAAAC,CACD,CACD,CACD,CAAC,CACF","file":"ollama.js","sourcesContent":["/**\n * Helper functions for AI adapter — createRunner, estimateCost, state queries, validation.\n */\n\nimport type {\n AdapterHooks,\n AgentLike,\n AgentRunner,\n RunResult,\n RunOptions,\n Message,\n TokenUsage,\n AgentState,\n ApprovalState,\n} from \"./types.js\";\n\n// ============================================================================\n// State Query Helpers\n// ============================================================================\n\n/** Check if agent is currently running. */\nexport function isAgentRunning(state: AgentState): boolean {\n return state.status === \"running\";\n}\n\n/** Check if there are pending approvals. 
*/\nexport function hasPendingApprovals(state: ApprovalState): boolean {\n return state.pending.length > 0;\n}\n\n// ============================================================================\n// Cost Estimation\n// ============================================================================\n\n/**\n * Get total cost estimate based on token usage.\n *\n * @param tokenUsage - Total token count\n * @param ratePerMillionTokens - Cost per million tokens (required, no default to avoid stale pricing)\n * @returns Estimated cost in dollars\n */\nexport function estimateCost(\n tokenUsage: number,\n ratePerMillionTokens: number\n): number {\n return (tokenUsage / 1_000_000) * ratePerMillionTokens;\n}\n\n// ============================================================================\n// Validation Helpers\n// ============================================================================\n\nconst ALLOWED_PROTOCOLS = new Set([\"http:\", \"https:\"]);\n\n/**\n * Validate that a baseURL uses http or https.\n * Throws immediately at adapter creation time (not at call time) to catch config errors early.\n */\nexport function validateBaseURL(baseURL: string): void {\n try {\n const url = new URL(baseURL);\n if (!ALLOWED_PROTOCOLS.has(url.protocol)) {\n throw new Error(\n `[Directive] Invalid baseURL protocol \"${url.protocol}\" – only http: and https: are allowed`,\n );\n }\n } catch (err) {\n if (err instanceof Error && err.message.startsWith(\"[Directive]\")) {\n throw err;\n }\n\n throw new Error(\n `[Directive] Invalid baseURL \"${baseURL}\" – must be a valid URL (e.g. 
\"https://api.openai.com/v1\")`,\n );\n }\n}\n\n// ============================================================================\n// createRunner Helper\n// ============================================================================\n\n/** Parsed response from an LLM provider */\nexport interface ParsedResponse {\n text: string;\n totalTokens: number;\n /** Input token count, when available from the provider */\n inputTokens?: number;\n /** Output token count, when available from the provider */\n outputTokens?: number;\n}\n\n/** Options for creating an AgentRunner from buildRequest/parseResponse */\nexport interface CreateRunnerOptions {\n fetch?: typeof globalThis.fetch;\n buildRequest: (\n agent: AgentLike,\n input: string,\n messages: Message[]\n ) => { url: string; init: RequestInit };\n parseResponse: (\n response: Response,\n messages: Message[]\n ) => Promise<ParsedResponse>;\n parseOutput?: <T>(text: string) => T;\n /** Lifecycle hooks for tracing, logging, and metrics */\n hooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner from buildRequest/parseResponse helpers.\n * Reduces ~50 lines of fetch boilerplate to ~20 lines of configuration.\n *\n * Supports lifecycle hooks for observability:\n * - `onBeforeCall` fires before each API request\n * - `onAfterCall` fires after a successful response (includes token breakdown)\n * - `onError` fires when the request fails\n *\n * @example\n * ```typescript\n * const runClaude = createRunner({\n * buildRequest: (agent, input) => ({\n * url: \"/api/claude\",\n * init: {\n * method: \"POST\",\n * headers: { \"Content-Type\": \"application/json\" },\n * body: JSON.stringify({\n * model: agent.model ?? \"claude-haiku-4-5-20251001\",\n * system: agent.instructions ?? \"\",\n * messages: [{ role: \"user\", content: input }],\n * }),\n * },\n * }),\n * parseResponse: async (res) => {\n * const data = await res.json();\n * const inputTokens = data.usage?.input_tokens ?? 
0;\n * const outputTokens = data.usage?.output_tokens ?? 0;\n * return {\n * text: data.content?.[0]?.text ?? \"\",\n * totalTokens: inputTokens + outputTokens,\n * inputTokens,\n * outputTokens,\n * };\n * },\n * hooks: {\n * onAfterCall: ({ durationMs, tokenUsage }) => {\n * console.log(`LLM call: ${durationMs}ms, ${tokenUsage.inputTokens}in/${tokenUsage.outputTokens}out`);\n * },\n * },\n * });\n * ```\n */\nexport function createRunner(options: CreateRunnerOptions): AgentRunner {\n const {\n fetch: fetchFn = globalThis.fetch,\n buildRequest,\n parseResponse,\n parseOutput,\n hooks,\n } = options;\n\n const defaultParseOutput = <T>(text: string): T => {\n try {\n return JSON.parse(text) as T;\n } catch {\n return text as unknown as T;\n }\n };\n\n const parse = parseOutput ?? defaultParseOutput;\n\n return async <T = unknown>(\n agent: AgentLike,\n input: string,\n runOptions?: RunOptions\n ): Promise<RunResult<T>> => {\n const startTime = Date.now();\n hooks?.onBeforeCall?.({ agent, input, timestamp: startTime });\n\n const messages: Message[] = [{ role: \"user\", content: input }];\n\n try {\n const { url, init } = buildRequest(agent, input, messages);\n\n const fetchInit: RequestInit = runOptions?.signal\n ? { ...init, signal: runOptions.signal }\n : init;\n\n const response = await fetchFn(url, fetchInit);\n\n if (!response.ok) {\n const errBody = await response.text().catch(() => \"\");\n\n throw new Error(\n `[Directive] AgentRunner request failed: ${response.status} ${response.statusText}${errBody ? ` – ${errBody.slice(0, 300)}` : \"\"}`,\n );\n }\n\n const parsed = await parseResponse(response, messages);\n const tokenUsage: TokenUsage = {\n inputTokens: parsed.inputTokens ?? 0,\n outputTokens: parsed.outputTokens ?? 
0,\n };\n\n const assistantMessage: Message = { role: \"assistant\", content: parsed.text };\n const allMessages: Message[] = [...messages, assistantMessage];\n\n runOptions?.onMessage?.(assistantMessage);\n\n const durationMs = Date.now() - startTime;\n hooks?.onAfterCall?.({\n agent,\n input,\n output: parsed.text,\n totalTokens: parsed.totalTokens,\n tokenUsage,\n durationMs,\n timestamp: Date.now(),\n });\n\n return {\n output: parse<T>(parsed.text),\n messages: allMessages,\n toolCalls: [],\n totalTokens: parsed.totalTokens,\n tokenUsage,\n };\n } catch (err) {\n const durationMs = Date.now() - startTime;\n if (err instanceof Error) {\n hooks?.onError?.({\n agent,\n input,\n error: err,\n durationMs,\n timestamp: Date.now(),\n });\n }\n\n throw err;\n }\n };\n}\n","/**\n * @directive-run/ai/ollama\n *\n * Ollama adapter for Directive AI. Provides runners for local\n * Ollama inference. No API key required.\n *\n * Requires Ollama to be running locally. Start it with: `ollama serve`\n *\n * @example\n * ```typescript\n * import { createOllamaRunner } from '@directive-run/ai/ollama';\n *\n * const runner = createOllamaRunner({ model: 'llama3' });\n * ```\n */\n\nimport { createRunner, validateBaseURL } from \"../helpers.js\";\nimport type { AdapterHooks, AgentRunner } from \"../types.js\";\n\n// ============================================================================\n// Ollama Runner\n// ============================================================================\n\n/** Options for createOllamaRunner */\nexport interface OllamaRunnerOptions {\n\tmodel?: string;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** @default undefined */\n\ttimeoutMs?: number;\n\t/** Lifecycle hooks for tracing, logging, and metrics */\n\thooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner for local Ollama inference.\n *\n * Ollama runs locally – no API key or cloud service needed. 
Default model\n * is `llama3`, default base URL is `http://localhost:11434`.\n *\n * Returns `tokenUsage` with input/output breakdown for cost tracking\n * (useful for monitoring local resource usage).\n *\n * @example\n * ```typescript\n * const runner = createOllamaRunner({ model: \"llama3\" });\n * const stack = createAgentStack({ runner, agents: { ... } });\n * ```\n */\nexport function createOllamaRunner(\n\toptions: OllamaRunnerOptions = {},\n): AgentRunner {\n\tconst {\n\t\tmodel = \"llama3\",\n\t\tbaseURL = \"http://localhost:11434\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\ttimeoutMs,\n\t\thooks,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\treturn createRunner({\n\t\tfetch: fetchFn,\n\t\thooks,\n\t\tbuildRequest: (agent, _input, messages) => ({\n\t\t\turl: `${baseURL}/api/chat`,\n\t\t\tinit: {\n\t\t\t\tmethod: \"POST\",\n\t\t\t\theaders: { \"Content-Type\": \"application/json\" },\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\tmodel: agent.model ?? model,\n\t\t\t\t\tmessages: [\n\t\t\t\t\t\t...(agent.instructions\n\t\t\t\t\t\t\t? [{ role: \"system\", content: agent.instructions }]\n\t\t\t\t\t\t\t: []),\n\t\t\t\t\t\t...messages.map((m) => ({ role: m.role, content: m.content })),\n\t\t\t\t\t],\n\t\t\t\t\tstream: false,\n\t\t\t\t}),\n\t\t\t\t...(timeoutMs != null ? { signal: AbortSignal.timeout(timeoutMs) } : {}),\n\t\t\t},\n\t\t}),\n\t\tparseResponse: async (res) => {\n\t\t\tlet data: Record<string, unknown>;\n\t\t\ttry {\n\t\t\t\tdata = await res.json();\n\t\t\t} catch {\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`[Directive] Ollama returned non-JSON response. Is Ollama running at ${baseURL}? Start it with: ollama serve`,\n\t\t\t\t);\n\t\t\t}\n\t\t\tconst text = (data.message as Record<string, unknown>)?.content as string ?? \"\";\n\t\t\tconst inputTokens = (data.prompt_eval_count as number) ?? 0;\n\t\t\tconst outputTokens = (data.eval_count as number) ?? 
0;\n\n\t\t\treturn {\n\t\t\t\ttext,\n\t\t\t\ttotalTokens: inputTokens + outputTokens,\n\t\t\t\tinputTokens,\n\t\t\t\toutputTokens,\n\t\t\t};\n\t\t},\n\t});\n}\n"]}
|
package/dist/openai.cjs
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
'use strict';var S=new Set(["http:","https:"]);function O(i){try{let n=new URL(i);if(!S.has(n.protocol))throw new Error(`[Directive] Invalid baseURL protocol "${n.protocol}" \u2013 only http: and https: are allowed`)}catch(n){throw n instanceof Error&&n.message.startsWith("[Directive]")?n:new Error(`[Directive] Invalid baseURL "${i}" \u2013 must be a valid URL (e.g. "https://api.openai.com/v1")`)}}function I(i){let{fetch:n=globalThis.fetch,buildRequest:f,parseResponse:u,parseOutput:c,hooks:l}=i,r=c??(e=>{try{return JSON.parse(e)}catch{return e}});return async(e,o,s)=>{let t=Date.now();l?.onBeforeCall?.({agent:e,input:o,timestamp:t});let p=[{role:"user",content:o}];try{let{url:h,init:g}=f(e,o,p),k=s?.signal?{...g,signal:s.signal}:g,d=await n(h,k);if(!d.ok){let A=await d.text().catch(()=>"");throw new Error(`[Directive] AgentRunner request failed: ${d.status} ${d.statusText}${A?` \u2013 ${A.slice(0,300)}`:""}`)}let a=await u(d,p),T={inputTokens:a.inputTokens??0,outputTokens:a.outputTokens??0},w={role:"assistant",content:a.text},R=[...p,w];s?.onMessage?.(w);let y=Date.now()-t;return l?.onAfterCall?.({agent:e,input:o,output:a.text,totalTokens:a.totalTokens,tokenUsage:T,durationMs:y,timestamp:Date.now()}),{output:r(a.text),messages:R,toolCalls:[],totalTokens:a.totalTokens,tokenUsage:T}}catch(h){let g=Date.now()-t;throw h instanceof Error&&l?.onError?.({agent:e,input:o,error:h,durationMs:g,timestamp:Date.now()}),h}}}var $={"gpt-4o":{input:2.5,output:10},"gpt-4o-mini":{input:.15,output:.6},"gpt-4-turbo":{input:10,output:30},"o3-mini":{input:1.1,output:4.4}};function L(i){let{apiKey:n,model:f="gpt-4o",maxTokens:u,baseURL:c="https://api.openai.com/v1",fetch:l=globalThis.fetch,timeoutMs:m,hooks:r}=i;return O(c),typeof process<"u"&&process.env?.NODE_ENV!=="production"&&!n&&console.warn("[Directive] createOpenAIRunner: apiKey is empty. 
API calls will fail."),I({fetch:l,hooks:r,buildRequest:(e,o,s)=>({url:`${c}/chat/completions`,init:{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${n}`},body:JSON.stringify({model:e.model??f,...u!=null?{max_tokens:u}:{},messages:[...e.instructions?[{role:"system",content:e.instructions}]:[],...s.map(t=>({role:t.role,content:t.content}))]}),...m!=null?{signal:AbortSignal.timeout(m)}:{}}}),parseResponse:async e=>{let o=await e.json(),s=o.choices?.[0]?.message?.content??"",t=o.usage?.prompt_tokens??0,p=o.usage?.completion_tokens??0;return {text:s,totalTokens:t+p,inputTokens:t,outputTokens:p}}})}function N(i){let{apiKey:n,model:f="text-embedding-3-small",dimensions:u=1536,baseURL:c="https://api.openai.com/v1",fetch:l=globalThis.fetch,timeoutMs:m}=i;return O(c),typeof process<"u"&&process.env?.NODE_ENV!=="production"&&!n&&console.warn("[Directive] createOpenAIEmbedder: apiKey is empty. API calls will fail."),async r=>{let e=await l(`${c}/embeddings`,{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${n}`},body:JSON.stringify({model:f,input:r,dimensions:u}),signal:AbortSignal.timeout(m??3e4)});if(!e.ok){let t=await e.text().catch(()=>"");throw new Error(`[Directive] OpenAI embedding failed: ${e.status}${t?` \u2013 ${t.slice(0,200)}`:""}`)}let s=(await e.json()).data[0];if(!s)throw new Error("[Directive] OpenAI embedding response contained no data entries");return s.embedding}}function P(i){let{apiKey:n,model:f="gpt-4o",maxTokens:u,baseURL:c="https://api.openai.com/v1",fetch:l=globalThis.fetch,hooks:m}=i;return O(c),typeof process<"u"&&process.env?.NODE_ENV!=="production"&&!n&&console.warn("[Directive] createOpenAIStreamingRunner: apiKey is empty. 
API calls will fail."),async(r,e,o)=>{let s=Date.now();m?.onBeforeCall?.({agent:r,input:e,timestamp:s});try{let t=await l(`${c}/chat/completions`,{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${n}`},body:JSON.stringify({model:r.model??f,...u!=null?{max_tokens:u}:{},messages:[...r.instructions?[{role:"system",content:r.instructions}]:[],{role:"user",content:e}],stream:!0,stream_options:{include_usage:!0}}),signal:o.signal});if(!t.ok){let y=await t.text().catch(()=>"");throw new Error(`[Directive] OpenAI streaming error ${t.status}${y?` \u2013 ${y.slice(0,200)}`:""}`)}let p=t.body?.getReader();if(!p)throw new Error("[Directive] No response body");let h=new TextDecoder,g="",k="",d=0,a=0;try{for(;;){let{done:y,value:A}=await p.read();if(y)break;g+=h.decode(A,{stream:!0});let x=g.split(`
|
|
2
|
+
`);g=x.pop()??"";for(let D of x){if(!D.startsWith("data: "))continue;let E=D.slice(6).trim();if(E!=="[DONE]")try{let b=JSON.parse(E),v=b.choices?.[0]?.delta;v?.content&&(k+=v.content,o.onToken?.(v.content)),b.usage&&(d=b.usage.prompt_tokens??0,a=b.usage.completion_tokens??0);}catch(b){if(b instanceof SyntaxError)typeof process<"u"&&process.env?.NODE_ENV==="development"&&console.warn("[Directive] Malformed SSE event from OpenAI:",E);else throw b}}}}finally{p.cancel().catch(()=>{});}let T={role:"assistant",content:k};o.onMessage?.(T);let w={inputTokens:d,outputTokens:a},R=d+a;return m?.onAfterCall?.({agent:r,input:e,output:k,totalTokens:R,tokenUsage:w,durationMs:Date.now()-s,timestamp:Date.now()}),{output:k,messages:[{role:"user",content:e},T],toolCalls:[],totalTokens:R,tokenUsage:w}}catch(t){throw t instanceof Error&&m?.onError?.({agent:r,input:e,error:t,durationMs:Date.now()-s,timestamp:Date.now()}),t}}}exports.OPENAI_PRICING=$;exports.createOpenAIEmbedder=N;exports.createOpenAIRunner=L;exports.createOpenAIStreamingRunner=P;//# sourceMappingURL=openai.cjs.map
|
|
3
|
+
//# sourceMappingURL=openai.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/helpers.ts","../src/adapters/openai.ts"],"names":["ALLOWED_PROTOCOLS","validateBaseURL","baseURL","url","err","createRunner","options","fetchFn","buildRequest","parseResponse","parseOutput","hooks","parse","text","agent","input","runOptions","startTime","messages","init","fetchInit","response","errBody","parsed","tokenUsage","assistantMessage","allMessages","durationMs","OPENAI_PRICING","createOpenAIRunner","apiKey","model","maxTokens","timeoutMs","_input","m","res","data","inputTokens","outputTokens","createOpenAIEmbedder","dimensions","entry","createOpenAIStreamingRunner","callbacks","reader","decoder","buf","fullText","promptTokens","completionTokens","done","value","lines","line","event","delta","parseErr","assistantMsg","totalTokens"],"mappings":"aAoDA,IAAMA,CAAAA,CAAoB,IAAI,GAAA,CAAI,CAAC,QAAS,QAAQ,CAAC,CAAA,CAM9C,SAASC,CAAAA,CAAgBC,CAAAA,CAAuB,CACrD,GAAI,CACF,IAAMC,CAAAA,CAAM,IAAI,GAAA,CAAID,CAAO,CAAA,CAC3B,GAAI,CAACF,CAAAA,CAAkB,GAAA,CAAIG,CAAAA,CAAI,QAAQ,CAAA,CACrC,MAAM,IAAI,KAAA,CACR,CAAA,sCAAA,EAAyCA,CAAAA,CAAI,QAAQ,CAAA,0CAAA,CACvD,CAEJ,CAAA,MAASC,CAAAA,CAAK,CACZ,MAAIA,aAAe,KAAA,EAASA,CAAAA,CAAI,OAAA,CAAQ,UAAA,CAAW,aAAa,CAAA,CACxDA,EAGF,IAAI,KAAA,CACR,CAAA,6BAAA,EAAgCF,CAAO,CAAA,+DAAA,CACzC,CACF,CACF,CA4EO,SAASG,CAAAA,CAAaC,CAAAA,CAA2C,CACtE,GAAM,CACJ,KAAA,CAAOC,CAAAA,CAAU,UAAA,CAAW,KAAA,CAC5B,YAAA,CAAAC,CAAAA,CACA,cAAAC,CAAAA,CACA,WAAA,CAAAC,CAAAA,CACA,KAAA,CAAAC,CACF,CAAA,CAAIL,EAUEM,CAAAA,CAAQF,CAAAA,GARiBG,CAAAA,EAAoB,CACjD,GAAI,CACF,OAAO,IAAA,CAAK,KAAA,CAAMA,CAAI,CACxB,CAAA,KAAQ,CACN,OAAOA,CACT,CACF,CAAA,CAAA,CAIA,OAAO,MACLC,CAAAA,CACAC,EACAC,CAAAA,GAC0B,CAC1B,IAAMC,CAAAA,CAAY,IAAA,CAAK,GAAA,GACvBN,CAAAA,EAAO,YAAA,GAAe,CAAE,KAAA,CAAAG,CAAAA,CAAO,KAAA,CAAAC,EAAO,SAAA,CAAWE,CAAU,CAAC,CAAA,CAE5D,IAAMC,EAAsB,CAAC,CAAE,IAAA,CAAM,MAAA,CAAQ,OAAA,CAASH,CAAM,CAAC,CAAA,CAE7D,GAAI,CACF,GAAM,CAAE,GAAA,CAAAZ,EAAK,IAAA,CAAAgB,CAAK,CAAA,CAAIX,CAAAA,CAAaM,CAAAA,CAAOC,CAAAA,CAAOG,CAAQ,CAAA,CAEnDE,CAAAA,CAAyBJ,CAAAA,EAAY,MAAA,CACvC,CAA
E,GAAGG,EAAM,MAAA,CAAQH,CAAAA,CAAW,MAAO,CAAA,CACrCG,CAAAA,CAEEE,CAAAA,CAAW,MAAMd,CAAAA,CAAQJ,CAAAA,CAAKiB,CAAS,CAAA,CAE7C,GAAI,CAACC,EAAS,EAAA,CAAI,CAChB,IAAMC,CAAAA,CAAU,MAAMD,CAAAA,CAAS,MAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACR,CAAA,wCAAA,EAA2CA,CAAAA,CAAS,MAAM,CAAA,CAAA,EAAIA,CAAAA,CAAS,UAAU,CAAA,EAAGC,CAAAA,CAAU,CAAA,QAAA,EAAMA,CAAAA,CAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,CAAA,CAAA,CAAK,EAAE,CAAA,CAClI,CACF,CAEA,IAAMC,CAAAA,CAAS,MAAMd,CAAAA,CAAcY,CAAAA,CAAUH,CAAQ,CAAA,CAC/CM,EAAyB,CAC7B,WAAA,CAAaD,CAAAA,CAAO,WAAA,EAAe,CAAA,CACnC,YAAA,CAAcA,EAAO,YAAA,EAAgB,CACvC,CAAA,CAEME,CAAAA,CAA4B,CAAE,IAAA,CAAM,YAAa,OAAA,CAASF,CAAAA,CAAO,IAAK,CAAA,CACtEG,CAAAA,CAAyB,CAAC,GAAGR,CAAAA,CAAUO,CAAgB,EAE7DT,CAAAA,EAAY,SAAA,GAAYS,CAAgB,CAAA,CAExC,IAAME,CAAAA,CAAa,IAAA,CAAK,GAAA,EAAI,CAAIV,EAChC,OAAAN,CAAAA,EAAO,WAAA,GAAc,CACnB,KAAA,CAAAG,CAAAA,CACA,MAAAC,CAAAA,CACA,MAAA,CAAQQ,CAAAA,CAAO,IAAA,CACf,WAAA,CAAaA,CAAAA,CAAO,YACpB,UAAA,CAAAC,CAAAA,CACA,UAAA,CAAAG,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,KAClB,CAAC,CAAA,CAEM,CACL,MAAA,CAAQf,CAAAA,CAASW,EAAO,IAAI,CAAA,CAC5B,QAAA,CAAUG,CAAAA,CACV,SAAA,CAAW,GACX,WAAA,CAAaH,CAAAA,CAAO,WAAA,CACpB,UAAA,CAAAC,CACF,CACF,OAASpB,CAAAA,CAAK,CACZ,IAAMuB,CAAAA,CAAa,IAAA,CAAK,GAAA,GAAQV,CAAAA,CAChC,MAAIb,CAAAA,YAAe,KAAA,EACjBO,CAAAA,EAAO,OAAA,GAAU,CACf,KAAA,CAAAG,CAAAA,CACA,KAAA,CAAAC,CAAAA,CACA,KAAA,CAAOX,CAAAA,CACP,WAAAuB,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,GAAA,EAClB,CAAC,EAGGvB,CACR,CACF,CACF,CCzMO,IAAMwB,CAAAA,CAAoE,CAChF,QAAA,CAAU,CAAE,KAAA,CAAO,GAAA,CAAK,MAAA,CAAQ,EAAG,EACnC,aAAA,CAAe,CAAE,KAAA,CAAO,GAAA,CAAM,MAAA,CAAQ,EAAI,EAC1C,aAAA,CAAe,CAAE,KAAA,CAAO,EAAA,CAAI,MAAA,CAAQ,EAAG,EACvC,SAAA,CAAW,CAAE,KAAA,CAAO,GAAA,CAAK,MAAA,CAAQ,GAAI,CACtC,EA0CO,SAASC,CAAAA,CAAmBvB,CAAAA,CAA2C,CAC7E,GAAM,CACL,MAAA,CAAAwB,CAAAA,CACA,KAAA,CAAAC,CAAAA,CAAQ,QAAA,CACR,SAAA,CAAAC,EACA,OAAA,CAAA9B,CAAAA,CAAU,2BAAA,CACV,KAAA,CAAOK,CAAAA,CAAU,UAAA,CAAW,MAC5B,SAAA,CAAA0B,CAAAA,CACA,KAAA,CAAAtB,CACD,CAAA,CAAIL,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,CAAA,CAEnB,OAAO,OAAA,CAAY,GAA
A,EAAe,QAAQ,GAAA,EAAK,QAAA,GAAa,YAAA,EAAgB,CAAC4B,CAAAA,EAChF,OAAA,CAAQ,KAAK,uEAAuE,CAAA,CAG9EzB,CAAAA,CAAa,CACnB,KAAA,CAAOE,CAAAA,CACP,MAAAI,CAAAA,CACA,YAAA,CAAc,CAACG,CAAAA,CAAOoB,CAAAA,CAAQhB,CAAAA,IAAc,CAC3C,GAAA,CAAK,CAAA,EAAGhB,CAAO,CAAA,iBAAA,CAAA,CACf,IAAA,CAAM,CACL,OAAQ,MAAA,CACR,OAAA,CAAS,CACR,cAAA,CAAgB,kBAAA,CAChB,aAAA,CAAe,UAAU4B,CAAM,CAAA,CAChC,CAAA,CACA,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CACpB,KAAA,CAAOhB,CAAAA,CAAM,KAAA,EAASiB,CAAAA,CACtB,GAAIC,CAAAA,EAAa,KAAO,CAAE,UAAA,CAAYA,CAAU,CAAA,CAAI,EAAC,CACrD,SAAU,CACT,GAAIlB,CAAAA,CAAM,YAAA,CACP,CAAC,CAAE,KAAM,QAAA,CAAU,OAAA,CAASA,CAAAA,CAAM,YAAa,CAAC,CAAA,CAChD,EAAC,CACJ,GAAGI,EAAS,GAAA,CAAKiB,CAAAA,GAAO,CAAE,IAAA,CAAMA,CAAAA,CAAE,IAAA,CAAM,OAAA,CAASA,CAAAA,CAAE,OAAQ,EAAE,CAC9D,CACD,CAAC,CAAA,CACD,GAAIF,CAAAA,EAAa,KAAO,CAAE,MAAA,CAAQ,WAAA,CAAY,OAAA,CAAQA,CAAS,CAAE,EAAI,EACtE,CACD,CAAA,CAAA,CACA,aAAA,CAAe,MAAOG,GAAQ,CAC7B,IAAMC,CAAAA,CAAO,MAAMD,CAAAA,CAAI,IAAA,GACjBvB,CAAAA,CAAOwB,CAAAA,CAAK,OAAA,GAAU,CAAC,CAAA,EAAG,OAAA,EAAS,SAAW,EAAA,CAC9CC,CAAAA,CAAcD,CAAAA,CAAK,KAAA,EAAO,aAAA,EAAiB,CAAA,CAC3CE,EAAeF,CAAAA,CAAK,KAAA,EAAO,iBAAA,EAAqB,CAAA,CAEtD,OAAO,CACN,KAAAxB,CAAAA,CACA,WAAA,CAAayB,CAAAA,CAAcC,CAAAA,CAC3B,WAAA,CAAAD,CAAAA,CACA,aAAAC,CACD,CACD,CACD,CAAC,CACF,CA0BO,SAASC,CAAAA,CACflC,CAAAA,CACa,CACb,GAAM,CACL,MAAA,CAAAwB,EACA,KAAA,CAAAC,CAAAA,CAAQ,wBAAA,CACR,UAAA,CAAAU,CAAAA,CAAa,IAAA,CACb,QAAAvC,CAAAA,CAAU,2BAAA,CACV,KAAA,CAAOK,CAAAA,CAAU,UAAA,CAAW,KAAA,CAC5B,UAAA0B,CACD,CAAA,CAAI3B,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,EAEnB,OAAO,OAAA,CAAY,GAAA,EAAe,OAAA,CAAQ,GAAA,EAAK,QAAA,GAAa,cAAgB,CAAC4B,CAAAA,EAChF,QAAQ,IAAA,CAAK,yEAAyE,EAGhF,MAAOjB,CAAAA,EAAqC,CAClD,IAAMQ,CAAAA,CAAW,MAAMd,EAAQ,CAAA,EAAGL,CAAO,CAAA,WAAA,CAAA,CAAe,CACvD,MAAA,CAAQ,MAAA,CACR,QAAS,CACR,cAAA,CAAgB,kBAAA,CAChB,aAAA,CAAe,CAAA,OAAA,EAAU4B,CAAM,EAChC,CAAA,CACA,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CAAE,KAAA,CAAAC,EAAO,KAAA,CAAOlB,CAAAA,CAAM,UAAA,CAAA4B,CAAW,CAAC,CAAA,CACvD,OAAQ,WAAA,CAAY,OAAA,CAAQR,CAAAA,EAAa,GAAM,CAChD,CAAC,EAED,GAAI,CAACZ,CAAAA,CAAS,EAAA
,CAAI,CACjB,IAAMC,EAAU,MAAMD,CAAAA,CAAS,IAAA,EAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACT,CAAA,qCAAA,EAAwCA,CAAAA,CAAS,MAAM,CAAA,EAAGC,CAAAA,CAAU,CAAA,QAAA,EAAMA,CAAAA,CAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,CAAA,CAAA,CAAK,EAAE,CAAA,CACvG,CACD,CAMA,IAAMoB,CAAAA,CAAAA,CAJQ,MAAMrB,CAAAA,CAAS,IAAA,EAAK,EAIf,IAAA,CAAK,CAAC,CAAA,CACzB,GAAI,CAACqB,CAAAA,CACJ,MAAM,IAAI,MACT,iEACD,CAAA,CAGD,OAAOA,CAAAA,CAAM,SACd,CACD,CAmCO,SAASC,CAAAA,CACfrC,CAAAA,CAC0B,CAC1B,GAAM,CACL,OAAAwB,CAAAA,CACA,KAAA,CAAAC,EAAQ,QAAA,CACR,SAAA,CAAAC,EACA,OAAA,CAAA9B,CAAAA,CAAU,2BAAA,CACV,KAAA,CAAOK,CAAAA,CAAU,UAAA,CAAW,MAC5B,KAAA,CAAAI,CACD,CAAA,CAAIL,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,CAAA,CAEnB,OAAO,OAAA,CAAY,GAAA,EAAe,OAAA,CAAQ,GAAA,EAAK,WAAa,YAAA,EAAgB,CAAC4B,CAAAA,EAChF,OAAA,CAAQ,IAAA,CAAK,gFAAgF,EAGvF,MAAOhB,CAAAA,CAAOC,CAAAA,CAAO6B,CAAAA,GAAc,CACzC,IAAM3B,EAAY,IAAA,CAAK,GAAA,EAAI,CAC3BN,CAAAA,EAAO,YAAA,GAAe,CAAE,MAAAG,CAAAA,CAAO,KAAA,CAAAC,CAAAA,CAAO,SAAA,CAAWE,CAAU,CAAC,EAE5D,GAAI,CACH,IAAMI,CAAAA,CAAW,MAAMd,CAAAA,CAAQ,GAAGL,CAAO,CAAA,iBAAA,CAAA,CAAqB,CAC7D,MAAA,CAAQ,MAAA,CACR,OAAA,CAAS,CACR,cAAA,CAAgB,kBAAA,CAChB,aAAA,CAAe,CAAA,OAAA,EAAU4B,CAAM,CAAA,CAChC,EACA,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CACpB,KAAA,CAAOhB,CAAAA,CAAM,OAASiB,CAAAA,CACtB,GAAIC,CAAAA,EAAa,IAAA,CAAO,CAAE,UAAA,CAAYA,CAAU,CAAA,CAAI,EAAC,CACrD,QAAA,CAAU,CACT,GAAIlB,EAAM,YAAA,CACP,CAAC,CAAE,IAAA,CAAM,QAAA,CAAU,OAAA,CAASA,EAAM,YAAa,CAAC,CAAA,CAChD,EAAC,CACJ,CAAE,KAAM,MAAA,CAAQ,OAAA,CAASC,CAAM,CAChC,CAAA,CACA,MAAA,CAAQ,GACR,cAAA,CAAgB,CAAE,aAAA,CAAe,CAAA,CAAK,CACvC,CAAC,EACD,MAAA,CAAQ6B,CAAAA,CAAU,MACnB,CAAC,CAAA,CAED,GAAI,CAACvB,CAAAA,CAAS,EAAA,CAAI,CACjB,IAAMC,CAAAA,CAAU,MAAMD,EAAS,IAAA,EAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACT,CAAA,mCAAA,EAAsCA,CAAAA,CAAS,MAAM,CAAA,EAAGC,EAAU,CAAA,QAAA,EAAMA,CAAAA,CAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,GAAK,EAAE,CAAA,CACrG,CACD,CAEA,IAAMuB,CAAAA,CAASxB,EAAS,IAAA,EAAM,SAAA,EAAU,CACxC,GAAI,CAACwB,CAAAA,CACJ,MAAM,IAAI,KAAA,CAAM,8BAA8B,CAAA,CAG/C,IAAMC,CAAAA,CAAU,IAAI,WAAA,CA
ChBC,CAAAA,CAAM,EAAA,CACNC,CAAAA,CAAW,EAAA,CACXC,CAAAA,CAAe,EACfC,CAAAA,CAAmB,CAAA,CAEvB,GAAI,CACH,OAAa,CACZ,GAAM,CAAE,IAAA,CAAAC,CAAAA,CAAM,KAAA,CAAAC,CAAM,CAAA,CAAI,MAAMP,CAAAA,CAAO,IAAA,EAAK,CAC1C,GAAIM,CAAAA,CACH,MAGDJ,GAAOD,CAAAA,CAAQ,MAAA,CAAOM,CAAAA,CAAO,CAAE,MAAA,CAAQ,CAAA,CAAK,CAAC,CAAA,CAC7C,IAAMC,CAAAA,CAAQN,CAAAA,CAAI,KAAA,CAAM;AAAA,CAAI,CAAA,CAC5BA,EAAMM,CAAAA,CAAM,GAAA,IAAS,EAAA,CAErB,IAAA,IAAWC,KAAQD,CAAAA,CAAO,CACzB,GAAI,CAACC,CAAAA,CAAK,WAAW,QAAQ,CAAA,CAC5B,SAED,IAAMjB,CAAAA,CAAOiB,EAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK,CAChC,GAAIjB,CAAAA,GAAS,QAAA,CAIb,GAAI,CACH,IAAMkB,EAAQ,IAAA,CAAK,KAAA,CAAMlB,CAAI,CAAA,CAGvBmB,CAAAA,CAAQD,EAAM,OAAA,GAAU,CAAC,GAAG,KAAA,CAC9BC,CAAAA,EAAO,OAAA,GACVR,CAAAA,EAAYQ,CAAAA,CAAM,OAAA,CAClBZ,EAAU,OAAA,GAAUY,CAAAA,CAAM,OAAO,CAAA,CAAA,CAI9BD,CAAAA,CAAM,QACTN,CAAAA,CAAeM,CAAAA,CAAM,MAAM,aAAA,EAAiB,CAAA,CAC5CL,EAAmBK,CAAAA,CAAM,KAAA,CAAM,mBAAqB,CAAA,EAEtD,CAAA,MAASE,EAAU,CAClB,GAAIA,CAAAA,YAAoB,WAAA,CAEtB,OAAO,OAAA,CAAY,KACnB,OAAA,CAAQ,GAAA,EAAK,WAAa,aAAA,EAE1B,OAAA,CAAQ,KACP,8CAAA,CACApB,CACD,OAGD,MAAMoB,CAER,CACD,CACD,CACD,QAAE,CACDZ,CAAAA,CAAO,QAAO,CAAE,KAAA,CAAM,IAAM,CAAC,CAAC,EAC/B,CAEA,IAAMa,CAAAA,CAAwB,CAAE,IAAA,CAAM,WAAA,CAAa,QAASV,CAAS,CAAA,CACrEJ,EAAU,SAAA,GAAYc,CAAY,EAElC,IAAMlC,CAAAA,CAAyB,CAC9B,WAAA,CAAayB,CAAAA,CACb,aAAcC,CACf,CAAA,CACMS,CAAAA,CAAcV,CAAAA,CAAeC,CAAAA,CAEnC,OAAAvC,GAAO,WAAA,GAAc,CACpB,MAAAG,CAAAA,CACA,KAAA,CAAAC,EACA,MAAA,CAAQiC,CAAAA,CACR,YAAAW,CAAAA,CACA,UAAA,CAAAnC,EACA,UAAA,CAAY,IAAA,CAAK,KAAI,CAAIP,CAAAA,CACzB,UAAW,IAAA,CAAK,GAAA,EACjB,CAAC,CAAA,CAEM,CACN,OAAQ+B,CAAAA,CACR,QAAA,CAAU,CAAC,CAAE,IAAA,CAAM,OAAiB,OAAA,CAASjC,CAAM,EAAG2C,CAAY,CAAA,CAClE,UAAW,EAAC,CACZ,YAAAC,CAAAA,CACA,UAAA,CAAAnC,CACD,CACD,CAAA,MAASpB,CAAAA,CAAK,CACb,MAAIA,CAAAA,YAAe,OAClBO,CAAAA,EAAO,OAAA,GAAU,CAChB,KAAA,CAAAG,CAAAA,CACA,MAAAC,CAAAA,CACA,KAAA,CAAOX,EACP,UAAA,CAAY,IAAA,CAAK,KAAI,CAAIa,CAAAA,CACzB,UAAW,IAAA,CAAK,GAAA,EACjB,CAAC,CAAA,CAGIb,CACP,CACD,CACD","file":"openai.cjs","sourcesContent":["/**\n * Helper functions 
for AI adapter — createRunner, estimateCost, state queries, validation.\n */\n\nimport type {\n AdapterHooks,\n AgentLike,\n AgentRunner,\n RunResult,\n RunOptions,\n Message,\n TokenUsage,\n AgentState,\n ApprovalState,\n} from \"./types.js\";\n\n// ============================================================================\n// State Query Helpers\n// ============================================================================\n\n/** Check if agent is currently running. */\nexport function isAgentRunning(state: AgentState): boolean {\n return state.status === \"running\";\n}\n\n/** Check if there are pending approvals. */\nexport function hasPendingApprovals(state: ApprovalState): boolean {\n return state.pending.length > 0;\n}\n\n// ============================================================================\n// Cost Estimation\n// ============================================================================\n\n/**\n * Get total cost estimate based on token usage.\n *\n * @param tokenUsage - Total token count\n * @param ratePerMillionTokens - Cost per million tokens (required, no default to avoid stale pricing)\n * @returns Estimated cost in dollars\n */\nexport function estimateCost(\n tokenUsage: number,\n ratePerMillionTokens: number\n): number {\n return (tokenUsage / 1_000_000) * ratePerMillionTokens;\n}\n\n// ============================================================================\n// Validation Helpers\n// ============================================================================\n\nconst ALLOWED_PROTOCOLS = new Set([\"http:\", \"https:\"]);\n\n/**\n * Validate that a baseURL uses http or https.\n * Throws immediately at adapter creation time (not at call time) to catch config errors early.\n */\nexport function validateBaseURL(baseURL: string): void {\n try {\n const url = new URL(baseURL);\n if (!ALLOWED_PROTOCOLS.has(url.protocol)) {\n throw new Error(\n `[Directive] Invalid baseURL protocol \"${url.protocol}\" – only http: and https: are 
allowed`,\n );\n }\n } catch (err) {\n if (err instanceof Error && err.message.startsWith(\"[Directive]\")) {\n throw err;\n }\n\n throw new Error(\n `[Directive] Invalid baseURL \"${baseURL}\" – must be a valid URL (e.g. \"https://api.openai.com/v1\")`,\n );\n }\n}\n\n// ============================================================================\n// createRunner Helper\n// ============================================================================\n\n/** Parsed response from an LLM provider */\nexport interface ParsedResponse {\n text: string;\n totalTokens: number;\n /** Input token count, when available from the provider */\n inputTokens?: number;\n /** Output token count, when available from the provider */\n outputTokens?: number;\n}\n\n/** Options for creating an AgentRunner from buildRequest/parseResponse */\nexport interface CreateRunnerOptions {\n fetch?: typeof globalThis.fetch;\n buildRequest: (\n agent: AgentLike,\n input: string,\n messages: Message[]\n ) => { url: string; init: RequestInit };\n parseResponse: (\n response: Response,\n messages: Message[]\n ) => Promise<ParsedResponse>;\n parseOutput?: <T>(text: string) => T;\n /** Lifecycle hooks for tracing, logging, and metrics */\n hooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner from buildRequest/parseResponse helpers.\n * Reduces ~50 lines of fetch boilerplate to ~20 lines of configuration.\n *\n * Supports lifecycle hooks for observability:\n * - `onBeforeCall` fires before each API request\n * - `onAfterCall` fires after a successful response (includes token breakdown)\n * - `onError` fires when the request fails\n *\n * @example\n * ```typescript\n * const runClaude = createRunner({\n * buildRequest: (agent, input) => ({\n * url: \"/api/claude\",\n * init: {\n * method: \"POST\",\n * headers: { \"Content-Type\": \"application/json\" },\n * body: JSON.stringify({\n * model: agent.model ?? \"claude-haiku-4-5-20251001\",\n * system: agent.instructions ?? 
\"\",\n * messages: [{ role: \"user\", content: input }],\n * }),\n * },\n * }),\n * parseResponse: async (res) => {\n * const data = await res.json();\n * const inputTokens = data.usage?.input_tokens ?? 0;\n * const outputTokens = data.usage?.output_tokens ?? 0;\n * return {\n * text: data.content?.[0]?.text ?? \"\",\n * totalTokens: inputTokens + outputTokens,\n * inputTokens,\n * outputTokens,\n * };\n * },\n * hooks: {\n * onAfterCall: ({ durationMs, tokenUsage }) => {\n * console.log(`LLM call: ${durationMs}ms, ${tokenUsage.inputTokens}in/${tokenUsage.outputTokens}out`);\n * },\n * },\n * });\n * ```\n */\nexport function createRunner(options: CreateRunnerOptions): AgentRunner {\n const {\n fetch: fetchFn = globalThis.fetch,\n buildRequest,\n parseResponse,\n parseOutput,\n hooks,\n } = options;\n\n const defaultParseOutput = <T>(text: string): T => {\n try {\n return JSON.parse(text) as T;\n } catch {\n return text as unknown as T;\n }\n };\n\n const parse = parseOutput ?? defaultParseOutput;\n\n return async <T = unknown>(\n agent: AgentLike,\n input: string,\n runOptions?: RunOptions\n ): Promise<RunResult<T>> => {\n const startTime = Date.now();\n hooks?.onBeforeCall?.({ agent, input, timestamp: startTime });\n\n const messages: Message[] = [{ role: \"user\", content: input }];\n\n try {\n const { url, init } = buildRequest(agent, input, messages);\n\n const fetchInit: RequestInit = runOptions?.signal\n ? { ...init, signal: runOptions.signal }\n : init;\n\n const response = await fetchFn(url, fetchInit);\n\n if (!response.ok) {\n const errBody = await response.text().catch(() => \"\");\n\n throw new Error(\n `[Directive] AgentRunner request failed: ${response.status} ${response.statusText}${errBody ? ` – ${errBody.slice(0, 300)}` : \"\"}`,\n );\n }\n\n const parsed = await parseResponse(response, messages);\n const tokenUsage: TokenUsage = {\n inputTokens: parsed.inputTokens ?? 0,\n outputTokens: parsed.outputTokens ?? 
0,\n };\n\n const assistantMessage: Message = { role: \"assistant\", content: parsed.text };\n const allMessages: Message[] = [...messages, assistantMessage];\n\n runOptions?.onMessage?.(assistantMessage);\n\n const durationMs = Date.now() - startTime;\n hooks?.onAfterCall?.({\n agent,\n input,\n output: parsed.text,\n totalTokens: parsed.totalTokens,\n tokenUsage,\n durationMs,\n timestamp: Date.now(),\n });\n\n return {\n output: parse<T>(parsed.text),\n messages: allMessages,\n toolCalls: [],\n totalTokens: parsed.totalTokens,\n tokenUsage,\n };\n } catch (err) {\n const durationMs = Date.now() - startTime;\n if (err instanceof Error) {\n hooks?.onError?.({\n agent,\n input,\n error: err,\n durationMs,\n timestamp: Date.now(),\n });\n }\n\n throw err;\n }\n };\n}\n","/**\n * @directive-run/ai/openai\n *\n * OpenAI adapter for Directive AI. Provides runners and embedders\n * for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)\n *\n * @example\n * ```typescript\n * import { createOpenAIRunner, createOpenAIEmbedder } from '@directive-run/ai/openai';\n *\n * const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });\n * const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! 
});\n * ```\n */\n\nimport { createRunner, validateBaseURL } from \"../helpers.js\";\nimport type { AdapterHooks, AgentRunner, Message, TokenUsage } from \"../types.js\";\nimport type { StreamingCallbackRunner } from \"../stack.js\";\nimport type { EmbedderFn, Embedding } from \"../guardrails/semantic-cache.js\";\n\n// ============================================================================\n// Pricing Constants\n// ============================================================================\n\n/**\n * OpenAI model pricing (USD per million tokens).\n *\n * Use with `estimateCost()` for per-call cost tracking:\n * ```typescript\n * import { estimateCost } from '@directive-run/ai';\n * import { OPENAI_PRICING } from '@directive-run/ai/openai';\n *\n * const cost =\n * estimateCost(result.tokenUsage!.inputTokens, OPENAI_PRICING[\"gpt-4o\"].input) +\n * estimateCost(result.tokenUsage!.outputTokens, OPENAI_PRICING[\"gpt-4o\"].output);\n * ```\n *\n * **Note:** Pricing changes over time. These values are provided as a convenience\n * and may not reflect the latest rates. 
Always verify at https://openai.com/pricing\n */\nexport const OPENAI_PRICING: Record<string, { input: number; output: number }> = {\n\t\"gpt-4o\": { input: 2.5, output: 10 },\n\t\"gpt-4o-mini\": { input: 0.15, output: 0.6 },\n\t\"gpt-4-turbo\": { input: 10, output: 30 },\n\t\"o3-mini\": { input: 1.1, output: 4.4 },\n};\n\n// ============================================================================\n// OpenAI Runner\n// ============================================================================\n\n/** Options for createOpenAIRunner */\nexport interface OpenAIRunnerOptions {\n\tapiKey: string;\n\tmodel?: string;\n\tmaxTokens?: number;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** @default undefined */\n\ttimeoutMs?: number;\n\t/** Lifecycle hooks for tracing, logging, and metrics */\n\thooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)\n *\n * Returns `tokenUsage` with input/output breakdown for cost tracking.\n *\n * @example\n * ```typescript\n * // OpenAI\n * const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });\n *\n * // Azure OpenAI\n * const azure = createOpenAIRunner({\n * apiKey: process.env.AZURE_KEY!,\n * baseURL: \"https://your-resource.openai.azure.com/v1\",\n * });\n *\n * // Together.ai (OpenAI-compatible)\n * const together = createOpenAIRunner({\n * apiKey: process.env.TOGETHER_KEY!,\n * baseURL: \"https://api.together.xyz/v1\",\n * });\n * ```\n */\nexport function createOpenAIRunner(options: OpenAIRunnerOptions): AgentRunner {\n\tconst {\n\t\tapiKey,\n\t\tmodel = \"gpt-4o\",\n\t\tmaxTokens,\n\t\tbaseURL = \"https://api.openai.com/v1\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\ttimeoutMs,\n\t\thooks,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\tif (typeof process !== \"undefined\" && process.env?.NODE_ENV !== \"production\" && !apiKey) {\n\t\tconsole.warn(\"[Directive] createOpenAIRunner: apiKey is empty. 
API calls will fail.\");\n\t}\n\n\treturn createRunner({\n\t\tfetch: fetchFn,\n\t\thooks,\n\t\tbuildRequest: (agent, _input, messages) => ({\n\t\t\turl: `${baseURL}/chat/completions`,\n\t\t\tinit: {\n\t\t\t\tmethod: \"POST\",\n\t\t\t\theaders: {\n\t\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t\t},\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\tmodel: agent.model ?? model,\n\t\t\t\t\t...(maxTokens != null ? { max_tokens: maxTokens } : {}),\n\t\t\t\t\tmessages: [\n\t\t\t\t\t\t...(agent.instructions\n\t\t\t\t\t\t\t? [{ role: \"system\", content: agent.instructions }]\n\t\t\t\t\t\t\t: []),\n\t\t\t\t\t\t...messages.map((m) => ({ role: m.role, content: m.content })),\n\t\t\t\t\t],\n\t\t\t\t}),\n\t\t\t\t...(timeoutMs != null ? { signal: AbortSignal.timeout(timeoutMs) } : {}),\n\t\t\t},\n\t\t}),\n\t\tparseResponse: async (res) => {\n\t\t\tconst data = await res.json();\n\t\t\tconst text = data.choices?.[0]?.message?.content ?? \"\";\n\t\t\tconst inputTokens = data.usage?.prompt_tokens ?? 0;\n\t\t\tconst outputTokens = data.usage?.completion_tokens ?? 0;\n\n\t\t\treturn {\n\t\t\t\ttext,\n\t\t\t\ttotalTokens: inputTokens + outputTokens,\n\t\t\t\tinputTokens,\n\t\t\t\toutputTokens,\n\t\t\t};\n\t\t},\n\t});\n}\n\n// ============================================================================\n// OpenAI Embedder\n// ============================================================================\n\n/** Options for createOpenAIEmbedder */\nexport interface OpenAIEmbedderOptions {\n\tapiKey: string;\n\tmodel?: string;\n\tdimensions?: number;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** @default 30000 */\n\ttimeoutMs?: number;\n}\n\n/**\n * Create an EmbedderFn that calls the OpenAI embeddings API.\n *\n * @example\n * ```typescript\n * const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! 
});\n * const embedding = await embedder('How do constraints work?');\n * ```\n */\nexport function createOpenAIEmbedder(\n\toptions: OpenAIEmbedderOptions,\n): EmbedderFn {\n\tconst {\n\t\tapiKey,\n\t\tmodel = \"text-embedding-3-small\",\n\t\tdimensions = 1536,\n\t\tbaseURL = \"https://api.openai.com/v1\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\ttimeoutMs,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\tif (typeof process !== \"undefined\" && process.env?.NODE_ENV !== \"production\" && !apiKey) {\n\t\tconsole.warn(\"[Directive] createOpenAIEmbedder: apiKey is empty. API calls will fail.\");\n\t}\n\n\treturn async (text: string): Promise<Embedding> => {\n\t\tconst response = await fetchFn(`${baseURL}/embeddings`, {\n\t\t\tmethod: \"POST\",\n\t\t\theaders: {\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t},\n\t\t\tbody: JSON.stringify({ model, input: text, dimensions }),\n\t\t\tsignal: AbortSignal.timeout(timeoutMs ?? 30_000),\n\t\t});\n\n\t\tif (!response.ok) {\n\t\t\tconst errBody = await response.text().catch(() => \"\");\n\n\t\t\tthrow new Error(\n\t\t\t\t`[Directive] OpenAI embedding failed: ${response.status}${errBody ? 
` – ${errBody.slice(0, 200)}` : \"\"}`,\n\t\t\t);\n\t\t}\n\n\t\tconst data = (await response.json()) as {\n\t\t\tdata: Array<{ embedding: number[] }>;\n\t\t};\n\n\t\tconst entry = data.data[0];\n\t\tif (!entry) {\n\t\t\tthrow new Error(\n\t\t\t\t\"[Directive] OpenAI embedding response contained no data entries\",\n\t\t\t);\n\t\t}\n\n\t\treturn entry.embedding;\n\t};\n}\n\n// ============================================================================\n// OpenAI Streaming Runner\n// ============================================================================\n\n/** Options for createOpenAIStreamingRunner */\nexport interface OpenAIStreamingRunnerOptions {\n\tapiKey: string;\n\tmodel?: string;\n\tmaxTokens?: number;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** Lifecycle hooks for tracing, logging, and metrics */\n\thooks?: AdapterHooks;\n}\n\n/**\n * Create a StreamingCallbackRunner for OpenAI-compatible chat completions\n * with server-sent events. Can be used standalone or paired with `createOpenAIRunner`.\n *\n * Returns `tokenUsage` with input/output breakdown for cost tracking.\n *\n * @example\n * ```typescript\n * const streamingRunner = createOpenAIStreamingRunner({\n * apiKey: process.env.OPENAI_API_KEY!,\n * });\n * const stack = createAgentStack({\n * runner: createOpenAIRunner({ apiKey }),\n * streaming: { runner: streamingRunner },\n * agents: { ... },\n * });\n * ```\n */\nexport function createOpenAIStreamingRunner(\n\toptions: OpenAIStreamingRunnerOptions,\n): StreamingCallbackRunner {\n\tconst {\n\t\tapiKey,\n\t\tmodel = \"gpt-4o\",\n\t\tmaxTokens,\n\t\tbaseURL = \"https://api.openai.com/v1\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\thooks,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\tif (typeof process !== \"undefined\" && process.env?.NODE_ENV !== \"production\" && !apiKey) {\n\t\tconsole.warn(\"[Directive] createOpenAIStreamingRunner: apiKey is empty. 
API calls will fail.\");\n\t}\n\n\treturn async (agent, input, callbacks) => {\n\t\tconst startTime = Date.now();\n\t\thooks?.onBeforeCall?.({ agent, input, timestamp: startTime });\n\n\t\ttry {\n\t\t\tconst response = await fetchFn(`${baseURL}/chat/completions`, {\n\t\t\t\tmethod: \"POST\",\n\t\t\t\theaders: {\n\t\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t\t},\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\tmodel: agent.model ?? model,\n\t\t\t\t\t...(maxTokens != null ? { max_tokens: maxTokens } : {}),\n\t\t\t\t\tmessages: [\n\t\t\t\t\t\t...(agent.instructions\n\t\t\t\t\t\t\t? [{ role: \"system\", content: agent.instructions }]\n\t\t\t\t\t\t\t: []),\n\t\t\t\t\t\t{ role: \"user\", content: input },\n\t\t\t\t\t],\n\t\t\t\t\tstream: true,\n\t\t\t\t\tstream_options: { include_usage: true },\n\t\t\t\t}),\n\t\t\t\tsignal: callbacks.signal,\n\t\t\t});\n\n\t\t\tif (!response.ok) {\n\t\t\t\tconst errBody = await response.text().catch(() => \"\");\n\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`[Directive] OpenAI streaming error ${response.status}${errBody ? ` – ${errBody.slice(0, 200)}` : \"\"}`,\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst reader = response.body?.getReader();\n\t\t\tif (!reader) {\n\t\t\t\tthrow new Error(\"[Directive] No response body\");\n\t\t\t}\n\n\t\t\tconst decoder = new TextDecoder();\n\t\t\tlet buf = \"\";\n\t\t\tlet fullText = \"\";\n\t\t\tlet promptTokens = 0;\n\t\t\tlet completionTokens = 0;\n\n\t\t\ttry {\n\t\t\t\twhile (true) {\n\t\t\t\t\tconst { done, value } = await reader.read();\n\t\t\t\t\tif (done) {\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tbuf += decoder.decode(value, { stream: true });\n\t\t\t\t\tconst lines = buf.split(\"\\n\");\n\t\t\t\t\tbuf = lines.pop() ?? 
\"\";\n\n\t\t\t\t\tfor (const line of lines) {\n\t\t\t\t\t\tif (!line.startsWith(\"data: \")) {\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tconst data = line.slice(6).trim();\n\t\t\t\t\t\tif (data === \"[DONE]\") {\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tconst event = JSON.parse(data);\n\n\t\t\t\t\t\t\t// Extract token content from delta\n\t\t\t\t\t\t\tconst delta = event.choices?.[0]?.delta;\n\t\t\t\t\t\t\tif (delta?.content) {\n\t\t\t\t\t\t\t\tfullText += delta.content;\n\t\t\t\t\t\t\t\tcallbacks.onToken?.(delta.content);\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Extract usage from the final chunk (stream_options: include_usage)\n\t\t\t\t\t\t\tif (event.usage) {\n\t\t\t\t\t\t\t\tpromptTokens = event.usage.prompt_tokens ?? 0;\n\t\t\t\t\t\t\t\tcompletionTokens = event.usage.completion_tokens ?? 0;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} catch (parseErr) {\n\t\t\t\t\t\t\tif (parseErr instanceof SyntaxError) {\n\t\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t\ttypeof process !== \"undefined\" &&\n\t\t\t\t\t\t\t\t\tprocess.env?.NODE_ENV === \"development\"\n\t\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\t\t\t\t\t\"[Directive] Malformed SSE event from OpenAI:\",\n\t\t\t\t\t\t\t\t\t\tdata,\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tthrow parseErr;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} finally {\n\t\t\t\treader.cancel().catch(() => {});\n\t\t\t}\n\n\t\t\tconst assistantMsg: Message = { role: \"assistant\", content: fullText };\n\t\t\tcallbacks.onMessage?.(assistantMsg);\n\n\t\t\tconst tokenUsage: TokenUsage = {\n\t\t\t\tinputTokens: promptTokens,\n\t\t\t\toutputTokens: completionTokens,\n\t\t\t};\n\t\t\tconst totalTokens = promptTokens + completionTokens;\n\n\t\t\thooks?.onAfterCall?.({\n\t\t\t\tagent,\n\t\t\t\tinput,\n\t\t\t\toutput: fullText,\n\t\t\t\ttotalTokens,\n\t\t\t\ttokenUsage,\n\t\t\t\tdurationMs: Date.now() - startTime,\n\t\t\t\ttimestamp: 
Date.now(),\n\t\t\t});\n\n\t\t\treturn {\n\t\t\t\toutput: fullText,\n\t\t\t\tmessages: [{ role: \"user\" as const, content: input }, assistantMsg],\n\t\t\t\ttoolCalls: [],\n\t\t\t\ttotalTokens,\n\t\t\t\ttokenUsage,\n\t\t\t};\n\t\t} catch (err) {\n\t\t\tif (err instanceof Error) {\n\t\t\t\thooks?.onError?.({\n\t\t\t\t\tagent,\n\t\t\t\t\tinput,\n\t\t\t\t\terror: err,\n\t\t\t\t\tdurationMs: Date.now() - startTime,\n\t\t\t\t\ttimestamp: Date.now(),\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tthrow err;\n\t\t}\n\t};\n}\n"]}
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
import { f as AdapterHooks, c as AgentRunner } from './types-BKCdgKC-.cjs';
|
|
2
|
+
import { EmbedderFn, StreamingCallbackRunner } from './index.cjs';
|
|
3
|
+
import '@directive-run/core';
|
|
4
|
+
import '@directive-run/core/plugins';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* @directive-run/ai/openai
|
|
8
|
+
*
|
|
9
|
+
* OpenAI adapter for Directive AI. Provides runners and embedders
|
|
10
|
+
* for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)
|
|
11
|
+
*
|
|
12
|
+
* @example
|
|
13
|
+
* ```typescript
|
|
14
|
+
* import { createOpenAIRunner, createOpenAIEmbedder } from '@directive-run/ai/openai';
|
|
15
|
+
*
|
|
16
|
+
* const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });
|
|
17
|
+
* const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! });
|
|
18
|
+
* ```
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* OpenAI model pricing (USD per million tokens).
|
|
23
|
+
*
|
|
24
|
+
* Use with `estimateCost()` for per-call cost tracking:
|
|
25
|
+
* ```typescript
|
|
26
|
+
* import { estimateCost } from '@directive-run/ai';
|
|
27
|
+
* import { OPENAI_PRICING } from '@directive-run/ai/openai';
|
|
28
|
+
*
|
|
29
|
+
* const cost =
|
|
30
|
+
* estimateCost(result.tokenUsage!.inputTokens, OPENAI_PRICING["gpt-4o"].input) +
|
|
31
|
+
* estimateCost(result.tokenUsage!.outputTokens, OPENAI_PRICING["gpt-4o"].output);
|
|
32
|
+
* ```
|
|
33
|
+
*
|
|
34
|
+
* **Note:** Pricing changes over time. These values are provided as a convenience
|
|
35
|
+
* and may not reflect the latest rates. Always verify at https://openai.com/pricing
|
|
36
|
+
*/
|
|
37
|
+
declare const OPENAI_PRICING: Record<string, {
|
|
38
|
+
input: number;
|
|
39
|
+
output: number;
|
|
40
|
+
}>;
|
|
41
|
+
/** Options for createOpenAIRunner */
|
|
42
|
+
interface OpenAIRunnerOptions {
|
|
43
|
+
apiKey: string;
|
|
44
|
+
model?: string;
|
|
45
|
+
maxTokens?: number;
|
|
46
|
+
baseURL?: string;
|
|
47
|
+
fetch?: typeof globalThis.fetch;
|
|
48
|
+
/** @default undefined */
|
|
49
|
+
timeoutMs?: number;
|
|
50
|
+
/** Lifecycle hooks for tracing, logging, and metrics */
|
|
51
|
+
hooks?: AdapterHooks;
|
|
52
|
+
}
|
|
53
|
+
/**
|
|
54
|
+
* Create an AgentRunner for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)
|
|
55
|
+
*
|
|
56
|
+
* Returns `tokenUsage` with input/output breakdown for cost tracking.
|
|
57
|
+
*
|
|
58
|
+
* @example
|
|
59
|
+
* ```typescript
|
|
60
|
+
* // OpenAI
|
|
61
|
+
* const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });
|
|
62
|
+
*
|
|
63
|
+
* // Azure OpenAI
|
|
64
|
+
* const azure = createOpenAIRunner({
|
|
65
|
+
* apiKey: process.env.AZURE_KEY!,
|
|
66
|
+
* baseURL: "https://your-resource.openai.azure.com/v1",
|
|
67
|
+
* });
|
|
68
|
+
*
|
|
69
|
+
* // Together.ai (OpenAI-compatible)
|
|
70
|
+
* const together = createOpenAIRunner({
|
|
71
|
+
* apiKey: process.env.TOGETHER_KEY!,
|
|
72
|
+
* baseURL: "https://api.together.xyz/v1",
|
|
73
|
+
* });
|
|
74
|
+
* ```
|
|
75
|
+
*/
|
|
76
|
+
declare function createOpenAIRunner(options: OpenAIRunnerOptions): AgentRunner;
|
|
77
|
+
/** Options for createOpenAIEmbedder */
|
|
78
|
+
interface OpenAIEmbedderOptions {
|
|
79
|
+
apiKey: string;
|
|
80
|
+
model?: string;
|
|
81
|
+
dimensions?: number;
|
|
82
|
+
baseURL?: string;
|
|
83
|
+
fetch?: typeof globalThis.fetch;
|
|
84
|
+
/** @default 30000 */
|
|
85
|
+
timeoutMs?: number;
|
|
86
|
+
}
|
|
87
|
+
/**
|
|
88
|
+
* Create an EmbedderFn that calls the OpenAI embeddings API.
|
|
89
|
+
*
|
|
90
|
+
* @example
|
|
91
|
+
* ```typescript
|
|
92
|
+
* const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! });
|
|
93
|
+
* const embedding = await embedder('How do constraints work?');
|
|
94
|
+
* ```
|
|
95
|
+
*/
|
|
96
|
+
declare function createOpenAIEmbedder(options: OpenAIEmbedderOptions): EmbedderFn;
|
|
97
|
+
/** Options for createOpenAIStreamingRunner */
|
|
98
|
+
interface OpenAIStreamingRunnerOptions {
|
|
99
|
+
apiKey: string;
|
|
100
|
+
model?: string;
|
|
101
|
+
maxTokens?: number;
|
|
102
|
+
baseURL?: string;
|
|
103
|
+
fetch?: typeof globalThis.fetch;
|
|
104
|
+
/** Lifecycle hooks for tracing, logging, and metrics */
|
|
105
|
+
hooks?: AdapterHooks;
|
|
106
|
+
}
|
|
107
|
+
/**
|
|
108
|
+
* Create a StreamingCallbackRunner for OpenAI-compatible chat completions
|
|
109
|
+
* with server-sent events. Can be used standalone or paired with `createOpenAIRunner`.
|
|
110
|
+
*
|
|
111
|
+
* Returns `tokenUsage` with input/output breakdown for cost tracking.
|
|
112
|
+
*
|
|
113
|
+
* @example
|
|
114
|
+
* ```typescript
|
|
115
|
+
* const streamingRunner = createOpenAIStreamingRunner({
|
|
116
|
+
* apiKey: process.env.OPENAI_API_KEY!,
|
|
117
|
+
* });
|
|
118
|
+
* const stack = createAgentStack({
|
|
119
|
+
* runner: createOpenAIRunner({ apiKey }),
|
|
120
|
+
* streaming: { runner: streamingRunner },
|
|
121
|
+
* agents: { ... },
|
|
122
|
+
* });
|
|
123
|
+
* ```
|
|
124
|
+
*/
|
|
125
|
+
declare function createOpenAIStreamingRunner(options: OpenAIStreamingRunnerOptions): StreamingCallbackRunner;
|
|
126
|
+
|
|
127
|
+
export { OPENAI_PRICING, type OpenAIEmbedderOptions, type OpenAIRunnerOptions, type OpenAIStreamingRunnerOptions, createOpenAIEmbedder, createOpenAIRunner, createOpenAIStreamingRunner };
|
package/dist/openai.d.ts
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
import { f as AdapterHooks, c as AgentRunner } from './types-BKCdgKC-.js';
|
|
2
|
+
import { EmbedderFn, StreamingCallbackRunner } from './index.js';
|
|
3
|
+
import '@directive-run/core';
|
|
4
|
+
import '@directive-run/core/plugins';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* @directive-run/ai/openai
|
|
8
|
+
*
|
|
9
|
+
* OpenAI adapter for Directive AI. Provides runners and embedders
|
|
10
|
+
* for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)
|
|
11
|
+
*
|
|
12
|
+
* @example
|
|
13
|
+
* ```typescript
|
|
14
|
+
* import { createOpenAIRunner, createOpenAIEmbedder } from '@directive-run/ai/openai';
|
|
15
|
+
*
|
|
16
|
+
* const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });
|
|
17
|
+
* const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! });
|
|
18
|
+
* ```
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* OpenAI model pricing (USD per million tokens).
|
|
23
|
+
*
|
|
24
|
+
* Use with `estimateCost()` for per-call cost tracking:
|
|
25
|
+
* ```typescript
|
|
26
|
+
* import { estimateCost } from '@directive-run/ai';
|
|
27
|
+
* import { OPENAI_PRICING } from '@directive-run/ai/openai';
|
|
28
|
+
*
|
|
29
|
+
* const cost =
|
|
30
|
+
* estimateCost(result.tokenUsage!.inputTokens, OPENAI_PRICING["gpt-4o"].input) +
|
|
31
|
+
* estimateCost(result.tokenUsage!.outputTokens, OPENAI_PRICING["gpt-4o"].output);
|
|
32
|
+
* ```
|
|
33
|
+
*
|
|
34
|
+
* **Note:** Pricing changes over time. These values are provided as a convenience
|
|
35
|
+
* and may not reflect the latest rates. Always verify at https://openai.com/pricing
|
|
36
|
+
*/
|
|
37
|
+
declare const OPENAI_PRICING: Record<string, {
|
|
38
|
+
input: number;
|
|
39
|
+
output: number;
|
|
40
|
+
}>;
|
|
41
|
+
/** Options for createOpenAIRunner */
|
|
42
|
+
interface OpenAIRunnerOptions {
|
|
43
|
+
apiKey: string;
|
|
44
|
+
model?: string;
|
|
45
|
+
maxTokens?: number;
|
|
46
|
+
baseURL?: string;
|
|
47
|
+
fetch?: typeof globalThis.fetch;
|
|
48
|
+
/** @default undefined */
|
|
49
|
+
timeoutMs?: number;
|
|
50
|
+
/** Lifecycle hooks for tracing, logging, and metrics */
|
|
51
|
+
hooks?: AdapterHooks;
|
|
52
|
+
}
|
|
53
|
+
/**
|
|
54
|
+
* Create an AgentRunner for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)
|
|
55
|
+
*
|
|
56
|
+
* Returns `tokenUsage` with input/output breakdown for cost tracking.
|
|
57
|
+
*
|
|
58
|
+
* @example
|
|
59
|
+
* ```typescript
|
|
60
|
+
* // OpenAI
|
|
61
|
+
* const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });
|
|
62
|
+
*
|
|
63
|
+
* // Azure OpenAI
|
|
64
|
+
* const azure = createOpenAIRunner({
|
|
65
|
+
* apiKey: process.env.AZURE_KEY!,
|
|
66
|
+
* baseURL: "https://your-resource.openai.azure.com/v1",
|
|
67
|
+
* });
|
|
68
|
+
*
|
|
69
|
+
* // Together.ai (OpenAI-compatible)
|
|
70
|
+
* const together = createOpenAIRunner({
|
|
71
|
+
* apiKey: process.env.TOGETHER_KEY!,
|
|
72
|
+
* baseURL: "https://api.together.xyz/v1",
|
|
73
|
+
* });
|
|
74
|
+
* ```
|
|
75
|
+
*/
|
|
76
|
+
declare function createOpenAIRunner(options: OpenAIRunnerOptions): AgentRunner;
|
|
77
|
+
/** Options for createOpenAIEmbedder */
|
|
78
|
+
interface OpenAIEmbedderOptions {
|
|
79
|
+
apiKey: string;
|
|
80
|
+
model?: string;
|
|
81
|
+
dimensions?: number;
|
|
82
|
+
baseURL?: string;
|
|
83
|
+
fetch?: typeof globalThis.fetch;
|
|
84
|
+
/** @default 30000 */
|
|
85
|
+
timeoutMs?: number;
|
|
86
|
+
}
|
|
87
|
+
/**
|
|
88
|
+
* Create an EmbedderFn that calls the OpenAI embeddings API.
|
|
89
|
+
*
|
|
90
|
+
* @example
|
|
91
|
+
* ```typescript
|
|
92
|
+
* const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! });
|
|
93
|
+
* const embedding = await embedder('How do constraints work?');
|
|
94
|
+
* ```
|
|
95
|
+
*/
|
|
96
|
+
declare function createOpenAIEmbedder(options: OpenAIEmbedderOptions): EmbedderFn;
|
|
97
|
+
/** Options for createOpenAIStreamingRunner */
|
|
98
|
+
interface OpenAIStreamingRunnerOptions {
|
|
99
|
+
apiKey: string;
|
|
100
|
+
model?: string;
|
|
101
|
+
maxTokens?: number;
|
|
102
|
+
baseURL?: string;
|
|
103
|
+
fetch?: typeof globalThis.fetch;
|
|
104
|
+
/** Lifecycle hooks for tracing, logging, and metrics */
|
|
105
|
+
hooks?: AdapterHooks;
|
|
106
|
+
}
|
|
107
|
+
/**
|
|
108
|
+
* Create a StreamingCallbackRunner for OpenAI-compatible chat completions
|
|
109
|
+
* with server-sent events. Can be used standalone or paired with `createOpenAIRunner`.
|
|
110
|
+
*
|
|
111
|
+
* Returns `tokenUsage` with input/output breakdown for cost tracking.
|
|
112
|
+
*
|
|
113
|
+
* @example
|
|
114
|
+
* ```typescript
|
|
115
|
+
* const streamingRunner = createOpenAIStreamingRunner({
|
|
116
|
+
* apiKey: process.env.OPENAI_API_KEY!,
|
|
117
|
+
* });
|
|
118
|
+
* const stack = createAgentStack({
|
|
119
|
+
* runner: createOpenAIRunner({ apiKey }),
|
|
120
|
+
* streaming: { runner: streamingRunner },
|
|
121
|
+
* agents: { ... },
|
|
122
|
+
* });
|
|
123
|
+
* ```
|
|
124
|
+
*/
|
|
125
|
+
declare function createOpenAIStreamingRunner(options: OpenAIStreamingRunnerOptions): StreamingCallbackRunner;
|
|
126
|
+
|
|
127
|
+
export { OPENAI_PRICING, type OpenAIEmbedderOptions, type OpenAIRunnerOptions, type OpenAIStreamingRunnerOptions, createOpenAIEmbedder, createOpenAIRunner, createOpenAIStreamingRunner };
|
package/dist/openai.js
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
1
|
+
var S=new Set(["http:","https:"]);function O(i){try{let n=new URL(i);if(!S.has(n.protocol))throw new Error(`[Directive] Invalid baseURL protocol "${n.protocol}" \u2013 only http: and https: are allowed`)}catch(n){throw n instanceof Error&&n.message.startsWith("[Directive]")?n:new Error(`[Directive] Invalid baseURL "${i}" \u2013 must be a valid URL (e.g. "https://api.openai.com/v1")`)}}function I(i){let{fetch:n=globalThis.fetch,buildRequest:f,parseResponse:u,parseOutput:c,hooks:l}=i,r=c??(e=>{try{return JSON.parse(e)}catch{return e}});return async(e,o,s)=>{let t=Date.now();l?.onBeforeCall?.({agent:e,input:o,timestamp:t});let p=[{role:"user",content:o}];try{let{url:h,init:g}=f(e,o,p),k=s?.signal?{...g,signal:s.signal}:g,d=await n(h,k);if(!d.ok){let A=await d.text().catch(()=>"");throw new Error(`[Directive] AgentRunner request failed: ${d.status} ${d.statusText}${A?` \u2013 ${A.slice(0,300)}`:""}`)}let a=await u(d,p),T={inputTokens:a.inputTokens??0,outputTokens:a.outputTokens??0},w={role:"assistant",content:a.text},R=[...p,w];s?.onMessage?.(w);let y=Date.now()-t;return l?.onAfterCall?.({agent:e,input:o,output:a.text,totalTokens:a.totalTokens,tokenUsage:T,durationMs:y,timestamp:Date.now()}),{output:r(a.text),messages:R,toolCalls:[],totalTokens:a.totalTokens,tokenUsage:T}}catch(h){let g=Date.now()-t;throw h instanceof Error&&l?.onError?.({agent:e,input:o,error:h,durationMs:g,timestamp:Date.now()}),h}}}var $={"gpt-4o":{input:2.5,output:10},"gpt-4o-mini":{input:.15,output:.6},"gpt-4-turbo":{input:10,output:30},"o3-mini":{input:1.1,output:4.4}};function L(i){let{apiKey:n,model:f="gpt-4o",maxTokens:u,baseURL:c="https://api.openai.com/v1",fetch:l=globalThis.fetch,timeoutMs:m,hooks:r}=i;return O(c),typeof process<"u"&&process.env?.NODE_ENV!=="production"&&!n&&console.warn("[Directive] createOpenAIRunner: apiKey is empty. 
API calls will fail."),I({fetch:l,hooks:r,buildRequest:(e,o,s)=>({url:`${c}/chat/completions`,init:{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${n}`},body:JSON.stringify({model:e.model??f,...u!=null?{max_tokens:u}:{},messages:[...e.instructions?[{role:"system",content:e.instructions}]:[],...s.map(t=>({role:t.role,content:t.content}))]}),...m!=null?{signal:AbortSignal.timeout(m)}:{}}}),parseResponse:async e=>{let o=await e.json(),s=o.choices?.[0]?.message?.content??"",t=o.usage?.prompt_tokens??0,p=o.usage?.completion_tokens??0;return {text:s,totalTokens:t+p,inputTokens:t,outputTokens:p}}})}function N(i){let{apiKey:n,model:f="text-embedding-3-small",dimensions:u=1536,baseURL:c="https://api.openai.com/v1",fetch:l=globalThis.fetch,timeoutMs:m}=i;return O(c),typeof process<"u"&&process.env?.NODE_ENV!=="production"&&!n&&console.warn("[Directive] createOpenAIEmbedder: apiKey is empty. API calls will fail."),async r=>{let e=await l(`${c}/embeddings`,{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${n}`},body:JSON.stringify({model:f,input:r,dimensions:u}),signal:AbortSignal.timeout(m??3e4)});if(!e.ok){let t=await e.text().catch(()=>"");throw new Error(`[Directive] OpenAI embedding failed: ${e.status}${t?` \u2013 ${t.slice(0,200)}`:""}`)}let s=(await e.json()).data[0];if(!s)throw new Error("[Directive] OpenAI embedding response contained no data entries");return s.embedding}}function P(i){let{apiKey:n,model:f="gpt-4o",maxTokens:u,baseURL:c="https://api.openai.com/v1",fetch:l=globalThis.fetch,hooks:m}=i;return O(c),typeof process<"u"&&process.env?.NODE_ENV!=="production"&&!n&&console.warn("[Directive] createOpenAIStreamingRunner: apiKey is empty. 
API calls will fail."),async(r,e,o)=>{let s=Date.now();m?.onBeforeCall?.({agent:r,input:e,timestamp:s});try{let t=await l(`${c}/chat/completions`,{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${n}`},body:JSON.stringify({model:r.model??f,...u!=null?{max_tokens:u}:{},messages:[...r.instructions?[{role:"system",content:r.instructions}]:[],{role:"user",content:e}],stream:!0,stream_options:{include_usage:!0}}),signal:o.signal});if(!t.ok){let y=await t.text().catch(()=>"");throw new Error(`[Directive] OpenAI streaming error ${t.status}${y?` \u2013 ${y.slice(0,200)}`:""}`)}let p=t.body?.getReader();if(!p)throw new Error("[Directive] No response body");let h=new TextDecoder,g="",k="",d=0,a=0;try{for(;;){let{done:y,value:A}=await p.read();if(y)break;g+=h.decode(A,{stream:!0});let x=g.split(`
|
|
2
|
+
`);g=x.pop()??"";for(let D of x){if(!D.startsWith("data: "))continue;let E=D.slice(6).trim();if(E!=="[DONE]")try{let b=JSON.parse(E),v=b.choices?.[0]?.delta;v?.content&&(k+=v.content,o.onToken?.(v.content)),b.usage&&(d=b.usage.prompt_tokens??0,a=b.usage.completion_tokens??0);}catch(b){if(b instanceof SyntaxError)typeof process<"u"&&process.env?.NODE_ENV==="development"&&console.warn("[Directive] Malformed SSE event from OpenAI:",E);else throw b}}}}finally{p.cancel().catch(()=>{});}let T={role:"assistant",content:k};o.onMessage?.(T);let w={inputTokens:d,outputTokens:a},R=d+a;return m?.onAfterCall?.({agent:r,input:e,output:k,totalTokens:R,tokenUsage:w,durationMs:Date.now()-s,timestamp:Date.now()}),{output:k,messages:[{role:"user",content:e},T],toolCalls:[],totalTokens:R,tokenUsage:w}}catch(t){throw t instanceof Error&&m?.onError?.({agent:r,input:e,error:t,durationMs:Date.now()-s,timestamp:Date.now()}),t}}}export{$ as OPENAI_PRICING,N as createOpenAIEmbedder,L as createOpenAIRunner,P as createOpenAIStreamingRunner};//# sourceMappingURL=openai.js.map
|
|
3
|
+
//# sourceMappingURL=openai.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/helpers.ts","../src/adapters/openai.ts"],"names":["ALLOWED_PROTOCOLS","validateBaseURL","baseURL","url","err","createRunner","options","fetchFn","buildRequest","parseResponse","parseOutput","hooks","parse","text","agent","input","runOptions","startTime","messages","init","fetchInit","response","errBody","parsed","tokenUsage","assistantMessage","allMessages","durationMs","OPENAI_PRICING","createOpenAIRunner","apiKey","model","maxTokens","timeoutMs","_input","m","res","data","inputTokens","outputTokens","createOpenAIEmbedder","dimensions","entry","createOpenAIStreamingRunner","callbacks","reader","decoder","buf","fullText","promptTokens","completionTokens","done","value","lines","line","event","delta","parseErr","assistantMsg","totalTokens"],"mappings":"AAoDA,IAAMA,CAAAA,CAAoB,IAAI,GAAA,CAAI,CAAC,QAAS,QAAQ,CAAC,CAAA,CAM9C,SAASC,CAAAA,CAAgBC,CAAAA,CAAuB,CACrD,GAAI,CACF,IAAMC,CAAAA,CAAM,IAAI,GAAA,CAAID,CAAO,CAAA,CAC3B,GAAI,CAACF,CAAAA,CAAkB,GAAA,CAAIG,CAAAA,CAAI,QAAQ,CAAA,CACrC,MAAM,IAAI,KAAA,CACR,CAAA,sCAAA,EAAyCA,CAAAA,CAAI,QAAQ,CAAA,0CAAA,CACvD,CAEJ,CAAA,MAASC,CAAAA,CAAK,CACZ,MAAIA,aAAe,KAAA,EAASA,CAAAA,CAAI,OAAA,CAAQ,UAAA,CAAW,aAAa,CAAA,CACxDA,EAGF,IAAI,KAAA,CACR,CAAA,6BAAA,EAAgCF,CAAO,CAAA,+DAAA,CACzC,CACF,CACF,CA4EO,SAASG,CAAAA,CAAaC,CAAAA,CAA2C,CACtE,GAAM,CACJ,KAAA,CAAOC,CAAAA,CAAU,UAAA,CAAW,KAAA,CAC5B,YAAA,CAAAC,CAAAA,CACA,cAAAC,CAAAA,CACA,WAAA,CAAAC,CAAAA,CACA,KAAA,CAAAC,CACF,CAAA,CAAIL,EAUEM,CAAAA,CAAQF,CAAAA,GARiBG,CAAAA,EAAoB,CACjD,GAAI,CACF,OAAO,IAAA,CAAK,KAAA,CAAMA,CAAI,CACxB,CAAA,KAAQ,CACN,OAAOA,CACT,CACF,CAAA,CAAA,CAIA,OAAO,MACLC,CAAAA,CACAC,EACAC,CAAAA,GAC0B,CAC1B,IAAMC,CAAAA,CAAY,IAAA,CAAK,GAAA,GACvBN,CAAAA,EAAO,YAAA,GAAe,CAAE,KAAA,CAAAG,CAAAA,CAAO,KAAA,CAAAC,EAAO,SAAA,CAAWE,CAAU,CAAC,CAAA,CAE5D,IAAMC,EAAsB,CAAC,CAAE,IAAA,CAAM,MAAA,CAAQ,OAAA,CAASH,CAAM,CAAC,CAAA,CAE7D,GAAI,CACF,GAAM,CAAE,GAAA,CAAAZ,EAAK,IAAA,CAAAgB,CAAK,CAAA,CAAIX,CAAAA,CAAaM,CAAAA,CAAOC,CAAAA,CAAOG,CAAQ,CAAA,CAEnDE,CAAAA,CAAyBJ,CAAAA,EAAY,MAAA,CACvC,CAA
E,GAAGG,EAAM,MAAA,CAAQH,CAAAA,CAAW,MAAO,CAAA,CACrCG,CAAAA,CAEEE,CAAAA,CAAW,MAAMd,CAAAA,CAAQJ,CAAAA,CAAKiB,CAAS,CAAA,CAE7C,GAAI,CAACC,EAAS,EAAA,CAAI,CAChB,IAAMC,CAAAA,CAAU,MAAMD,CAAAA,CAAS,MAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACR,CAAA,wCAAA,EAA2CA,CAAAA,CAAS,MAAM,CAAA,CAAA,EAAIA,CAAAA,CAAS,UAAU,CAAA,EAAGC,CAAAA,CAAU,CAAA,QAAA,EAAMA,CAAAA,CAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,CAAA,CAAA,CAAK,EAAE,CAAA,CAClI,CACF,CAEA,IAAMC,CAAAA,CAAS,MAAMd,CAAAA,CAAcY,CAAAA,CAAUH,CAAQ,CAAA,CAC/CM,EAAyB,CAC7B,WAAA,CAAaD,CAAAA,CAAO,WAAA,EAAe,CAAA,CACnC,YAAA,CAAcA,EAAO,YAAA,EAAgB,CACvC,CAAA,CAEME,CAAAA,CAA4B,CAAE,IAAA,CAAM,YAAa,OAAA,CAASF,CAAAA,CAAO,IAAK,CAAA,CACtEG,CAAAA,CAAyB,CAAC,GAAGR,CAAAA,CAAUO,CAAgB,EAE7DT,CAAAA,EAAY,SAAA,GAAYS,CAAgB,CAAA,CAExC,IAAME,CAAAA,CAAa,IAAA,CAAK,GAAA,EAAI,CAAIV,EAChC,OAAAN,CAAAA,EAAO,WAAA,GAAc,CACnB,KAAA,CAAAG,CAAAA,CACA,MAAAC,CAAAA,CACA,MAAA,CAAQQ,CAAAA,CAAO,IAAA,CACf,WAAA,CAAaA,CAAAA,CAAO,YACpB,UAAA,CAAAC,CAAAA,CACA,UAAA,CAAAG,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,KAClB,CAAC,CAAA,CAEM,CACL,MAAA,CAAQf,CAAAA,CAASW,EAAO,IAAI,CAAA,CAC5B,QAAA,CAAUG,CAAAA,CACV,SAAA,CAAW,GACX,WAAA,CAAaH,CAAAA,CAAO,WAAA,CACpB,UAAA,CAAAC,CACF,CACF,OAASpB,CAAAA,CAAK,CACZ,IAAMuB,CAAAA,CAAa,IAAA,CAAK,GAAA,GAAQV,CAAAA,CAChC,MAAIb,CAAAA,YAAe,KAAA,EACjBO,CAAAA,EAAO,OAAA,GAAU,CACf,KAAA,CAAAG,CAAAA,CACA,KAAA,CAAAC,CAAAA,CACA,KAAA,CAAOX,CAAAA,CACP,WAAAuB,CAAAA,CACA,SAAA,CAAW,IAAA,CAAK,GAAA,EAClB,CAAC,EAGGvB,CACR,CACF,CACF,CCzMO,IAAMwB,CAAAA,CAAoE,CAChF,QAAA,CAAU,CAAE,KAAA,CAAO,GAAA,CAAK,MAAA,CAAQ,EAAG,EACnC,aAAA,CAAe,CAAE,KAAA,CAAO,GAAA,CAAM,MAAA,CAAQ,EAAI,EAC1C,aAAA,CAAe,CAAE,KAAA,CAAO,EAAA,CAAI,MAAA,CAAQ,EAAG,EACvC,SAAA,CAAW,CAAE,KAAA,CAAO,GAAA,CAAK,MAAA,CAAQ,GAAI,CACtC,EA0CO,SAASC,CAAAA,CAAmBvB,CAAAA,CAA2C,CAC7E,GAAM,CACL,MAAA,CAAAwB,CAAAA,CACA,KAAA,CAAAC,CAAAA,CAAQ,QAAA,CACR,SAAA,CAAAC,EACA,OAAA,CAAA9B,CAAAA,CAAU,2BAAA,CACV,KAAA,CAAOK,CAAAA,CAAU,UAAA,CAAW,MAC5B,SAAA,CAAA0B,CAAAA,CACA,KAAA,CAAAtB,CACD,CAAA,CAAIL,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,CAAA,CAEnB,OAAO,OAAA,CAAY,GAA
A,EAAe,QAAQ,GAAA,EAAK,QAAA,GAAa,YAAA,EAAgB,CAAC4B,CAAAA,EAChF,OAAA,CAAQ,KAAK,uEAAuE,CAAA,CAG9EzB,CAAAA,CAAa,CACnB,KAAA,CAAOE,CAAAA,CACP,MAAAI,CAAAA,CACA,YAAA,CAAc,CAACG,CAAAA,CAAOoB,CAAAA,CAAQhB,CAAAA,IAAc,CAC3C,GAAA,CAAK,CAAA,EAAGhB,CAAO,CAAA,iBAAA,CAAA,CACf,IAAA,CAAM,CACL,OAAQ,MAAA,CACR,OAAA,CAAS,CACR,cAAA,CAAgB,kBAAA,CAChB,aAAA,CAAe,UAAU4B,CAAM,CAAA,CAChC,CAAA,CACA,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CACpB,KAAA,CAAOhB,CAAAA,CAAM,KAAA,EAASiB,CAAAA,CACtB,GAAIC,CAAAA,EAAa,KAAO,CAAE,UAAA,CAAYA,CAAU,CAAA,CAAI,EAAC,CACrD,SAAU,CACT,GAAIlB,CAAAA,CAAM,YAAA,CACP,CAAC,CAAE,KAAM,QAAA,CAAU,OAAA,CAASA,CAAAA,CAAM,YAAa,CAAC,CAAA,CAChD,EAAC,CACJ,GAAGI,EAAS,GAAA,CAAKiB,CAAAA,GAAO,CAAE,IAAA,CAAMA,CAAAA,CAAE,IAAA,CAAM,OAAA,CAASA,CAAAA,CAAE,OAAQ,EAAE,CAC9D,CACD,CAAC,CAAA,CACD,GAAIF,CAAAA,EAAa,KAAO,CAAE,MAAA,CAAQ,WAAA,CAAY,OAAA,CAAQA,CAAS,CAAE,EAAI,EACtE,CACD,CAAA,CAAA,CACA,aAAA,CAAe,MAAOG,GAAQ,CAC7B,IAAMC,CAAAA,CAAO,MAAMD,CAAAA,CAAI,IAAA,GACjBvB,CAAAA,CAAOwB,CAAAA,CAAK,OAAA,GAAU,CAAC,CAAA,EAAG,OAAA,EAAS,SAAW,EAAA,CAC9CC,CAAAA,CAAcD,CAAAA,CAAK,KAAA,EAAO,aAAA,EAAiB,CAAA,CAC3CE,EAAeF,CAAAA,CAAK,KAAA,EAAO,iBAAA,EAAqB,CAAA,CAEtD,OAAO,CACN,KAAAxB,CAAAA,CACA,WAAA,CAAayB,CAAAA,CAAcC,CAAAA,CAC3B,WAAA,CAAAD,CAAAA,CACA,aAAAC,CACD,CACD,CACD,CAAC,CACF,CA0BO,SAASC,CAAAA,CACflC,CAAAA,CACa,CACb,GAAM,CACL,MAAA,CAAAwB,EACA,KAAA,CAAAC,CAAAA,CAAQ,wBAAA,CACR,UAAA,CAAAU,CAAAA,CAAa,IAAA,CACb,QAAAvC,CAAAA,CAAU,2BAAA,CACV,KAAA,CAAOK,CAAAA,CAAU,UAAA,CAAW,KAAA,CAC5B,UAAA0B,CACD,CAAA,CAAI3B,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,EAEnB,OAAO,OAAA,CAAY,GAAA,EAAe,OAAA,CAAQ,GAAA,EAAK,QAAA,GAAa,cAAgB,CAAC4B,CAAAA,EAChF,QAAQ,IAAA,CAAK,yEAAyE,EAGhF,MAAOjB,CAAAA,EAAqC,CAClD,IAAMQ,CAAAA,CAAW,MAAMd,EAAQ,CAAA,EAAGL,CAAO,CAAA,WAAA,CAAA,CAAe,CACvD,MAAA,CAAQ,MAAA,CACR,QAAS,CACR,cAAA,CAAgB,kBAAA,CAChB,aAAA,CAAe,CAAA,OAAA,EAAU4B,CAAM,EAChC,CAAA,CACA,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CAAE,KAAA,CAAAC,EAAO,KAAA,CAAOlB,CAAAA,CAAM,UAAA,CAAA4B,CAAW,CAAC,CAAA,CACvD,OAAQ,WAAA,CAAY,OAAA,CAAQR,CAAAA,EAAa,GAAM,CAChD,CAAC,EAED,GAAI,CAACZ,CAAAA,CAAS,EAAA
,CAAI,CACjB,IAAMC,EAAU,MAAMD,CAAAA,CAAS,IAAA,EAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACT,CAAA,qCAAA,EAAwCA,CAAAA,CAAS,MAAM,CAAA,EAAGC,CAAAA,CAAU,CAAA,QAAA,EAAMA,CAAAA,CAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,CAAA,CAAA,CAAK,EAAE,CAAA,CACvG,CACD,CAMA,IAAMoB,CAAAA,CAAAA,CAJQ,MAAMrB,CAAAA,CAAS,IAAA,EAAK,EAIf,IAAA,CAAK,CAAC,CAAA,CACzB,GAAI,CAACqB,CAAAA,CACJ,MAAM,IAAI,MACT,iEACD,CAAA,CAGD,OAAOA,CAAAA,CAAM,SACd,CACD,CAmCO,SAASC,CAAAA,CACfrC,CAAAA,CAC0B,CAC1B,GAAM,CACL,OAAAwB,CAAAA,CACA,KAAA,CAAAC,EAAQ,QAAA,CACR,SAAA,CAAAC,EACA,OAAA,CAAA9B,CAAAA,CAAU,2BAAA,CACV,KAAA,CAAOK,CAAAA,CAAU,UAAA,CAAW,MAC5B,KAAA,CAAAI,CACD,CAAA,CAAIL,CAAAA,CAEJ,OAAAL,CAAAA,CAAgBC,CAAO,CAAA,CAEnB,OAAO,OAAA,CAAY,GAAA,EAAe,OAAA,CAAQ,GAAA,EAAK,WAAa,YAAA,EAAgB,CAAC4B,CAAAA,EAChF,OAAA,CAAQ,IAAA,CAAK,gFAAgF,EAGvF,MAAOhB,CAAAA,CAAOC,CAAAA,CAAO6B,CAAAA,GAAc,CACzC,IAAM3B,EAAY,IAAA,CAAK,GAAA,EAAI,CAC3BN,CAAAA,EAAO,YAAA,GAAe,CAAE,MAAAG,CAAAA,CAAO,KAAA,CAAAC,CAAAA,CAAO,SAAA,CAAWE,CAAU,CAAC,EAE5D,GAAI,CACH,IAAMI,CAAAA,CAAW,MAAMd,CAAAA,CAAQ,GAAGL,CAAO,CAAA,iBAAA,CAAA,CAAqB,CAC7D,MAAA,CAAQ,MAAA,CACR,OAAA,CAAS,CACR,cAAA,CAAgB,kBAAA,CAChB,aAAA,CAAe,CAAA,OAAA,EAAU4B,CAAM,CAAA,CAChC,EACA,IAAA,CAAM,IAAA,CAAK,SAAA,CAAU,CACpB,KAAA,CAAOhB,CAAAA,CAAM,OAASiB,CAAAA,CACtB,GAAIC,CAAAA,EAAa,IAAA,CAAO,CAAE,UAAA,CAAYA,CAAU,CAAA,CAAI,EAAC,CACrD,QAAA,CAAU,CACT,GAAIlB,EAAM,YAAA,CACP,CAAC,CAAE,IAAA,CAAM,QAAA,CAAU,OAAA,CAASA,EAAM,YAAa,CAAC,CAAA,CAChD,EAAC,CACJ,CAAE,KAAM,MAAA,CAAQ,OAAA,CAASC,CAAM,CAChC,CAAA,CACA,MAAA,CAAQ,GACR,cAAA,CAAgB,CAAE,aAAA,CAAe,CAAA,CAAK,CACvC,CAAC,EACD,MAAA,CAAQ6B,CAAAA,CAAU,MACnB,CAAC,CAAA,CAED,GAAI,CAACvB,CAAAA,CAAS,EAAA,CAAI,CACjB,IAAMC,CAAAA,CAAU,MAAMD,EAAS,IAAA,EAAK,CAAE,KAAA,CAAM,IAAM,EAAE,CAAA,CAEpD,MAAM,IAAI,KAAA,CACT,CAAA,mCAAA,EAAsCA,CAAAA,CAAS,MAAM,CAAA,EAAGC,EAAU,CAAA,QAAA,EAAMA,CAAAA,CAAQ,KAAA,CAAM,CAAA,CAAG,GAAG,CAAC,GAAK,EAAE,CAAA,CACrG,CACD,CAEA,IAAMuB,CAAAA,CAASxB,EAAS,IAAA,EAAM,SAAA,EAAU,CACxC,GAAI,CAACwB,CAAAA,CACJ,MAAM,IAAI,KAAA,CAAM,8BAA8B,CAAA,CAG/C,IAAMC,CAAAA,CAAU,IAAI,WAAA,CA
ChBC,CAAAA,CAAM,EAAA,CACNC,CAAAA,CAAW,EAAA,CACXC,CAAAA,CAAe,EACfC,CAAAA,CAAmB,CAAA,CAEvB,GAAI,CACH,OAAa,CACZ,GAAM,CAAE,IAAA,CAAAC,CAAAA,CAAM,KAAA,CAAAC,CAAM,CAAA,CAAI,MAAMP,CAAAA,CAAO,IAAA,EAAK,CAC1C,GAAIM,CAAAA,CACH,MAGDJ,GAAOD,CAAAA,CAAQ,MAAA,CAAOM,CAAAA,CAAO,CAAE,MAAA,CAAQ,CAAA,CAAK,CAAC,CAAA,CAC7C,IAAMC,CAAAA,CAAQN,CAAAA,CAAI,KAAA,CAAM;AAAA,CAAI,CAAA,CAC5BA,EAAMM,CAAAA,CAAM,GAAA,IAAS,EAAA,CAErB,IAAA,IAAWC,KAAQD,CAAAA,CAAO,CACzB,GAAI,CAACC,CAAAA,CAAK,WAAW,QAAQ,CAAA,CAC5B,SAED,IAAMjB,CAAAA,CAAOiB,EAAK,KAAA,CAAM,CAAC,EAAE,IAAA,EAAK,CAChC,GAAIjB,CAAAA,GAAS,QAAA,CAIb,GAAI,CACH,IAAMkB,EAAQ,IAAA,CAAK,KAAA,CAAMlB,CAAI,CAAA,CAGvBmB,CAAAA,CAAQD,EAAM,OAAA,GAAU,CAAC,GAAG,KAAA,CAC9BC,CAAAA,EAAO,OAAA,GACVR,CAAAA,EAAYQ,CAAAA,CAAM,OAAA,CAClBZ,EAAU,OAAA,GAAUY,CAAAA,CAAM,OAAO,CAAA,CAAA,CAI9BD,CAAAA,CAAM,QACTN,CAAAA,CAAeM,CAAAA,CAAM,MAAM,aAAA,EAAiB,CAAA,CAC5CL,EAAmBK,CAAAA,CAAM,KAAA,CAAM,mBAAqB,CAAA,EAEtD,CAAA,MAASE,EAAU,CAClB,GAAIA,CAAAA,YAAoB,WAAA,CAEtB,OAAO,OAAA,CAAY,KACnB,OAAA,CAAQ,GAAA,EAAK,WAAa,aAAA,EAE1B,OAAA,CAAQ,KACP,8CAAA,CACApB,CACD,OAGD,MAAMoB,CAER,CACD,CACD,CACD,QAAE,CACDZ,CAAAA,CAAO,QAAO,CAAE,KAAA,CAAM,IAAM,CAAC,CAAC,EAC/B,CAEA,IAAMa,CAAAA,CAAwB,CAAE,IAAA,CAAM,WAAA,CAAa,QAASV,CAAS,CAAA,CACrEJ,EAAU,SAAA,GAAYc,CAAY,EAElC,IAAMlC,CAAAA,CAAyB,CAC9B,WAAA,CAAayB,CAAAA,CACb,aAAcC,CACf,CAAA,CACMS,CAAAA,CAAcV,CAAAA,CAAeC,CAAAA,CAEnC,OAAAvC,GAAO,WAAA,GAAc,CACpB,MAAAG,CAAAA,CACA,KAAA,CAAAC,EACA,MAAA,CAAQiC,CAAAA,CACR,YAAAW,CAAAA,CACA,UAAA,CAAAnC,EACA,UAAA,CAAY,IAAA,CAAK,KAAI,CAAIP,CAAAA,CACzB,UAAW,IAAA,CAAK,GAAA,EACjB,CAAC,CAAA,CAEM,CACN,OAAQ+B,CAAAA,CACR,QAAA,CAAU,CAAC,CAAE,IAAA,CAAM,OAAiB,OAAA,CAASjC,CAAM,EAAG2C,CAAY,CAAA,CAClE,UAAW,EAAC,CACZ,YAAAC,CAAAA,CACA,UAAA,CAAAnC,CACD,CACD,CAAA,MAASpB,CAAAA,CAAK,CACb,MAAIA,CAAAA,YAAe,OAClBO,CAAAA,EAAO,OAAA,GAAU,CAChB,KAAA,CAAAG,CAAAA,CACA,MAAAC,CAAAA,CACA,KAAA,CAAOX,EACP,UAAA,CAAY,IAAA,CAAK,KAAI,CAAIa,CAAAA,CACzB,UAAW,IAAA,CAAK,GAAA,EACjB,CAAC,CAAA,CAGIb,CACP,CACD,CACD","file":"openai.js","sourcesContent":["/**\n * Helper functions 
for AI adapter — createRunner, estimateCost, state queries, validation.\n */\n\nimport type {\n AdapterHooks,\n AgentLike,\n AgentRunner,\n RunResult,\n RunOptions,\n Message,\n TokenUsage,\n AgentState,\n ApprovalState,\n} from \"./types.js\";\n\n// ============================================================================\n// State Query Helpers\n// ============================================================================\n\n/** Check if agent is currently running. */\nexport function isAgentRunning(state: AgentState): boolean {\n return state.status === \"running\";\n}\n\n/** Check if there are pending approvals. */\nexport function hasPendingApprovals(state: ApprovalState): boolean {\n return state.pending.length > 0;\n}\n\n// ============================================================================\n// Cost Estimation\n// ============================================================================\n\n/**\n * Get total cost estimate based on token usage.\n *\n * @param tokenUsage - Total token count\n * @param ratePerMillionTokens - Cost per million tokens (required, no default to avoid stale pricing)\n * @returns Estimated cost in dollars\n */\nexport function estimateCost(\n tokenUsage: number,\n ratePerMillionTokens: number\n): number {\n return (tokenUsage / 1_000_000) * ratePerMillionTokens;\n}\n\n// ============================================================================\n// Validation Helpers\n// ============================================================================\n\nconst ALLOWED_PROTOCOLS = new Set([\"http:\", \"https:\"]);\n\n/**\n * Validate that a baseURL uses http or https.\n * Throws immediately at adapter creation time (not at call time) to catch config errors early.\n */\nexport function validateBaseURL(baseURL: string): void {\n try {\n const url = new URL(baseURL);\n if (!ALLOWED_PROTOCOLS.has(url.protocol)) {\n throw new Error(\n `[Directive] Invalid baseURL protocol \"${url.protocol}\" – only http: and https: are 
allowed`,\n );\n }\n } catch (err) {\n if (err instanceof Error && err.message.startsWith(\"[Directive]\")) {\n throw err;\n }\n\n throw new Error(\n `[Directive] Invalid baseURL \"${baseURL}\" – must be a valid URL (e.g. \"https://api.openai.com/v1\")`,\n );\n }\n}\n\n// ============================================================================\n// createRunner Helper\n// ============================================================================\n\n/** Parsed response from an LLM provider */\nexport interface ParsedResponse {\n text: string;\n totalTokens: number;\n /** Input token count, when available from the provider */\n inputTokens?: number;\n /** Output token count, when available from the provider */\n outputTokens?: number;\n}\n\n/** Options for creating an AgentRunner from buildRequest/parseResponse */\nexport interface CreateRunnerOptions {\n fetch?: typeof globalThis.fetch;\n buildRequest: (\n agent: AgentLike,\n input: string,\n messages: Message[]\n ) => { url: string; init: RequestInit };\n parseResponse: (\n response: Response,\n messages: Message[]\n ) => Promise<ParsedResponse>;\n parseOutput?: <T>(text: string) => T;\n /** Lifecycle hooks for tracing, logging, and metrics */\n hooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner from buildRequest/parseResponse helpers.\n * Reduces ~50 lines of fetch boilerplate to ~20 lines of configuration.\n *\n * Supports lifecycle hooks for observability:\n * - `onBeforeCall` fires before each API request\n * - `onAfterCall` fires after a successful response (includes token breakdown)\n * - `onError` fires when the request fails\n *\n * @example\n * ```typescript\n * const runClaude = createRunner({\n * buildRequest: (agent, input) => ({\n * url: \"/api/claude\",\n * init: {\n * method: \"POST\",\n * headers: { \"Content-Type\": \"application/json\" },\n * body: JSON.stringify({\n * model: agent.model ?? \"claude-haiku-4-5-20251001\",\n * system: agent.instructions ?? 
\"\",\n * messages: [{ role: \"user\", content: input }],\n * }),\n * },\n * }),\n * parseResponse: async (res) => {\n * const data = await res.json();\n * const inputTokens = data.usage?.input_tokens ?? 0;\n * const outputTokens = data.usage?.output_tokens ?? 0;\n * return {\n * text: data.content?.[0]?.text ?? \"\",\n * totalTokens: inputTokens + outputTokens,\n * inputTokens,\n * outputTokens,\n * };\n * },\n * hooks: {\n * onAfterCall: ({ durationMs, tokenUsage }) => {\n * console.log(`LLM call: ${durationMs}ms, ${tokenUsage.inputTokens}in/${tokenUsage.outputTokens}out`);\n * },\n * },\n * });\n * ```\n */\nexport function createRunner(options: CreateRunnerOptions): AgentRunner {\n const {\n fetch: fetchFn = globalThis.fetch,\n buildRequest,\n parseResponse,\n parseOutput,\n hooks,\n } = options;\n\n const defaultParseOutput = <T>(text: string): T => {\n try {\n return JSON.parse(text) as T;\n } catch {\n return text as unknown as T;\n }\n };\n\n const parse = parseOutput ?? defaultParseOutput;\n\n return async <T = unknown>(\n agent: AgentLike,\n input: string,\n runOptions?: RunOptions\n ): Promise<RunResult<T>> => {\n const startTime = Date.now();\n hooks?.onBeforeCall?.({ agent, input, timestamp: startTime });\n\n const messages: Message[] = [{ role: \"user\", content: input }];\n\n try {\n const { url, init } = buildRequest(agent, input, messages);\n\n const fetchInit: RequestInit = runOptions?.signal\n ? { ...init, signal: runOptions.signal }\n : init;\n\n const response = await fetchFn(url, fetchInit);\n\n if (!response.ok) {\n const errBody = await response.text().catch(() => \"\");\n\n throw new Error(\n `[Directive] AgentRunner request failed: ${response.status} ${response.statusText}${errBody ? ` – ${errBody.slice(0, 300)}` : \"\"}`,\n );\n }\n\n const parsed = await parseResponse(response, messages);\n const tokenUsage: TokenUsage = {\n inputTokens: parsed.inputTokens ?? 0,\n outputTokens: parsed.outputTokens ?? 
0,\n };\n\n const assistantMessage: Message = { role: \"assistant\", content: parsed.text };\n const allMessages: Message[] = [...messages, assistantMessage];\n\n runOptions?.onMessage?.(assistantMessage);\n\n const durationMs = Date.now() - startTime;\n hooks?.onAfterCall?.({\n agent,\n input,\n output: parsed.text,\n totalTokens: parsed.totalTokens,\n tokenUsage,\n durationMs,\n timestamp: Date.now(),\n });\n\n return {\n output: parse<T>(parsed.text),\n messages: allMessages,\n toolCalls: [],\n totalTokens: parsed.totalTokens,\n tokenUsage,\n };\n } catch (err) {\n const durationMs = Date.now() - startTime;\n if (err instanceof Error) {\n hooks?.onError?.({\n agent,\n input,\n error: err,\n durationMs,\n timestamp: Date.now(),\n });\n }\n\n throw err;\n }\n };\n}\n","/**\n * @directive-run/ai/openai\n *\n * OpenAI adapter for Directive AI. Provides runners and embedders\n * for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)\n *\n * @example\n * ```typescript\n * import { createOpenAIRunner, createOpenAIEmbedder } from '@directive-run/ai/openai';\n *\n * const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });\n * const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! 
});\n * ```\n */\n\nimport { createRunner, validateBaseURL } from \"../helpers.js\";\nimport type { AdapterHooks, AgentRunner, Message, TokenUsage } from \"../types.js\";\nimport type { StreamingCallbackRunner } from \"../stack.js\";\nimport type { EmbedderFn, Embedding } from \"../guardrails/semantic-cache.js\";\n\n// ============================================================================\n// Pricing Constants\n// ============================================================================\n\n/**\n * OpenAI model pricing (USD per million tokens).\n *\n * Use with `estimateCost()` for per-call cost tracking:\n * ```typescript\n * import { estimateCost } from '@directive-run/ai';\n * import { OPENAI_PRICING } from '@directive-run/ai/openai';\n *\n * const cost =\n * estimateCost(result.tokenUsage!.inputTokens, OPENAI_PRICING[\"gpt-4o\"].input) +\n * estimateCost(result.tokenUsage!.outputTokens, OPENAI_PRICING[\"gpt-4o\"].output);\n * ```\n *\n * **Note:** Pricing changes over time. These values are provided as a convenience\n * and may not reflect the latest rates. 
Always verify at https://openai.com/pricing\n */\nexport const OPENAI_PRICING: Record<string, { input: number; output: number }> = {\n\t\"gpt-4o\": { input: 2.5, output: 10 },\n\t\"gpt-4o-mini\": { input: 0.15, output: 0.6 },\n\t\"gpt-4-turbo\": { input: 10, output: 30 },\n\t\"o3-mini\": { input: 1.1, output: 4.4 },\n};\n\n// ============================================================================\n// OpenAI Runner\n// ============================================================================\n\n/** Options for createOpenAIRunner */\nexport interface OpenAIRunnerOptions {\n\tapiKey: string;\n\tmodel?: string;\n\tmaxTokens?: number;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** @default undefined */\n\ttimeoutMs?: number;\n\t/** Lifecycle hooks for tracing, logging, and metrics */\n\thooks?: AdapterHooks;\n}\n\n/**\n * Create an AgentRunner for OpenAI-compatible APIs (OpenAI, Azure, Together, etc.)\n *\n * Returns `tokenUsage` with input/output breakdown for cost tracking.\n *\n * @example\n * ```typescript\n * // OpenAI\n * const runner = createOpenAIRunner({ apiKey: process.env.OPENAI_API_KEY! });\n *\n * // Azure OpenAI\n * const azure = createOpenAIRunner({\n * apiKey: process.env.AZURE_KEY!,\n * baseURL: \"https://your-resource.openai.azure.com/v1\",\n * });\n *\n * // Together.ai (OpenAI-compatible)\n * const together = createOpenAIRunner({\n * apiKey: process.env.TOGETHER_KEY!,\n * baseURL: \"https://api.together.xyz/v1\",\n * });\n * ```\n */\nexport function createOpenAIRunner(options: OpenAIRunnerOptions): AgentRunner {\n\tconst {\n\t\tapiKey,\n\t\tmodel = \"gpt-4o\",\n\t\tmaxTokens,\n\t\tbaseURL = \"https://api.openai.com/v1\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\ttimeoutMs,\n\t\thooks,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\tif (typeof process !== \"undefined\" && process.env?.NODE_ENV !== \"production\" && !apiKey) {\n\t\tconsole.warn(\"[Directive] createOpenAIRunner: apiKey is empty. 
API calls will fail.\");\n\t}\n\n\treturn createRunner({\n\t\tfetch: fetchFn,\n\t\thooks,\n\t\tbuildRequest: (agent, _input, messages) => ({\n\t\t\turl: `${baseURL}/chat/completions`,\n\t\t\tinit: {\n\t\t\t\tmethod: \"POST\",\n\t\t\t\theaders: {\n\t\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t\t},\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\tmodel: agent.model ?? model,\n\t\t\t\t\t...(maxTokens != null ? { max_tokens: maxTokens } : {}),\n\t\t\t\t\tmessages: [\n\t\t\t\t\t\t...(agent.instructions\n\t\t\t\t\t\t\t? [{ role: \"system\", content: agent.instructions }]\n\t\t\t\t\t\t\t: []),\n\t\t\t\t\t\t...messages.map((m) => ({ role: m.role, content: m.content })),\n\t\t\t\t\t],\n\t\t\t\t}),\n\t\t\t\t...(timeoutMs != null ? { signal: AbortSignal.timeout(timeoutMs) } : {}),\n\t\t\t},\n\t\t}),\n\t\tparseResponse: async (res) => {\n\t\t\tconst data = await res.json();\n\t\t\tconst text = data.choices?.[0]?.message?.content ?? \"\";\n\t\t\tconst inputTokens = data.usage?.prompt_tokens ?? 0;\n\t\t\tconst outputTokens = data.usage?.completion_tokens ?? 0;\n\n\t\t\treturn {\n\t\t\t\ttext,\n\t\t\t\ttotalTokens: inputTokens + outputTokens,\n\t\t\t\tinputTokens,\n\t\t\t\toutputTokens,\n\t\t\t};\n\t\t},\n\t});\n}\n\n// ============================================================================\n// OpenAI Embedder\n// ============================================================================\n\n/** Options for createOpenAIEmbedder */\nexport interface OpenAIEmbedderOptions {\n\tapiKey: string;\n\tmodel?: string;\n\tdimensions?: number;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** @default 30000 */\n\ttimeoutMs?: number;\n}\n\n/**\n * Create an EmbedderFn that calls the OpenAI embeddings API.\n *\n * @example\n * ```typescript\n * const embedder = createOpenAIEmbedder({ apiKey: process.env.OPENAI_API_KEY! 
});\n * const embedding = await embedder('How do constraints work?');\n * ```\n */\nexport function createOpenAIEmbedder(\n\toptions: OpenAIEmbedderOptions,\n): EmbedderFn {\n\tconst {\n\t\tapiKey,\n\t\tmodel = \"text-embedding-3-small\",\n\t\tdimensions = 1536,\n\t\tbaseURL = \"https://api.openai.com/v1\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\ttimeoutMs,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\tif (typeof process !== \"undefined\" && process.env?.NODE_ENV !== \"production\" && !apiKey) {\n\t\tconsole.warn(\"[Directive] createOpenAIEmbedder: apiKey is empty. API calls will fail.\");\n\t}\n\n\treturn async (text: string): Promise<Embedding> => {\n\t\tconst response = await fetchFn(`${baseURL}/embeddings`, {\n\t\t\tmethod: \"POST\",\n\t\t\theaders: {\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t},\n\t\t\tbody: JSON.stringify({ model, input: text, dimensions }),\n\t\t\tsignal: AbortSignal.timeout(timeoutMs ?? 30_000),\n\t\t});\n\n\t\tif (!response.ok) {\n\t\t\tconst errBody = await response.text().catch(() => \"\");\n\n\t\t\tthrow new Error(\n\t\t\t\t`[Directive] OpenAI embedding failed: ${response.status}${errBody ? 
` – ${errBody.slice(0, 200)}` : \"\"}`,\n\t\t\t);\n\t\t}\n\n\t\tconst data = (await response.json()) as {\n\t\t\tdata: Array<{ embedding: number[] }>;\n\t\t};\n\n\t\tconst entry = data.data[0];\n\t\tif (!entry) {\n\t\t\tthrow new Error(\n\t\t\t\t\"[Directive] OpenAI embedding response contained no data entries\",\n\t\t\t);\n\t\t}\n\n\t\treturn entry.embedding;\n\t};\n}\n\n// ============================================================================\n// OpenAI Streaming Runner\n// ============================================================================\n\n/** Options for createOpenAIStreamingRunner */\nexport interface OpenAIStreamingRunnerOptions {\n\tapiKey: string;\n\tmodel?: string;\n\tmaxTokens?: number;\n\tbaseURL?: string;\n\tfetch?: typeof globalThis.fetch;\n\t/** Lifecycle hooks for tracing, logging, and metrics */\n\thooks?: AdapterHooks;\n}\n\n/**\n * Create a StreamingCallbackRunner for OpenAI-compatible chat completions\n * with server-sent events. Can be used standalone or paired with `createOpenAIRunner`.\n *\n * Returns `tokenUsage` with input/output breakdown for cost tracking.\n *\n * @example\n * ```typescript\n * const streamingRunner = createOpenAIStreamingRunner({\n * apiKey: process.env.OPENAI_API_KEY!,\n * });\n * const stack = createAgentStack({\n * runner: createOpenAIRunner({ apiKey }),\n * streaming: { runner: streamingRunner },\n * agents: { ... },\n * });\n * ```\n */\nexport function createOpenAIStreamingRunner(\n\toptions: OpenAIStreamingRunnerOptions,\n): StreamingCallbackRunner {\n\tconst {\n\t\tapiKey,\n\t\tmodel = \"gpt-4o\",\n\t\tmaxTokens,\n\t\tbaseURL = \"https://api.openai.com/v1\",\n\t\tfetch: fetchFn = globalThis.fetch,\n\t\thooks,\n\t} = options;\n\n\tvalidateBaseURL(baseURL);\n\n\tif (typeof process !== \"undefined\" && process.env?.NODE_ENV !== \"production\" && !apiKey) {\n\t\tconsole.warn(\"[Directive] createOpenAIStreamingRunner: apiKey is empty. 
API calls will fail.\");\n\t}\n\n\treturn async (agent, input, callbacks) => {\n\t\tconst startTime = Date.now();\n\t\thooks?.onBeforeCall?.({ agent, input, timestamp: startTime });\n\n\t\ttry {\n\t\t\tconst response = await fetchFn(`${baseURL}/chat/completions`, {\n\t\t\t\tmethod: \"POST\",\n\t\t\t\theaders: {\n\t\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t\t\tAuthorization: `Bearer ${apiKey}`,\n\t\t\t\t},\n\t\t\t\tbody: JSON.stringify({\n\t\t\t\t\tmodel: agent.model ?? model,\n\t\t\t\t\t...(maxTokens != null ? { max_tokens: maxTokens } : {}),\n\t\t\t\t\tmessages: [\n\t\t\t\t\t\t...(agent.instructions\n\t\t\t\t\t\t\t? [{ role: \"system\", content: agent.instructions }]\n\t\t\t\t\t\t\t: []),\n\t\t\t\t\t\t{ role: \"user\", content: input },\n\t\t\t\t\t],\n\t\t\t\t\tstream: true,\n\t\t\t\t\tstream_options: { include_usage: true },\n\t\t\t\t}),\n\t\t\t\tsignal: callbacks.signal,\n\t\t\t});\n\n\t\t\tif (!response.ok) {\n\t\t\t\tconst errBody = await response.text().catch(() => \"\");\n\n\t\t\t\tthrow new Error(\n\t\t\t\t\t`[Directive] OpenAI streaming error ${response.status}${errBody ? ` – ${errBody.slice(0, 200)}` : \"\"}`,\n\t\t\t\t);\n\t\t\t}\n\n\t\t\tconst reader = response.body?.getReader();\n\t\t\tif (!reader) {\n\t\t\t\tthrow new Error(\"[Directive] No response body\");\n\t\t\t}\n\n\t\t\tconst decoder = new TextDecoder();\n\t\t\tlet buf = \"\";\n\t\t\tlet fullText = \"\";\n\t\t\tlet promptTokens = 0;\n\t\t\tlet completionTokens = 0;\n\n\t\t\ttry {\n\t\t\t\twhile (true) {\n\t\t\t\t\tconst { done, value } = await reader.read();\n\t\t\t\t\tif (done) {\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\tbuf += decoder.decode(value, { stream: true });\n\t\t\t\t\tconst lines = buf.split(\"\\n\");\n\t\t\t\t\tbuf = lines.pop() ?? 
\"\";\n\n\t\t\t\t\tfor (const line of lines) {\n\t\t\t\t\t\tif (!line.startsWith(\"data: \")) {\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tconst data = line.slice(6).trim();\n\t\t\t\t\t\tif (data === \"[DONE]\") {\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttry {\n\t\t\t\t\t\t\tconst event = JSON.parse(data);\n\n\t\t\t\t\t\t\t// Extract token content from delta\n\t\t\t\t\t\t\tconst delta = event.choices?.[0]?.delta;\n\t\t\t\t\t\t\tif (delta?.content) {\n\t\t\t\t\t\t\t\tfullText += delta.content;\n\t\t\t\t\t\t\t\tcallbacks.onToken?.(delta.content);\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// Extract usage from the final chunk (stream_options: include_usage)\n\t\t\t\t\t\t\tif (event.usage) {\n\t\t\t\t\t\t\t\tpromptTokens = event.usage.prompt_tokens ?? 0;\n\t\t\t\t\t\t\t\tcompletionTokens = event.usage.completion_tokens ?? 0;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} catch (parseErr) {\n\t\t\t\t\t\t\tif (parseErr instanceof SyntaxError) {\n\t\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t\ttypeof process !== \"undefined\" &&\n\t\t\t\t\t\t\t\t\tprocess.env?.NODE_ENV === \"development\"\n\t\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\t\tconsole.warn(\n\t\t\t\t\t\t\t\t\t\t\"[Directive] Malformed SSE event from OpenAI:\",\n\t\t\t\t\t\t\t\t\t\tdata,\n\t\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tthrow parseErr;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} finally {\n\t\t\t\treader.cancel().catch(() => {});\n\t\t\t}\n\n\t\t\tconst assistantMsg: Message = { role: \"assistant\", content: fullText };\n\t\t\tcallbacks.onMessage?.(assistantMsg);\n\n\t\t\tconst tokenUsage: TokenUsage = {\n\t\t\t\tinputTokens: promptTokens,\n\t\t\t\toutputTokens: completionTokens,\n\t\t\t};\n\t\t\tconst totalTokens = promptTokens + completionTokens;\n\n\t\t\thooks?.onAfterCall?.({\n\t\t\t\tagent,\n\t\t\t\tinput,\n\t\t\t\toutput: fullText,\n\t\t\t\ttotalTokens,\n\t\t\t\ttokenUsage,\n\t\t\t\tdurationMs: Date.now() - startTime,\n\t\t\t\ttimestamp: 
Date.now(),\n\t\t\t});\n\n\t\t\treturn {\n\t\t\t\toutput: fullText,\n\t\t\t\tmessages: [{ role: \"user\" as const, content: input }, assistantMsg],\n\t\t\t\ttoolCalls: [],\n\t\t\t\ttotalTokens,\n\t\t\t\ttokenUsage,\n\t\t\t};\n\t\t} catch (err) {\n\t\t\tif (err instanceof Error) {\n\t\t\t\thooks?.onError?.({\n\t\t\t\t\tagent,\n\t\t\t\t\tinput,\n\t\t\t\t\terror: err,\n\t\t\t\t\tdurationMs: Date.now() - startTime,\n\t\t\t\t\ttimestamp: Date.now(),\n\t\t\t\t});\n\t\t\t}\n\n\t\t\tthrow err;\n\t\t}\n\t};\n}\n"]}
|