@databricks/appkit 0.31.0 → 0.33.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CLAUDE.md +54 -1
- package/NOTICE.md +2 -0
- package/dist/agents/databricks.d.ts.map +1 -1
- package/dist/agents/databricks.js +8 -3
- package/dist/agents/databricks.js.map +1 -1
- package/dist/appkit/package.js +1 -1
- package/dist/beta.d.ts +16 -1
- package/dist/beta.js +14 -1
- package/dist/connectors/index.js +3 -0
- package/dist/connectors/mcp/client.d.ts +85 -0
- package/dist/connectors/mcp/client.d.ts.map +1 -0
- package/dist/connectors/mcp/client.js +296 -0
- package/dist/connectors/mcp/client.js.map +1 -0
- package/dist/connectors/mcp/host-policy.d.ts +51 -0
- package/dist/connectors/mcp/host-policy.d.ts.map +1 -0
- package/dist/connectors/mcp/host-policy.js +168 -0
- package/dist/connectors/mcp/host-policy.js.map +1 -0
- package/dist/connectors/mcp/index.d.ts +3 -0
- package/dist/connectors/mcp/index.js +4 -0
- package/dist/connectors/mcp/types.d.ts +16 -0
- package/dist/connectors/mcp/types.d.ts.map +1 -0
- package/dist/context/index.js +1 -1
- package/dist/core/agent/build-toolkit.d.ts +2 -0
- package/dist/core/agent/build-toolkit.js +45 -0
- package/dist/core/agent/build-toolkit.js.map +1 -0
- package/dist/core/agent/consume-adapter-stream.js +33 -0
- package/dist/core/agent/consume-adapter-stream.js.map +1 -0
- package/dist/core/agent/create-agent.d.ts +27 -0
- package/dist/core/agent/create-agent.d.ts.map +1 -0
- package/dist/core/agent/create-agent.js +50 -0
- package/dist/core/agent/create-agent.js.map +1 -0
- package/dist/core/agent/load-agents.d.ts +72 -0
- package/dist/core/agent/load-agents.d.ts.map +1 -0
- package/dist/core/agent/load-agents.js +268 -0
- package/dist/core/agent/load-agents.js.map +1 -0
- package/dist/core/agent/normalize-result.js +39 -0
- package/dist/core/agent/normalize-result.js.map +1 -0
- package/dist/core/agent/plugins-map.js +44 -0
- package/dist/core/agent/plugins-map.js.map +1 -0
- package/dist/core/agent/run-agent.d.ts +58 -0
- package/dist/core/agent/run-agent.d.ts.map +1 -0
- package/dist/core/agent/run-agent.js +257 -0
- package/dist/core/agent/run-agent.js.map +1 -0
- package/dist/core/agent/system-prompt.js +38 -0
- package/dist/core/agent/system-prompt.js.map +1 -0
- package/dist/core/agent/toolkit-options.js +28 -0
- package/dist/core/agent/toolkit-options.js.map +1 -0
- package/dist/core/agent/toolkit-resolver.js +44 -0
- package/dist/core/agent/toolkit-resolver.js.map +1 -0
- package/dist/core/agent/tools/define-tool.d.ts +66 -0
- package/dist/core/agent/tools/define-tool.d.ts.map +1 -0
- package/dist/core/agent/tools/define-tool.js +50 -0
- package/dist/core/agent/tools/define-tool.js.map +1 -0
- package/dist/core/agent/tools/function-tool.d.ts +38 -0
- package/dist/core/agent/tools/function-tool.d.ts.map +1 -0
- package/dist/core/agent/tools/function-tool.js +22 -0
- package/dist/core/agent/tools/function-tool.js.map +1 -0
- package/dist/core/agent/tools/hosted-tools.d.ts +47 -0
- package/dist/core/agent/tools/hosted-tools.d.ts.map +1 -0
- package/dist/core/agent/tools/hosted-tools.js +67 -0
- package/dist/core/agent/tools/hosted-tools.js.map +1 -0
- package/dist/core/agent/tools/index.d.ts +5 -0
- package/dist/core/agent/tools/index.js +7 -0
- package/dist/core/agent/tools/json-schema.js +24 -0
- package/dist/core/agent/tools/json-schema.js.map +1 -0
- package/dist/core/agent/tools/sql-policy.js +256 -0
- package/dist/core/agent/tools/sql-policy.js.map +1 -0
- package/dist/core/agent/tools/tool.d.ts +63 -0
- package/dist/core/agent/tools/tool.d.ts.map +1 -0
- package/dist/core/agent/tools/tool.js +42 -0
- package/dist/core/agent/tools/tool.js.map +1 -0
- package/dist/core/agent/types.d.ts +299 -0
- package/dist/core/agent/types.d.ts.map +1 -0
- package/dist/core/agent/types.js +12 -0
- package/dist/core/agent/types.js.map +1 -0
- package/dist/core/appkit.d.ts +1 -0
- package/dist/core/appkit.d.ts.map +1 -1
- package/dist/core/appkit.js +31 -4
- package/dist/core/appkit.js.map +1 -1
- package/dist/core/plugin-context.d.ts +133 -0
- package/dist/core/plugin-context.d.ts.map +1 -0
- package/dist/core/plugin-context.js +220 -0
- package/dist/core/plugin-context.js.map +1 -0
- package/dist/index.d.ts +11 -11
- package/dist/internal-telemetry/appkit-log.js +19 -0
- package/dist/internal-telemetry/appkit-log.js.map +1 -0
- package/dist/internal-telemetry/config.js +15 -0
- package/dist/internal-telemetry/config.js.map +1 -0
- package/dist/internal-telemetry/index.js +4 -0
- package/dist/internal-telemetry/reporter.js +132 -0
- package/dist/internal-telemetry/reporter.js.map +1 -0
- package/dist/plugin/plugin.d.ts +18 -3
- package/dist/plugin/plugin.d.ts.map +1 -1
- package/dist/plugin/plugin.js +26 -2
- package/dist/plugin/plugin.js.map +1 -1
- package/dist/plugin/to-plugin.d.ts +3 -2
- package/dist/plugin/to-plugin.d.ts.map +1 -1
- package/dist/plugin/to-plugin.js +7 -4
- package/dist/plugin/to-plugin.js.map +1 -1
- package/dist/plugins/agents/agents.d.ts +186 -0
- package/dist/plugins/agents/agents.d.ts.map +1 -0
- package/dist/plugins/agents/agents.js +979 -0
- package/dist/plugins/agents/agents.js.map +1 -0
- package/dist/plugins/agents/defaults.js +13 -0
- package/dist/plugins/agents/defaults.js.map +1 -0
- package/dist/plugins/agents/event-channel.js +64 -0
- package/dist/plugins/agents/event-channel.js.map +1 -0
- package/dist/plugins/agents/event-translator.js +224 -0
- package/dist/plugins/agents/event-translator.js.map +1 -0
- package/dist/plugins/agents/index.d.ts +4 -0
- package/dist/plugins/agents/index.js +6 -0
- package/dist/plugins/agents/manifest.js +26 -0
- package/dist/plugins/agents/manifest.js.map +1 -0
- package/dist/plugins/agents/schemas.js +51 -0
- package/dist/plugins/agents/schemas.js.map +1 -0
- package/dist/plugins/agents/thread-store.js +58 -0
- package/dist/plugins/agents/thread-store.js.map +1 -0
- package/dist/plugins/agents/tool-approval-gate.js +75 -0
- package/dist/plugins/agents/tool-approval-gate.js.map +1 -0
- package/dist/plugins/analytics/analytics.d.ts +15 -1
- package/dist/plugins/analytics/analytics.d.ts.map +1 -1
- package/dist/plugins/analytics/analytics.js +37 -2
- package/dist/plugins/analytics/analytics.js.map +1 -1
- package/dist/plugins/analytics/index.js +1 -0
- package/dist/plugins/analytics/types.js +15 -0
- package/dist/plugins/analytics/types.js.map +1 -0
- package/dist/plugins/beta-exports.generated.d.ts +2 -0
- package/dist/plugins/beta-exports.generated.js +4 -0
- package/dist/plugins/files/plugin.d.ts +20 -2
- package/dist/plugins/files/plugin.d.ts.map +1 -1
- package/dist/plugins/files/plugin.js +120 -2
- package/dist/plugins/files/plugin.js.map +1 -1
- package/dist/plugins/genie/genie.d.ts +17 -3
- package/dist/plugins/genie/genie.d.ts.map +1 -1
- package/dist/plugins/genie/genie.js +61 -2
- package/dist/plugins/genie/genie.js.map +1 -1
- package/dist/plugins/genie/types.d.ts +10 -2
- package/dist/plugins/genie/types.d.ts.map +1 -1
- package/dist/plugins/jobs/plugin.js +1 -1
- package/dist/plugins/lakebase/index.d.ts +2 -2
- package/dist/plugins/lakebase/index.js +1 -1
- package/dist/plugins/lakebase/lakebase.d.ts +31 -3
- package/dist/plugins/lakebase/lakebase.d.ts.map +1 -1
- package/dist/plugins/lakebase/lakebase.js +77 -5
- package/dist/plugins/lakebase/lakebase.js.map +1 -1
- package/dist/plugins/lakebase/types.d.ts +39 -1
- package/dist/plugins/lakebase/types.d.ts.map +1 -1
- package/dist/plugins/server/index.d.ts +12 -0
- package/dist/plugins/server/index.d.ts.map +1 -1
- package/dist/plugins/server/index.js +47 -10
- package/dist/plugins/server/index.js.map +1 -1
- package/dist/plugins/server/types.d.ts +11 -3
- package/dist/plugins/server/types.d.ts.map +1 -1
- package/dist/shared/src/agent.d.ts +75 -1
- package/dist/shared/src/agent.d.ts.map +1 -1
- package/dist/shared/src/index.d.ts +1 -1
- package/dist/shared/src/plugin.d.ts +8 -0
- package/dist/shared/src/plugin.d.ts.map +1 -1
- package/docs/api/appkit/Class.AppKitMcpClient.md +157 -0
- package/docs/api/appkit/Class.DatabricksAdapter.md +151 -0
- package/docs/api/appkit/Class.Plugin.md +65 -23
- package/docs/api/appkit/Function.agentIdFromMarkdownPath.md +18 -0
- package/docs/api/appkit/Function.createAgent.md +33 -0
- package/docs/api/appkit/Function.createApp.md +10 -8
- package/docs/api/appkit/Function.defineTool.md +26 -0
- package/docs/api/appkit/Function.executeFromRegistry.md +25 -0
- package/docs/api/appkit/Function.functionToolToDefinition.md +16 -0
- package/docs/api/appkit/Function.isFunctionTool.md +16 -0
- package/docs/api/appkit/Function.isHostedTool.md +16 -0
- package/docs/api/appkit/Function.isToolkitEntry.md +18 -0
- package/docs/api/appkit/Function.loadAgentFromFile.md +21 -0
- package/docs/api/appkit/Function.loadAgentsFromDir.md +26 -0
- package/docs/api/appkit/Function.mcpServer.md +28 -0
- package/docs/api/appkit/Function.parseTextToolCalls.md +26 -0
- package/docs/api/appkit/Function.resolveHostedTools.md +16 -0
- package/docs/api/appkit/Function.runAgent.md +26 -0
- package/docs/api/appkit/Function.tool.md +28 -0
- package/docs/api/appkit/Function.toolsFromRegistry.md +20 -0
- package/docs/api/appkit/Interface.AgentAdapter.md +21 -0
- package/docs/api/appkit/Interface.AgentDefinition.md +112 -0
- package/docs/api/appkit/Interface.AgentInput.md +37 -0
- package/docs/api/appkit/Interface.AgentRunContext.md +32 -0
- package/docs/api/appkit/Interface.AgentToolDefinition.md +37 -0
- package/docs/api/appkit/Interface.AgentsPluginConfig.md +241 -0
- package/docs/api/appkit/Interface.AutoInheritToolsConfig.md +27 -0
- package/docs/api/appkit/Interface.BasePluginConfig.md +1 -0
- package/docs/api/appkit/Interface.FunctionTool.md +80 -0
- package/docs/api/appkit/Interface.McpConnectAllResult.md +38 -0
- package/docs/api/appkit/Interface.Message.md +55 -0
- package/docs/api/appkit/Interface.PluginToolkitProvider.md +22 -0
- package/docs/api/appkit/Interface.PromptContext.md +30 -0
- package/docs/api/appkit/Interface.RegisteredAgent.md +75 -0
- package/docs/api/appkit/Interface.RunAgentInput.md +34 -0
- package/docs/api/appkit/Interface.RunAgentResult.md +23 -0
- package/docs/api/appkit/Interface.Thread.md +46 -0
- package/docs/api/appkit/Interface.ThreadStore.md +103 -0
- package/docs/api/appkit/Interface.ToolAnnotations.md +56 -0
- package/docs/api/appkit/Interface.ToolConfig.md +72 -0
- package/docs/api/appkit/Interface.ToolEntry.md +73 -0
- package/docs/api/appkit/Interface.ToolProvider.md +38 -0
- package/docs/api/appkit/Interface.ToolkitEntry.md +59 -0
- package/docs/api/appkit/Interface.ToolkitOptions.md +45 -0
- package/docs/api/appkit/TypeAlias.AgentEvent.md +299 -0
- package/docs/api/appkit/TypeAlias.AgentTool.md +11 -0
- package/docs/api/appkit/TypeAlias.AgentTools.md +8 -0
- package/docs/api/appkit/TypeAlias.AgentToolsFn.md +20 -0
- package/docs/api/appkit/TypeAlias.BaseSystemPromptOption.md +9 -0
- package/docs/api/appkit/TypeAlias.HostedTool.md +10 -0
- package/docs/api/appkit/TypeAlias.Plugins.md +26 -0
- package/docs/api/appkit/TypeAlias.ResolvedToolEntry.md +29 -0
- package/docs/api/appkit/TypeAlias.ToolRegistry.md +6 -0
- package/docs/api/appkit/Variable.agents.md +19 -0
- package/docs/api/appkit.md +113 -62
- package/docs/plugins/agents.md +441 -0
- package/docs/privacy.md +41 -0
- package/llms.txt +54 -1
- package/package.json +4 -2
- package/sbom.cdx.json +1 -1
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import { toToolJSONSchema } from "./json-schema.js";
|
|
2
|
+
import { formatZodError } from "./tool.js";
|
|
3
|
+
|
|
4
|
+
//#region src/core/agent/tools/define-tool.ts
|
|
5
|
+
/**
|
|
6
|
+
* Defines a single tool entry for a plugin's internal registry.
|
|
7
|
+
*
|
|
8
|
+
* The generic `S` flows from `schema` through to the `handler` callback so
|
|
9
|
+
* `args` is fully typed from the Zod schema. Names are assigned by the
|
|
10
|
+
* registry key, so they are not repeated inside the entry.
|
|
11
|
+
*/
|
|
12
|
+
function defineTool(config) {
|
|
13
|
+
return config;
|
|
14
|
+
}
|
|
15
|
+
/**
|
|
16
|
+
* Validates tool-call arguments against the entry's schema and invokes its
|
|
17
|
+
* handler. On validation failure, returns an LLM-friendly error string
|
|
18
|
+
* (matching the behavior of `tool()`) rather than throwing, so the model
|
|
19
|
+
* can self-correct on its next turn.
|
|
20
|
+
*/
|
|
21
|
+
async function executeFromRegistry(registry, name, args, signal) {
|
|
22
|
+
const entry = registry[name];
|
|
23
|
+
if (!entry) throw new Error(`Unknown tool: ${name}`);
|
|
24
|
+
const parsed = entry.schema.safeParse(args);
|
|
25
|
+
if (!parsed.success) return formatZodError(parsed.error, name);
|
|
26
|
+
return entry.execute(parsed.data, signal);
|
|
27
|
+
}
|
|
28
|
+
/**
|
|
29
|
+
* Produces the `AgentToolDefinition[]` a ToolProvider exposes to the LLM,
|
|
30
|
+
* deriving `parameters` JSON Schema from each entry's Zod schema.
|
|
31
|
+
*
|
|
32
|
+
* Tool names come from registry keys (supports dotted names like
|
|
33
|
+
* `uploads.list` for dynamic plugins).
|
|
34
|
+
*/
|
|
35
|
+
function toolsFromRegistry(registry) {
|
|
36
|
+
return Object.entries(registry).map(([name, entry]) => {
|
|
37
|
+
const parameters = toToolJSONSchema(entry.schema);
|
|
38
|
+
const def = {
|
|
39
|
+
name,
|
|
40
|
+
description: entry.description,
|
|
41
|
+
parameters
|
|
42
|
+
};
|
|
43
|
+
if (entry.annotations) def.annotations = entry.annotations;
|
|
44
|
+
return def;
|
|
45
|
+
});
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
//#endregion
|
|
49
|
+
export { defineTool, executeFromRegistry, toolsFromRegistry };
|
|
50
|
+
//# sourceMappingURL=define-tool.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"define-tool.js","names":[],"sources":["../../../../src/core/agent/tools/define-tool.ts"],"sourcesContent":["import type { AgentToolDefinition, ToolAnnotations } from \"shared\";\nimport type { z } from \"zod\";\nimport { toToolJSONSchema } from \"./json-schema\";\nimport { formatZodError } from \"./tool\";\n\n/**\n * Single-tool entry for a plugin's internal tool registry.\n *\n * Plugins collect these into a `Record<string, ToolEntry>` keyed by the tool's\n * public name and dispatch via `executeFromRegistry`.\n */\nexport interface ToolEntry<S extends z.ZodType = z.ZodType> {\n description: string;\n schema: S;\n annotations?: ToolAnnotations;\n /**\n * Whether this tool is eligible for auto-inheritance into markdown or\n * code-defined agents that enable `autoInheritTools`. Defaults to `false`\n * (safe-by-default) — plugin authors must explicitly opt a tool in if they\n * consider it safe enough to appear in every agent's tool record without an\n * explicit `tools:` declaration. Destructive or privilege-sensitive tools\n * should leave this unset so that they only reach agents that wire them\n * explicitly (via `tools:` object/function form, markdown `plugin:NAME`\n * entries in the unified `tools:` list, or\n * `plugins.<name>.toolkit({ only: [...] })`).\n */\n autoInheritable?: boolean;\n /**\n * Callback the agents plugin invokes after Zod validation succeeds.\n *\n * Named `execute` to match the public `tool({ execute })` form — both the\n * agent-author surface and the plugin-author surface now spell their\n * callback the same way. `args` is the inferred Zod output (so `T extends\n * z.ZodType` flows through and `args` is fully typed). 
`signal` is the\n * per-run AbortSignal: forward it to any awaited I/O so cancellation\n * actually unwinds the call (analytics and lakebase both do this).\n */\n execute: (\n args: z.infer<S>,\n signal?: AbortSignal,\n ) => unknown | Promise<unknown>;\n}\n\nexport type ToolRegistry = Record<string, ToolEntry>;\n\n/**\n * Defines a single tool entry for a plugin's internal registry.\n *\n * The generic `S` flows from `schema` through to the `handler` callback so\n * `args` is fully typed from the Zod schema. Names are assigned by the\n * registry key, so they are not repeated inside the entry.\n */\nexport function defineTool<S extends z.ZodType>(\n config: ToolEntry<S>,\n): ToolEntry<S> {\n return config;\n}\n\n/**\n * Validates tool-call arguments against the entry's schema and invokes its\n * handler. On validation failure, returns an LLM-friendly error string\n * (matching the behavior of `tool()`) rather than throwing, so the model\n * can self-correct on its next turn.\n */\nexport async function executeFromRegistry(\n registry: ToolRegistry,\n name: string,\n args: unknown,\n signal?: AbortSignal,\n): Promise<unknown> {\n const entry = registry[name];\n if (!entry) {\n throw new Error(`Unknown tool: ${name}`);\n }\n const parsed = entry.schema.safeParse(args);\n if (!parsed.success) {\n return formatZodError(parsed.error, name);\n }\n return entry.execute(parsed.data, signal);\n}\n\n/**\n * Produces the `AgentToolDefinition[]` a ToolProvider exposes to the LLM,\n * deriving `parameters` JSON Schema from each entry's Zod schema.\n *\n * Tool names come from registry keys (supports dotted names like\n * `uploads.list` for dynamic plugins).\n */\nexport function toolsFromRegistry(\n registry: ToolRegistry,\n): AgentToolDefinition[] {\n return Object.entries(registry).map(([name, entry]) => {\n const parameters = toToolJSONSchema(\n entry.schema,\n ) as unknown as AgentToolDefinition[\"parameters\"];\n const def: AgentToolDefinition = {\n name,\n description: 
entry.description,\n parameters,\n };\n if (entry.annotations) {\n def.annotations = entry.annotations;\n }\n return def;\n });\n}\n"],"mappings":";;;;;;;;;;;AAoDA,SAAgB,WACd,QACc;AACd,QAAO;;;;;;;;AAST,eAAsB,oBACpB,UACA,MACA,MACA,QACkB;CAClB,MAAM,QAAQ,SAAS;AACvB,KAAI,CAAC,MACH,OAAM,IAAI,MAAM,iBAAiB,OAAO;CAE1C,MAAM,SAAS,MAAM,OAAO,UAAU,KAAK;AAC3C,KAAI,CAAC,OAAO,QACV,QAAO,eAAe,OAAO,OAAO,KAAK;AAE3C,QAAO,MAAM,QAAQ,OAAO,MAAM,OAAO;;;;;;;;;AAU3C,SAAgB,kBACd,UACuB;AACvB,QAAO,OAAO,QAAQ,SAAS,CAAC,KAAK,CAAC,MAAM,WAAW;EACrD,MAAM,aAAa,iBACjB,MAAM,OACP;EACD,MAAM,MAA2B;GAC/B;GACA,aAAa,MAAM;GACnB;GACD;AACD,MAAI,MAAM,YACR,KAAI,cAAc,MAAM;AAE1B,SAAO;GACP"}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { AgentToolDefinition, ToolAnnotations } from "../../../shared/src/agent.js";
|
|
2
|
+
import "../../../shared/src/index.js";
|
|
3
|
+
|
|
4
|
+
//#region src/core/agent/tools/function-tool.d.ts
|
|
5
|
+
interface FunctionTool {
|
|
6
|
+
type: "function";
|
|
7
|
+
/**
|
|
8
|
+
* Optional. When this tool is placed in a keyed record
|
|
9
|
+
* (`tools: { my_tool: ... }` or the function form), the agents plugin
|
|
10
|
+
* overrides this with the record key at index-build time. Only set it
|
|
11
|
+
* explicitly when constructing a `FunctionTool` outside any
|
|
12
|
+
* keyed-record context.
|
|
13
|
+
*/
|
|
14
|
+
name?: string;
|
|
15
|
+
description?: string | null;
|
|
16
|
+
parameters?: Record<string, unknown> | null;
|
|
17
|
+
strict?: boolean | null;
|
|
18
|
+
/**
|
|
19
|
+
* Behavioural hints that drive the agents plugin's approval gate and the
|
|
20
|
+
* client's approval-card styling. Prefer setting `effect` (one of
|
|
21
|
+
* `"read" | "write" | "update" | "destructive"`) — any mutating value
|
|
22
|
+
* forces HITL approval before `execute()` runs. Legacy `destructive: true`
|
|
23
|
+
* is still honoured. Must be preserved through {@link
|
|
24
|
+
* functionToolToDefinition} so the plugin sees them when building agent
|
|
25
|
+
* tool indexes.
|
|
26
|
+
*/
|
|
27
|
+
annotations?: ToolAnnotations;
|
|
28
|
+
/**
|
|
29
|
+
* Returns any shape; downstream `normalizeToolResult` serializes to a
|
|
30
|
+
* string before handing the value to the LLM.
|
|
31
|
+
*/
|
|
32
|
+
execute: (args: Record<string, unknown>) => unknown | Promise<unknown>;
|
|
33
|
+
}
|
|
34
|
+
declare function isFunctionTool(value: unknown): value is FunctionTool;
|
|
35
|
+
declare function functionToolToDefinition(tool: FunctionTool): AgentToolDefinition;
|
|
36
|
+
//#endregion
|
|
37
|
+
export { FunctionTool, functionToolToDefinition, isFunctionTool };
|
|
38
|
+
//# sourceMappingURL=function-tool.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"function-tool.d.ts","names":[],"sources":["../../../../src/core/agent/tools/function-tool.ts"],"mappings":";;;;UAEiB,YAAA;EACf,IAAA;;AADF;;;;;;EASE,IAAA;EACA,WAAA;EACA,UAAA,GAAa,MAAA;EACb,MAAA;EAHA;;;;;;;;;EAaA,WAAA,GAAc,eAAA;EAKwC;;;AAGxD;EAHE,OAAA,GAAU,IAAA,EAAM,MAAA,gCAAsC,OAAA;AAAA;AAAA,iBAGxC,cAAA,CAAe,KAAA,YAAiB,KAAA,IAAS,YAAA;AAAA,iBAUzC,wBAAA,CACd,IAAA,EAAM,YAAA,GACL,mBAAA"}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
//#region src/core/agent/tools/function-tool.ts
|
|
2
|
+
function isFunctionTool(value) {
|
|
3
|
+
if (typeof value !== "object" || value === null) return false;
|
|
4
|
+
const obj = value;
|
|
5
|
+
return obj.type === "function" && typeof obj.execute === "function";
|
|
6
|
+
}
|
|
7
|
+
function functionToolToDefinition(tool) {
|
|
8
|
+
const name = tool.name ?? "";
|
|
9
|
+
return {
|
|
10
|
+
name,
|
|
11
|
+
description: tool.description ?? name,
|
|
12
|
+
parameters: tool.parameters ?? {
|
|
13
|
+
type: "object",
|
|
14
|
+
properties: {}
|
|
15
|
+
},
|
|
16
|
+
...tool.annotations ? { annotations: tool.annotations } : {}
|
|
17
|
+
};
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
//#endregion
|
|
21
|
+
export { functionToolToDefinition, isFunctionTool };
|
|
22
|
+
//# sourceMappingURL=function-tool.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"function-tool.js","names":[],"sources":["../../../../src/core/agent/tools/function-tool.ts"],"sourcesContent":["import type { AgentToolDefinition, ToolAnnotations } from \"shared\";\n\nexport interface FunctionTool {\n type: \"function\";\n /**\n * Optional. When this tool is placed in a keyed record\n * (`tools: { my_tool: ... }` or the function form), the agents plugin\n * overrides this with the record key at index-build time. Only set it\n * explicitly when constructing a `FunctionTool` outside any\n * keyed-record context.\n */\n name?: string;\n description?: string | null;\n parameters?: Record<string, unknown> | null;\n strict?: boolean | null;\n /**\n * Behavioural hints that drive the agents plugin's approval gate and the\n * client's approval-card styling. Prefer setting `effect` (one of\n * `\"read\" | \"write\" | \"update\" | \"destructive\"`) — any mutating value\n * forces HITL approval before `execute()` runs. Legacy `destructive: true`\n * is still honoured. 
Must be preserved through {@link\n * functionToolToDefinition} so the plugin sees them when building agent\n * tool indexes.\n */\n annotations?: ToolAnnotations;\n /**\n * Returns any shape; downstream `normalizeToolResult` serializes to a\n * string before handing the value to the LLM.\n */\n execute: (args: Record<string, unknown>) => unknown | Promise<unknown>;\n}\n\nexport function isFunctionTool(value: unknown): value is FunctionTool {\n if (typeof value !== \"object\" || value === null) return false;\n const obj = value as Record<string, unknown>;\n // `name` is intentionally not required: the agents plugin overrides it\n // with the record key (`tools: { my_tool: tool({...}) }` -> \"my_tool\")\n // so requiring it on the FunctionTool shape rejects perfectly-valid\n // `tool({ description, schema, execute })` calls that omit the name.\n return obj.type === \"function\" && typeof obj.execute === \"function\";\n}\n\nexport function functionToolToDefinition(\n tool: FunctionTool,\n): AgentToolDefinition {\n // `name` is guaranteed to be overridden downstream by the record key\n // when the tool is registered through `AgentDefinition.tools`. Falling\n // back to an empty string here keeps the type honest without\n // surfacing a sentinel that could leak into a non-record context.\n const name = tool.name ?? \"\";\n return {\n name,\n description: tool.description ?? name,\n parameters: (tool.parameters as AgentToolDefinition[\"parameters\"]) ?? {\n type: \"object\",\n properties: {},\n },\n ...(tool.annotations ? { annotations: tool.annotations } : {}),\n };\n}\n"],"mappings":";AAgCA,SAAgB,eAAe,OAAuC;AACpE,KAAI,OAAO,UAAU,YAAY,UAAU,KAAM,QAAO;CACxD,MAAM,MAAM;AAKZ,QAAO,IAAI,SAAS,cAAc,OAAO,IAAI,YAAY;;AAG3D,SAAgB,yBACd,MACqB;CAKrB,MAAM,OAAO,KAAK,QAAQ;AAC1B,QAAO;EACL;EACA,aAAa,KAAK,eAAe;EACjC,YAAa,KAAK,cAAoD;GACpE,MAAM;GACN,YAAY,EAAE;GACf;EACD,GAAI,KAAK,cAAc,EAAE,aAAa,KAAK,aAAa,GAAG,EAAE;EAC9D"}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { McpEndpointConfig } from "../../../connectors/mcp/types.js";
|
|
2
|
+
import "../../../connectors/mcp/index.js";
|
|
3
|
+
|
|
4
|
+
//#region src/core/agent/tools/hosted-tools.d.ts
|
|
5
|
+
interface GenieTool {
|
|
6
|
+
type: "genie-space";
|
|
7
|
+
genie_space: {
|
|
8
|
+
id: string;
|
|
9
|
+
};
|
|
10
|
+
}
|
|
11
|
+
interface VectorSearchIndexTool {
|
|
12
|
+
type: "vector_search_index";
|
|
13
|
+
vector_search_index: {
|
|
14
|
+
name: string;
|
|
15
|
+
};
|
|
16
|
+
}
|
|
17
|
+
interface CustomMcpServerTool {
|
|
18
|
+
type: "custom_mcp_server";
|
|
19
|
+
custom_mcp_server: {
|
|
20
|
+
app_name: string;
|
|
21
|
+
app_url: string;
|
|
22
|
+
};
|
|
23
|
+
}
|
|
24
|
+
interface ExternalMcpServerTool {
|
|
25
|
+
type: "external_mcp_server";
|
|
26
|
+
external_mcp_server: {
|
|
27
|
+
connection_name: string;
|
|
28
|
+
};
|
|
29
|
+
}
|
|
30
|
+
type HostedTool = GenieTool | VectorSearchIndexTool | CustomMcpServerTool | ExternalMcpServerTool;
|
|
31
|
+
declare function isHostedTool(value: unknown): value is HostedTool;
|
|
32
|
+
declare function resolveHostedTools(tools: HostedTool[]): McpEndpointConfig[];
|
|
33
|
+
/**
|
|
34
|
+
* Factory for declaring a custom MCP server tool.
|
|
35
|
+
*
|
|
36
|
+
* Replaces the verbose `{ type: "custom_mcp_server", custom_mcp_server: { app_name, app_url } }`
|
|
37
|
+
* wrapper with a concise positional call.
|
|
38
|
+
*
|
|
39
|
+
* Example:
|
|
40
|
+
* ```ts
|
|
41
|
+
* mcpServer("my-app", "https://my-app.databricksapps.com/mcp")
|
|
42
|
+
* ```
|
|
43
|
+
*/
|
|
44
|
+
declare function mcpServer(name: string, url: string): CustomMcpServerTool;
|
|
45
|
+
//#endregion
|
|
46
|
+
export { HostedTool, isHostedTool, mcpServer, resolveHostedTools };
|
|
47
|
+
//# sourceMappingURL=hosted-tools.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"hosted-tools.d.ts","names":[],"sources":["../../../../src/core/agent/tools/hosted-tools.ts"],"mappings":";;;;UAEiB,SAAA;EACf,IAAA;EACA,WAAA;IAAe,EAAA;EAAA;AAAA;AAAA,UAGA,qBAAA;EACf,IAAA;EACA,mBAAA;IAAuB,IAAA;EAAA;AAAA;AAAA,UAGR,mBAAA;EACf,IAAA;EACA,iBAAA;IAAqB,QAAA;IAAkB,OAAA;EAAA;AAAA;AAAA,UAGxB,qBAAA;EACf,IAAA;EACA,mBAAA;IAAuB,eAAA;EAAA;AAAA;AAAA,KAGb,UAAA,GACR,SAAA,GACA,qBAAA,GACA,mBAAA,GACA,qBAAA;AAAA,iBASY,YAAA,CAAa,KAAA,YAAiB,KAAA,IAAS,UAAA;AAAA,iBA0CvC,kBAAA,CAAmB,KAAA,EAAO,UAAA,KAAe,iBAAA;;;;AA5DzD;;;;;;;;iBA2EgB,SAAA,CAAU,IAAA,UAAc,GAAA,WAAc,mBAAA"}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
//#region src/core/agent/tools/hosted-tools.ts
|
|
2
|
+
const HOSTED_TOOL_TYPES = new Set([
|
|
3
|
+
"genie-space",
|
|
4
|
+
"vector_search_index",
|
|
5
|
+
"custom_mcp_server",
|
|
6
|
+
"external_mcp_server"
|
|
7
|
+
]);
|
|
8
|
+
function isHostedTool(value) {
|
|
9
|
+
if (typeof value !== "object" || value === null) return false;
|
|
10
|
+
const obj = value;
|
|
11
|
+
return typeof obj.type === "string" && HOSTED_TOOL_TYPES.has(obj.type);
|
|
12
|
+
}
|
|
13
|
+
/**
|
|
14
|
+
* Resolves HostedTool configs into MCP endpoint configurations
|
|
15
|
+
* that the MCP client can connect to.
|
|
16
|
+
*/
|
|
17
|
+
function resolveHostedTool(tool) {
|
|
18
|
+
switch (tool.type) {
|
|
19
|
+
case "genie-space": return {
|
|
20
|
+
name: `genie-${tool.genie_space.id}`,
|
|
21
|
+
url: `/api/2.0/mcp/genie/${tool.genie_space.id}`
|
|
22
|
+
};
|
|
23
|
+
case "vector_search_index": {
|
|
24
|
+
const parts = tool.vector_search_index.name.split(".");
|
|
25
|
+
if (parts.length !== 3) throw new Error(`vector_search_index name must be 3-part dotted (catalog.schema.index), got: ${tool.vector_search_index.name}`);
|
|
26
|
+
return {
|
|
27
|
+
name: `vs-${parts.join("-")}`,
|
|
28
|
+
url: `/api/2.0/mcp/vector-search/${parts[0]}/${parts[1]}/${parts[2]}`
|
|
29
|
+
};
|
|
30
|
+
}
|
|
31
|
+
case "custom_mcp_server": return {
|
|
32
|
+
name: tool.custom_mcp_server.app_name,
|
|
33
|
+
url: tool.custom_mcp_server.app_url
|
|
34
|
+
};
|
|
35
|
+
case "external_mcp_server": return {
|
|
36
|
+
name: tool.external_mcp_server.connection_name,
|
|
37
|
+
url: `/api/2.0/mcp/external/${tool.external_mcp_server.connection_name}`
|
|
38
|
+
};
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
function resolveHostedTools(tools) {
|
|
42
|
+
return tools.map(resolveHostedTool);
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Factory for declaring a custom MCP server tool.
|
|
46
|
+
*
|
|
47
|
+
* Replaces the verbose `{ type: "custom_mcp_server", custom_mcp_server: { app_name, app_url } }`
|
|
48
|
+
* wrapper with a concise positional call.
|
|
49
|
+
*
|
|
50
|
+
* Example:
|
|
51
|
+
* ```ts
|
|
52
|
+
* mcpServer("my-app", "https://my-app.databricksapps.com/mcp")
|
|
53
|
+
* ```
|
|
54
|
+
*/
|
|
55
|
+
function mcpServer(name, url) {
|
|
56
|
+
return {
|
|
57
|
+
type: "custom_mcp_server",
|
|
58
|
+
custom_mcp_server: {
|
|
59
|
+
app_name: name,
|
|
60
|
+
app_url: url
|
|
61
|
+
}
|
|
62
|
+
};
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
//#endregion
|
|
66
|
+
export { isHostedTool, mcpServer, resolveHostedTools };
|
|
67
|
+
//# sourceMappingURL=hosted-tools.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"hosted-tools.js","names":[],"sources":["../../../../src/core/agent/tools/hosted-tools.ts"],"sourcesContent":["import type { McpEndpointConfig } from \"../../../connectors/mcp\";\n\nexport interface GenieTool {\n type: \"genie-space\";\n genie_space: { id: string };\n}\n\nexport interface VectorSearchIndexTool {\n type: \"vector_search_index\";\n vector_search_index: { name: string };\n}\n\nexport interface CustomMcpServerTool {\n type: \"custom_mcp_server\";\n custom_mcp_server: { app_name: string; app_url: string };\n}\n\nexport interface ExternalMcpServerTool {\n type: \"external_mcp_server\";\n external_mcp_server: { connection_name: string };\n}\n\nexport type HostedTool =\n | GenieTool\n | VectorSearchIndexTool\n | CustomMcpServerTool\n | ExternalMcpServerTool;\n\nconst HOSTED_TOOL_TYPES = new Set([\n \"genie-space\",\n \"vector_search_index\",\n \"custom_mcp_server\",\n \"external_mcp_server\",\n]);\n\nexport function isHostedTool(value: unknown): value is HostedTool {\n if (typeof value !== \"object\" || value === null) return false;\n const obj = value as Record<string, unknown>;\n return typeof obj.type === \"string\" && HOSTED_TOOL_TYPES.has(obj.type);\n}\n\n/**\n * Resolves HostedTool configs into MCP endpoint configurations\n * that the MCP client can connect to.\n */\nfunction resolveHostedTool(tool: HostedTool): McpEndpointConfig {\n switch (tool.type) {\n case \"genie-space\":\n return {\n name: `genie-${tool.genie_space.id}`,\n url: `/api/2.0/mcp/genie/${tool.genie_space.id}`,\n };\n case \"vector_search_index\": {\n const parts = tool.vector_search_index.name.split(\".\");\n if (parts.length !== 3) {\n throw new Error(\n `vector_search_index name must be 3-part dotted (catalog.schema.index), got: ${tool.vector_search_index.name}`,\n );\n }\n return {\n name: `vs-${parts.join(\"-\")}`,\n url: `/api/2.0/mcp/vector-search/${parts[0]}/${parts[1]}/${parts[2]}`,\n };\n }\n case \"custom_mcp_server\":\n return {\n name: 
tool.custom_mcp_server.app_name,\n url: tool.custom_mcp_server.app_url,\n };\n case \"external_mcp_server\":\n return {\n name: tool.external_mcp_server.connection_name,\n url: `/api/2.0/mcp/external/${tool.external_mcp_server.connection_name}`,\n };\n }\n}\n\nexport function resolveHostedTools(tools: HostedTool[]): McpEndpointConfig[] {\n return tools.map(resolveHostedTool);\n}\n\n/**\n * Factory for declaring a custom MCP server tool.\n *\n * Replaces the verbose `{ type: \"custom_mcp_server\", custom_mcp_server: { app_name, app_url } }`\n * wrapper with a concise positional call.\n *\n * Example:\n * ```ts\n * mcpServer(\"my-app\", \"https://my-app.databricksapps.com/mcp\")\n * ```\n */\nexport function mcpServer(name: string, url: string): CustomMcpServerTool {\n return {\n type: \"custom_mcp_server\",\n custom_mcp_server: { app_name: name, app_url: url },\n };\n}\n"],"mappings":";AA4BA,MAAM,oBAAoB,IAAI,IAAI;CAChC;CACA;CACA;CACA;CACD,CAAC;AAEF,SAAgB,aAAa,OAAqC;AAChE,KAAI,OAAO,UAAU,YAAY,UAAU,KAAM,QAAO;CACxD,MAAM,MAAM;AACZ,QAAO,OAAO,IAAI,SAAS,YAAY,kBAAkB,IAAI,IAAI,KAAK;;;;;;AAOxE,SAAS,kBAAkB,MAAqC;AAC9D,SAAQ,KAAK,MAAb;EACE,KAAK,cACH,QAAO;GACL,MAAM,SAAS,KAAK,YAAY;GAChC,KAAK,sBAAsB,KAAK,YAAY;GAC7C;EACH,KAAK,uBAAuB;GAC1B,MAAM,QAAQ,KAAK,oBAAoB,KAAK,MAAM,IAAI;AACtD,OAAI,MAAM,WAAW,EACnB,OAAM,IAAI,MACR,+EAA+E,KAAK,oBAAoB,OACzG;AAEH,UAAO;IACL,MAAM,MAAM,MAAM,KAAK,IAAI;IAC3B,KAAK,8BAA8B,MAAM,GAAG,GAAG,MAAM,GAAG,GAAG,MAAM;IAClE;;EAEH,KAAK,oBACH,QAAO;GACL,MAAM,KAAK,kBAAkB;GAC7B,KAAK,KAAK,kBAAkB;GAC7B;EACH,KAAK,sBACH,QAAO;GACL,MAAM,KAAK,oBAAoB;GAC/B,KAAK,yBAAyB,KAAK,oBAAoB;GACxD;;;AAIP,SAAgB,mBAAmB,OAA0C;AAC3E,QAAO,MAAM,IAAI,kBAAkB;;;;;;;;;;;;;AAcrC,SAAgB,UAAU,MAAc,KAAkC;AACxE,QAAO;EACL,MAAM;EACN,mBAAmB;GAAE,UAAU;GAAM,SAAS;GAAK;EACpD"}
|
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
import { AppKitMcpClient, McpConnectAllResult } from "../../../connectors/mcp/client.js";
|
|
2
|
+
import { FunctionTool, functionToolToDefinition, isFunctionTool } from "./function-tool.js";
|
|
3
|
+
import { HostedTool, isHostedTool, mcpServer, resolveHostedTools } from "./hosted-tools.js";
|
|
4
|
+
import { ToolEntry, ToolRegistry, defineTool, executeFromRegistry, toolsFromRegistry } from "./define-tool.js";
|
|
5
|
+
import { ToolConfig, tool } from "./tool.js";
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import { AppKitMcpClient } from "../../../connectors/mcp/client.js";
|
|
2
|
+
import { tool } from "./tool.js";
|
|
3
|
+
import { defineTool, executeFromRegistry, toolsFromRegistry } from "./define-tool.js";
|
|
4
|
+
import { functionToolToDefinition, isFunctionTool } from "./function-tool.js";
|
|
5
|
+
import { isHostedTool, mcpServer, resolveHostedTools } from "./hosted-tools.js";
|
|
6
|
+
|
|
7
|
+
export { };
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { toJSONSchema } from "zod";
|
|
2
|
+
|
|
3
|
+
//#region src/core/agent/tools/json-schema.ts
|
|
4
|
+
/**
|
|
5
|
+
* Converts a Zod schema to JSON Schema suitable for an LLM tool-call
|
|
6
|
+
* `parameters` field.
|
|
7
|
+
*
|
|
8
|
+
* Wraps `zod`'s `toJSONSchema()` and strips the top-level `$schema` annotation
|
|
9
|
+
* that Zod v4 emits by default (e.g. `"https://json-schema.org/draft/..."`).
|
|
10
|
+
* The Databricks Mosaic serving endpoint forwards tool schemas to Google's
|
|
11
|
+
* Gemini `function_declarations` format, which rejects any top-level key it
|
|
12
|
+
* doesn't explicitly recognize — including `$schema` — with a 400
|
|
13
|
+
* `Invalid JSON payload received. Unknown name "$schema"` error. Other LLM
|
|
14
|
+
* providers either ignore the field or also trip on it, so stripping here is
|
|
15
|
+
* safe across backends.
|
|
16
|
+
*/
|
|
17
|
+
function toToolJSONSchema(schema) {
	// Convert via zod first, then discard the top-level `$schema` annotation
	// (rationale in the comment above: Gemini rejects unknown top-level keys).
	const raw = toJSONSchema(schema);
	const { $schema: _ignored, ...rest } = raw;
	return rest;
}
|
|
21
|
+
|
|
22
|
+
//#endregion
|
|
23
|
+
export { toToolJSONSchema };
|
|
24
|
+
//# sourceMappingURL=json-schema.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"json-schema.js","names":[],"sources":["../../../../src/core/agent/tools/json-schema.ts"],"sourcesContent":["import { toJSONSchema, type z } from \"zod\";\n\n/**\n * Converts a Zod schema to JSON Schema suitable for an LLM tool-call\n * `parameters` field.\n *\n * Wraps `zod`'s `toJSONSchema()` and strips the top-level `$schema` annotation\n * that Zod v4 emits by default (e.g. `\"https://json-schema.org/draft/...\"`).\n * The Databricks Mosaic serving endpoint forwards tool schemas to Google's\n * Gemini `function_declarations` format, which rejects any top-level key it\n * doesn't explicitly recognize — including `$schema` — with a 400\n * `Invalid JSON payload received. Unknown name \"$schema\"` error. Other LLM\n * providers either ignore the field or also trip on it, so stripping here is\n * safe across backends.\n */\nexport function toToolJSONSchema(schema: z.ZodType): Record<string, unknown> {\n const raw = toJSONSchema(schema) as Record<string, unknown>;\n const { $schema: _ignored, ...rest } = raw;\n return rest;\n}\n"],"mappings":";;;;;;;;;;;;;;;;AAeA,SAAgB,iBAAiB,QAA4C;CAE3E,MAAM,EAAE,SAAS,UAAU,GAAG,SADlB,aAAa,OAAO;AAEhC,QAAO"}
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
//#region src/core/agent/tools/sql-policy.ts
|
|
2
|
+
/**
|
|
3
|
+
* Conservative SQL classifier used by agent-facing query tools to enforce
|
|
4
|
+
* `readOnly: true` annotations at execution time.
|
|
5
|
+
*
|
|
6
|
+
* Why a hand-rolled tokenizer rather than `node-sql-parser` or `pgsql-parser`:
|
|
7
|
+
*
|
|
8
|
+
* - `node-sql-parser`'s Hive/Spark dialect coverage rejects common Databricks
|
|
9
|
+
* SQL patterns (three-part `catalog.schema.table` names, `SHOW TABLES IN`,
|
|
10
|
+
* `DESCRIBE EXTENDED`, `EXPLAIN`) that must be allowed by a read-only
|
|
11
|
+
* classifier. Its PostgreSQL grammar rejects `SHOW`/`DESCRIBE` too.
|
|
12
|
+
* - `pgsql-parser` (libpg_query) is a native binding and fails to install
|
|
13
|
+
* cleanly on every Databricks App runtime we care about.
|
|
14
|
+
*
|
|
15
|
+
* We don't need to fully parse SQL — we only need to decide whether every
|
|
16
|
+
* statement in the batch starts with a read-only keyword. A small tokenizer
|
|
17
|
+
* that correctly strips strings, identifiers, and comments is enough and
|
|
18
|
+
* costs no extra dependencies.
|
|
19
|
+
*
|
|
20
|
+
* What this classifier guarantees (when it returns `readOnly: true`):
|
|
21
|
+
*
|
|
22
|
+
* - Every semicolon-separated statement outside a string, identifier, or
|
|
23
|
+
* comment begins with `SELECT`, `WITH`, `SHOW`, `EXPLAIN`, `DESCRIBE`, or
|
|
24
|
+
* `DESC`.
|
|
25
|
+
* - `SELECT 1; DROP TABLE x` is rejected (stacked write detected).
|
|
26
|
+
* - `SELECT 'value; DROP TABLE x'` passes (literal inside a string).
|
|
27
|
+
* - `-- DROP TABLE x\nSELECT 1` passes (comment stripped).
|
|
28
|
+
* - `SELECT 1 <block-comment ; DROP block-comment>` passes (comment stripped).
|
|
29
|
+
*
|
|
30
|
+
* What this classifier does NOT guarantee:
|
|
31
|
+
*
|
|
32
|
+
* - A `SELECT` statement may still have side effects via function calls
|
|
33
|
+
* (`SELECT pg_advisory_lock(...)`, `SELECT lo_import('/etc/passwd')`, CTEs
|
|
34
|
+
* with DML in Postgres 9.1+). Callers that need stronger guarantees should
|
|
35
|
+
* combine this check with a runtime mechanism: for PostgreSQL, execute the
|
|
36
|
+
* statement inside a dedicated client's `BEGIN READ ONLY … ROLLBACK`
|
|
37
|
+
* transaction (see `LakebasePlugin.runReadOnlyStatement`). A batched
|
|
38
|
+
* `pool.query("BEGIN READ ONLY; <stmt>; ROLLBACK")` cannot be used because
|
|
39
|
+
* the Postgres Extended Query protocol rejects multi-statement prepared
|
|
40
|
+
* queries, which silently breaks parameterized SQL.
|
|
41
|
+
*/
|
|
42
|
+
/**
 * Leading keywords that qualify a statement as read-only.
 * `WITH` admits CTE-prefixed queries; `DESC` is the short form of `DESCRIBE`.
 */
const READ_ONLY_KEYWORDS = new Set([
	"SELECT",
	"WITH",
	"SHOW",
	"EXPLAIN",
	"DESCRIBE",
	"DESC"
]);
/**
 * Classify a SQL string as read-only or not. See module docstring for the
 * precise guarantee this offers.
 *
 * Returns `{ readOnly: true, statements }` when every statement starts with
 * an allowed keyword, otherwise `{ readOnly: false, reason }`.
 */
function classifyReadOnly(sql) {
	const { cleaned, unterminated } = stripCommentsAndQuoted(sql);
	if (unterminated) {
		return {
			readOnly: false,
			reason: `SQL has an unterminated ${unterminated} literal`
		};
	}
	const statements = splitStatements(cleaned);
	if (statements.length === 0) {
		return {
			readOnly: false,
			reason: "SQL is empty or contains only comments"
		};
	}
	for (const [index, stmt] of statements.entries()) {
		const keyword = firstKeyword(stmt);
		if (!keyword) {
			return {
				readOnly: false,
				reason: `statement ${index + 1} of ${statements.length} is empty`
			};
		}
		if (!READ_ONLY_KEYWORDS.has(keyword.toUpperCase())) {
			return {
				readOnly: false,
				reason: `statement starts with '${keyword}'; only SELECT, WITH, SHOW, EXPLAIN, DESCRIBE, DESC are allowed in read-only mode`
			};
		}
	}
	return {
		readOnly: true,
		statements: statements.length
	};
}
|
|
82
|
+
/**
 * Assert `sql` is read-only or throw {@link ReadOnlySqlViolation}. Suitable
 * for calling from agent-tool handlers where the thrown string surfaces back
 * to the LLM as the tool's error output.
 */
function assertReadOnlySql(sql) {
	const verdict = classifyReadOnly(sql);
	if (verdict.readOnly) return;
	throw new ReadOnlySqlViolation(verdict.reason);
}
|
|
91
|
+
/**
 * Error thrown when a SQL string fails the read-only classification.
 * The classifier's reason is embedded in the message so it surfaces verbatim
 * to the caller.
 */
var ReadOnlySqlViolation = class extends Error {
	constructor(reason) {
		const detail = `SQL read-only policy violation: ${reason}`;
		super(detail);
		this.name = "ReadOnlySqlViolation";
	}
};
|
|
97
|
+
/**
 * Walk `sql` character-by-character and replace every string literal,
 * identifier quote, and comment body with spaces (newlines are preserved so
 * positions stay roughly stable). Leaves structural tokens — semicolons,
 * whitespace, barewords, operators — in place so the caller can safely split
 * on `;` and inspect leading keywords.
 *
 * Handles: `--` line comments, nested block comments, `'...'` strings with
 * `''` and backslash escapes, `E'...'` / `e'...'` prefixed strings,
 * `"..."` identifiers with `""` escape, backtick identifiers with
 * `` `` `` escape, and `$tag$...$tag$` dollar-quoted strings.
 *
 * Returns `{ cleaned, unterminated }` where `unterminated` is null on
 * success, or one of "string" | "identifier" | "block comment" |
 * "dollar-quoted string" if input ended inside an open literal/comment.
 */
function stripCommentsAndQuoted(sql) {
	const out = [];
	let i = 0;
	const n = sql.length;
	// Set (and possibly overwritten) whenever a literal/comment runs to EOF.
	let unterminated = null;
	while (i < n) {
		const ch = sql[i];
		// One-character lookahead; empty string at end-of-input.
		const next = i + 1 < n ? sql[i + 1] : "";
		// `--` line comment: blank out through (but not including) the newline.
		if (ch === "-" && next === "-") {
			out.push(" ");
			i += 2;
			while (i < n && sql[i] !== "\n") {
				out.push(" ");
				i++;
			}
			continue;
		}
		// `/* ... */` block comment with nesting (PostgreSQL-style).
		if (ch === "/" && next === "*") {
			out.push(" ");
			i += 2;
			let depth = 1;
			while (i < n && depth > 0) {
				if (sql[i] === "/" && sql[i + 1] === "*") {
					out.push(" ");
					i += 2;
					depth++;
					continue;
				}
				if (sql[i] === "*" && sql[i + 1] === "/") {
					out.push(" ");
					i += 2;
					depth--;
					continue;
				}
				// Keep newlines so splitStatements sees the same line structure.
				out.push(sql[i] === "\n" ? "\n" : " ");
				i++;
			}
			if (depth > 0) unterminated = "block comment";
			continue;
		}
		// `'...'` string, optionally with an `E`/`e` escape prefix (PostgreSQL).
		// NOTE(review): the E-prefix branch also fires when `E` ends a longer
		// bareword directly followed by a quote; stripping is conservative
		// either way — confirm against callers if exact positions matter.
		if (ch === "'" || ch === "E" && next === "'" || ch === "e" && next === "'") {
			if (ch === "E" || ch === "e") {
				out.push(" ");
				i++;
			}
			out.push(" ");
			i++;
			let closed = false;
			while (i < n) {
				// `''` is an escaped quote inside the literal, not a terminator.
				if (sql[i] === "'" && sql[i + 1] === "'") {
					out.push(" ");
					i += 2;
					continue;
				}
				// Backslash escape consumes the following character.
				if (sql[i] === "\\" && sql[i + 1]) {
					out.push(" ");
					i += 2;
					continue;
				}
				if (sql[i] === "'") {
					out.push(" ");
					i++;
					closed = true;
					break;
				}
				out.push(sql[i] === "\n" ? "\n" : " ");
				i++;
			}
			if (!closed) unterminated = "string";
			continue;
		}
		// `"..."` double-quoted identifier (ANSI), `""` escapes a quote.
		if (ch === "\"") {
			out.push(" ");
			i++;
			let closed = false;
			while (i < n) {
				if (sql[i] === "\"" && sql[i + 1] === "\"") {
					out.push(" ");
					i += 2;
					continue;
				}
				if (sql[i] === "\"") {
					out.push(" ");
					i++;
					closed = true;
					break;
				}
				out.push(sql[i] === "\n" ? "\n" : " ");
				i++;
			}
			if (!closed) unterminated = "identifier";
			continue;
		}
		// Backtick identifier (Databricks), doubled backtick escapes.
		if (ch === "`") {
			out.push(" ");
			i++;
			let closed = false;
			while (i < n) {
				if (sql[i] === "`" && sql[i + 1] === "`") {
					out.push(" ");
					i += 2;
					continue;
				}
				if (sql[i] === "`") {
					out.push(" ");
					i++;
					closed = true;
					break;
				}
				out.push(sql[i] === "\n" ? "\n" : " ");
				i++;
			}
			if (!closed) unterminated = "identifier";
			continue;
		}
		// `$tag$ ... $tag$` dollar-quoted string (PostgreSQL). The tag may be
		// empty (`$$`) or a bareword; content runs to the matching close tag.
		if (ch === "$") {
			const tagMatch = sql.slice(i).match(/^\$([A-Za-z_][A-Za-z0-9_]*)?\$/);
			if (tagMatch) {
				const tag = tagMatch[0];
				out.push(" ".repeat(tag.length));
				i += tag.length;
				const closeIdx = sql.indexOf(tag, i);
				if (closeIdx === -1) {
					// No close tag: blank out the rest and flag as unterminated.
					while (i < n) {
						out.push(sql[i] === "\n" ? "\n" : " ");
						i++;
					}
					unterminated = "dollar-quoted string";
				} else {
					while (i < closeIdx) {
						out.push(sql[i] === "\n" ? "\n" : " ");
						i++;
					}
					out.push(" ".repeat(tag.length));
					i += tag.length;
				}
				continue;
			}
			// A `$` that doesn't open a dollar-quote falls through unchanged.
		}
		// Structural / ordinary character: copy through verbatim.
		out.push(ch);
		i++;
	}
	return {
		cleaned: out.join(""),
		unterminated
	};
}
|
|
244
|
+
/** Split on unquoted `;`, trim each segment, and drop empty ones. */
function splitStatements(cleanedSql) {
	const segments = [];
	for (const piece of cleanedSql.split(";")) {
		const trimmed = piece.trim();
		if (trimmed.length > 0) segments.push(trimmed);
	}
	return segments;
}
|
|
248
|
+
/** Return the first bareword keyword of a statement, or null if none. */
function firstKeyword(stmt) {
	const m = /^\s*([A-Za-z_][A-Za-z0-9_]*)/.exec(stmt);
	return m === null ? null : m[1];
}
|
|
253
|
+
|
|
254
|
+
//#endregion
|
|
255
|
+
export { assertReadOnlySql };
|
|
256
|
+
//# sourceMappingURL=sql-policy.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"sql-policy.js","names":[],"sources":["../../../../src/core/agent/tools/sql-policy.ts"],"sourcesContent":["/**\n * Conservative SQL classifier used by agent-facing query tools to enforce\n * `readOnly: true` annotations at execution time.\n *\n * Why a hand-rolled tokenizer rather than `node-sql-parser` or `pgsql-parser`:\n *\n * - `node-sql-parser`'s Hive/Spark dialect coverage rejects common Databricks\n * SQL patterns (three-part `catalog.schema.table` names, `SHOW TABLES IN`,\n * `DESCRIBE EXTENDED`, `EXPLAIN`) that must be allowed by a read-only\n * classifier. Its PostgreSQL grammar rejects `SHOW`/`DESCRIBE` too.\n * - `pgsql-parser` (libpg_query) is a native binding and fails to install\n * cleanly on every Databricks App runtime we care about.\n *\n * We don't need to fully parse SQL — we only need to decide whether every\n * statement in the batch starts with a read-only keyword. A small tokenizer\n * that correctly strips strings, identifiers, and comments is enough and\n * costs no extra dependencies.\n *\n * What this classifier guarantees (when it returns `readOnly: true`):\n *\n * - Every semicolon-separated statement outside a string, identifier, or\n * comment begins with `SELECT`, `WITH`, `SHOW`, `EXPLAIN`, `DESCRIBE`, or\n * `DESC`.\n * - `SELECT 1; DROP TABLE x` is rejected (stacked write detected).\n * - `SELECT 'value; DROP TABLE x'` passes (literal inside a string).\n * - `-- DROP TABLE x\\nSELECT 1` passes (comment stripped).\n * - `SELECT 1 <block-comment ; DROP block-comment>` passes (comment stripped).\n *\n * What this classifier does NOT guarantee:\n *\n * - A `SELECT` statement may still have side effects via function calls\n * (`SELECT pg_advisory_lock(...)`, `SELECT lo_import('/etc/passwd')`, CTEs\n * with DML in Postgres 9.1+). 
Callers that need stronger guarantees should\n * combine this check with a runtime mechanism: for PostgreSQL, execute the\n * statement inside a dedicated client's `BEGIN READ ONLY … ROLLBACK`\n * transaction (see `LakebasePlugin.runReadOnlyStatement`). A batched\n * `pool.query(\"BEGIN READ ONLY; <stmt>; ROLLBACK\")` cannot be used because\n * the Postgres Extended Query protocol rejects multi-statement prepared\n * queries, which silently breaks parameterized SQL.\n */\n\nconst READ_ONLY_KEYWORDS = new Set([\n \"SELECT\",\n \"WITH\",\n \"SHOW\",\n \"EXPLAIN\",\n \"DESCRIBE\",\n \"DESC\",\n]);\n\ntype SqlReadOnlyResult =\n | { readOnly: true; statements: number }\n | { readOnly: false; reason: string };\n\n/**\n * Classify a SQL string as read-only or not. See module docstring for the\n * precise guarantee this offers.\n */\nexport function classifyReadOnly(sql: string): SqlReadOnlyResult {\n const strip = stripCommentsAndQuoted(sql);\n if (strip.unterminated) {\n return {\n readOnly: false,\n reason: `SQL has an unterminated ${strip.unterminated} literal`,\n };\n }\n const statements = splitStatements(strip.cleaned);\n\n if (statements.length === 0) {\n return {\n readOnly: false,\n reason: \"SQL is empty or contains only comments\",\n };\n }\n\n for (let i = 0; i < statements.length; i++) {\n const stmt = statements[i];\n const firstWord = firstKeyword(stmt);\n if (!firstWord) {\n return {\n readOnly: false,\n reason: `statement ${i + 1} of ${statements.length} is empty`,\n };\n }\n if (!READ_ONLY_KEYWORDS.has(firstWord.toUpperCase())) {\n return {\n readOnly: false,\n reason: `statement starts with '${firstWord}'; only SELECT, WITH, SHOW, EXPLAIN, DESCRIBE, DESC are allowed in read-only mode`,\n };\n }\n }\n\n return { readOnly: true, statements: statements.length };\n}\n\n/**\n * Assert `sql` is read-only or throw {@link ReadOnlySqlViolation}. 
Suitable\n * for calling from agent-tool handlers where the thrown string surfaces back\n * to the LLM as the tool's error output.\n */\nexport function assertReadOnlySql(sql: string): void {\n const result = classifyReadOnly(sql);\n if (!result.readOnly) {\n throw new ReadOnlySqlViolation(result.reason);\n }\n}\n\nexport class ReadOnlySqlViolation extends Error {\n constructor(reason: string) {\n super(`SQL read-only policy violation: ${reason}`);\n this.name = \"ReadOnlySqlViolation\";\n }\n}\n\n// ---------------------------------------------------------------------------\n// Tokenizer helpers\n// ---------------------------------------------------------------------------\n\n/**\n * Walk `sql` character-by-character and replace every string literal,\n * identifier quote, and comment body with a single space of equivalent\n * length. Leaves structural tokens (semicolons, whitespace, identifiers,\n * operators) in place.\n *\n * Handles:\n * - `-- line comments` through end-of-line\n * - SQL block comments (slash-star ... star-slash) with correct nesting (PostgreSQL)\n * - `'single-quoted strings'` with `''` escape\n * - `\"double-quoted identifiers\"` with `\"\"` escape (ANSI)\n * - `` `backtick identifiers` `` (Databricks)\n * - `$tag$dollar quoted$tag$` strings (PostgreSQL)\n * - `E'escape-style'` strings (PostgreSQL)\n */\ntype StripResult = {\n cleaned: string;\n /** Non-null if tokenization ended inside an unterminated literal or comment. */\n unterminated:\n | null\n | \"string\"\n | \"identifier\"\n | \"block comment\"\n | \"dollar-quoted string\";\n};\n\nfunction stripCommentsAndQuoted(sql: string): StripResult {\n const out: string[] = [];\n let i = 0;\n const n = sql.length;\n let unterminated: StripResult[\"unterminated\"] = null;\n\n while (i < n) {\n const ch = sql[i];\n const next = i + 1 < n ? 
sql[i + 1] : \"\";\n\n if (ch === \"-\" && next === \"-\") {\n out.push(\" \");\n i += 2;\n while (i < n && sql[i] !== \"\\n\") {\n out.push(\" \");\n i++;\n }\n continue;\n }\n\n if (ch === \"/\" && next === \"*\") {\n out.push(\" \");\n i += 2;\n let depth = 1;\n while (i < n && depth > 0) {\n if (sql[i] === \"/\" && sql[i + 1] === \"*\") {\n out.push(\" \");\n i += 2;\n depth++;\n continue;\n }\n if (sql[i] === \"*\" && sql[i + 1] === \"/\") {\n out.push(\" \");\n i += 2;\n depth--;\n continue;\n }\n out.push(sql[i] === \"\\n\" ? \"\\n\" : \" \");\n i++;\n }\n if (depth > 0) {\n unterminated = \"block comment\";\n }\n continue;\n }\n\n if (\n ch === \"'\" ||\n (ch === \"E\" && next === \"'\") ||\n (ch === \"e\" && next === \"'\")\n ) {\n if (ch === \"E\" || ch === \"e\") {\n out.push(\" \");\n i++;\n }\n out.push(\" \");\n i++;\n let closed = false;\n while (i < n) {\n if (sql[i] === \"'\" && sql[i + 1] === \"'\") {\n out.push(\" \");\n i += 2;\n continue;\n }\n if (sql[i] === \"\\\\\" && sql[i + 1]) {\n out.push(\" \");\n i += 2;\n continue;\n }\n if (sql[i] === \"'\") {\n out.push(\" \");\n i++;\n closed = true;\n break;\n }\n out.push(sql[i] === \"\\n\" ? \"\\n\" : \" \");\n i++;\n }\n if (!closed) unterminated = \"string\";\n continue;\n }\n\n if (ch === '\"') {\n out.push(\" \");\n i++;\n let closed = false;\n while (i < n) {\n if (sql[i] === '\"' && sql[i + 1] === '\"') {\n out.push(\" \");\n i += 2;\n continue;\n }\n if (sql[i] === '\"') {\n out.push(\" \");\n i++;\n closed = true;\n break;\n }\n out.push(sql[i] === \"\\n\" ? \"\\n\" : \" \");\n i++;\n }\n if (!closed) unterminated = \"identifier\";\n continue;\n }\n\n if (ch === \"`\") {\n out.push(\" \");\n i++;\n let closed = false;\n while (i < n) {\n if (sql[i] === \"`\" && sql[i + 1] === \"`\") {\n out.push(\" \");\n i += 2;\n continue;\n }\n if (sql[i] === \"`\") {\n out.push(\" \");\n i++;\n closed = true;\n break;\n }\n out.push(sql[i] === \"\\n\" ? 
\"\\n\" : \" \");\n i++;\n }\n if (!closed) unterminated = \"identifier\";\n continue;\n }\n\n if (ch === \"$\") {\n const tagMatch = sql.slice(i).match(/^\\$([A-Za-z_][A-Za-z0-9_]*)?\\$/);\n if (tagMatch) {\n const tag = tagMatch[0];\n out.push(\" \".repeat(tag.length));\n i += tag.length;\n const closeIdx = sql.indexOf(tag, i);\n if (closeIdx === -1) {\n while (i < n) {\n out.push(sql[i] === \"\\n\" ? \"\\n\" : \" \");\n i++;\n }\n unterminated = \"dollar-quoted string\";\n } else {\n while (i < closeIdx) {\n out.push(sql[i] === \"\\n\" ? \"\\n\" : \" \");\n i++;\n }\n out.push(\" \".repeat(tag.length));\n i += tag.length;\n }\n continue;\n }\n }\n\n out.push(ch);\n i++;\n }\n\n return { cleaned: out.join(\"\"), unterminated };\n}\n\n/** Split on unquoted `;`, trim, drop empty segments. */\nfunction splitStatements(cleanedSql: string): string[] {\n return cleanedSql\n .split(\";\")\n .map((s) => s.trim())\n .filter((s) => s.length > 0);\n}\n\n/** Return the first bareword keyword of a statement, or null if empty. */\nfunction firstKeyword(stmt: string): string | null {\n const match = stmt.match(/^\\s*([A-Za-z_][A-Za-z0-9_]*)/);\n return match ? 
match[1] : null;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAyCA,MAAM,qBAAqB,IAAI,IAAI;CACjC;CACA;CACA;CACA;CACA;CACA;CACD,CAAC;;;;;AAUF,SAAgB,iBAAiB,KAAgC;CAC/D,MAAM,QAAQ,uBAAuB,IAAI;AACzC,KAAI,MAAM,aACR,QAAO;EACL,UAAU;EACV,QAAQ,2BAA2B,MAAM,aAAa;EACvD;CAEH,MAAM,aAAa,gBAAgB,MAAM,QAAQ;AAEjD,KAAI,WAAW,WAAW,EACxB,QAAO;EACL,UAAU;EACV,QAAQ;EACT;AAGH,MAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK;EAC1C,MAAM,OAAO,WAAW;EACxB,MAAM,YAAY,aAAa,KAAK;AACpC,MAAI,CAAC,UACH,QAAO;GACL,UAAU;GACV,QAAQ,aAAa,IAAI,EAAE,MAAM,WAAW,OAAO;GACpD;AAEH,MAAI,CAAC,mBAAmB,IAAI,UAAU,aAAa,CAAC,CAClD,QAAO;GACL,UAAU;GACV,QAAQ,0BAA0B,UAAU;GAC7C;;AAIL,QAAO;EAAE,UAAU;EAAM,YAAY,WAAW;EAAQ;;;;;;;AAQ1D,SAAgB,kBAAkB,KAAmB;CACnD,MAAM,SAAS,iBAAiB,IAAI;AACpC,KAAI,CAAC,OAAO,SACV,OAAM,IAAI,qBAAqB,OAAO,OAAO;;AAIjD,IAAa,uBAAb,cAA0C,MAAM;CAC9C,YAAY,QAAgB;AAC1B,QAAM,mCAAmC,SAAS;AAClD,OAAK,OAAO;;;AAkChB,SAAS,uBAAuB,KAA0B;CACxD,MAAM,MAAgB,EAAE;CACxB,IAAI,IAAI;CACR,MAAM,IAAI,IAAI;CACd,IAAI,eAA4C;AAEhD,QAAO,IAAI,GAAG;EACZ,MAAM,KAAK,IAAI;EACf,MAAM,OAAO,IAAI,IAAI,IAAI,IAAI,IAAI,KAAK;AAEtC,MAAI,OAAO,OAAO,SAAS,KAAK;AAC9B,OAAI,KAAK,KAAK;AACd,QAAK;AACL,UAAO,IAAI,KAAK,IAAI,OAAO,MAAM;AAC/B,QAAI,KAAK,IAAI;AACb;;AAEF;;AAGF,MAAI,OAAO,OAAO,SAAS,KAAK;AAC9B,OAAI,KAAK,KAAK;AACd,QAAK;GACL,IAAI,QAAQ;AACZ,UAAO,IAAI,KAAK,QAAQ,GAAG;AACzB,QAAI,IAAI,OAAO,OAAO,IAAI,IAAI,OAAO,KAAK;AACxC,SAAI,KAAK,KAAK;AACd,UAAK;AACL;AACA;;AAEF,QAAI,IAAI,OAAO,OAAO,IAAI,IAAI,OAAO,KAAK;AACxC,SAAI,KAAK,KAAK;AACd,UAAK;AACL;AACA;;AAEF,QAAI,KAAK,IAAI,OAAO,OAAO,OAAO,IAAI;AACtC;;AAEF,OAAI,QAAQ,EACV,gBAAe;AAEjB;;AAGF,MACE,OAAO,OACN,OAAO,OAAO,SAAS,OACvB,OAAO,OAAO,SAAS,KACxB;AACA,OAAI,OAAO,OAAO,OAAO,KAAK;AAC5B,QAAI,KAAK,IAAI;AACb;;AAEF,OAAI,KAAK,IAAI;AACb;GACA,IAAI,SAAS;AACb,UAAO,IAAI,GAAG;AACZ,QAAI,IAAI,OAAO,OAAO,IAAI,IAAI,OAAO,KAAK;AACxC,SAAI,KAAK,KAAK;AACd,UAAK;AACL;;AAEF,QAAI,IAAI,OAAO,QAAQ,IAAI,IAAI,IAAI;AACjC,SAAI,KAAK,KAAK;AACd,UAAK;AACL;;AAEF,QAAI,IAAI,OAAO,KAAK;AAClB,SAAI,KAAK,IAAI;AACb;AACA,cAAS;AACT;;AAEF,QAAI,KAAK,IAAI,OAAO,OAAO,OAAO,IAAI;AACtC;;AAEF,OA
AI,CAAC,OAAQ,gBAAe;AAC5B;;AAGF,MAAI,OAAO,MAAK;AACd,OAAI,KAAK,IAAI;AACb;GACA,IAAI,SAAS;AACb,UAAO,IAAI,GAAG;AACZ,QAAI,IAAI,OAAO,QAAO,IAAI,IAAI,OAAO,MAAK;AACxC,SAAI,KAAK,KAAK;AACd,UAAK;AACL;;AAEF,QAAI,IAAI,OAAO,MAAK;AAClB,SAAI,KAAK,IAAI;AACb;AACA,cAAS;AACT;;AAEF,QAAI,KAAK,IAAI,OAAO,OAAO,OAAO,IAAI;AACtC;;AAEF,OAAI,CAAC,OAAQ,gBAAe;AAC5B;;AAGF,MAAI,OAAO,KAAK;AACd,OAAI,KAAK,IAAI;AACb;GACA,IAAI,SAAS;AACb,UAAO,IAAI,GAAG;AACZ,QAAI,IAAI,OAAO,OAAO,IAAI,IAAI,OAAO,KAAK;AACxC,SAAI,KAAK,KAAK;AACd,UAAK;AACL;;AAEF,QAAI,IAAI,OAAO,KAAK;AAClB,SAAI,KAAK,IAAI;AACb;AACA,cAAS;AACT;;AAEF,QAAI,KAAK,IAAI,OAAO,OAAO,OAAO,IAAI;AACtC;;AAEF,OAAI,CAAC,OAAQ,gBAAe;AAC5B;;AAGF,MAAI,OAAO,KAAK;GACd,MAAM,WAAW,IAAI,MAAM,EAAE,CAAC,MAAM,iCAAiC;AACrE,OAAI,UAAU;IACZ,MAAM,MAAM,SAAS;AACrB,QAAI,KAAK,IAAI,OAAO,IAAI,OAAO,CAAC;AAChC,SAAK,IAAI;IACT,MAAM,WAAW,IAAI,QAAQ,KAAK,EAAE;AACpC,QAAI,aAAa,IAAI;AACnB,YAAO,IAAI,GAAG;AACZ,UAAI,KAAK,IAAI,OAAO,OAAO,OAAO,IAAI;AACtC;;AAEF,oBAAe;WACV;AACL,YAAO,IAAI,UAAU;AACnB,UAAI,KAAK,IAAI,OAAO,OAAO,OAAO,IAAI;AACtC;;AAEF,SAAI,KAAK,IAAI,OAAO,IAAI,OAAO,CAAC;AAChC,UAAK,IAAI;;AAEX;;;AAIJ,MAAI,KAAK,GAAG;AACZ;;AAGF,QAAO;EAAE,SAAS,IAAI,KAAK,GAAG;EAAE;EAAc;;;AAIhD,SAAS,gBAAgB,YAA8B;AACrD,QAAO,WACJ,MAAM,IAAI,CACV,KAAK,MAAM,EAAE,MAAM,CAAC,CACpB,QAAQ,MAAM,EAAE,SAAS,EAAE;;;AAIhC,SAAS,aAAa,MAA6B;CACjD,MAAM,QAAQ,KAAK,MAAM,+BAA+B;AACxD,QAAO,QAAQ,MAAM,KAAK"}
|