@aigne/afs-cli 1.11.0-beta.4 → 1.11.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72) hide show
  1. package/README.md +16 -1
  2. package/dist/cli.cjs +53 -20
  3. package/dist/cli.mjs +54 -21
  4. package/dist/cli.mjs.map +1 -1
  5. package/dist/commands/exec.cjs +132 -14
  6. package/dist/commands/exec.mjs +129 -14
  7. package/dist/commands/exec.mjs.map +1 -1
  8. package/dist/commands/explain.cjs +1 -1
  9. package/dist/commands/explain.mjs +1 -1
  10. package/dist/commands/explain.mjs.map +1 -1
  11. package/dist/commands/index.mjs +1 -1
  12. package/dist/commands/ls.cjs +129 -30
  13. package/dist/commands/ls.mjs +129 -30
  14. package/dist/commands/ls.mjs.map +1 -1
  15. package/dist/commands/mount.cjs +2 -1
  16. package/dist/commands/mount.mjs +2 -1
  17. package/dist/commands/mount.mjs.map +1 -1
  18. package/dist/commands/read.cjs +213 -14
  19. package/dist/commands/read.mjs +213 -14
  20. package/dist/commands/read.mjs.map +1 -1
  21. package/dist/commands/serve.cjs +3 -1
  22. package/dist/commands/serve.mjs +3 -1
  23. package/dist/commands/serve.mjs.map +1 -1
  24. package/dist/commands/stat.cjs +116 -34
  25. package/dist/commands/stat.mjs +117 -34
  26. package/dist/commands/stat.mjs.map +1 -1
  27. package/dist/commands/write.cjs +37 -4
  28. package/dist/commands/write.mjs +38 -4
  29. package/dist/commands/write.mjs.map +1 -1
  30. package/dist/config/loader.cjs +33 -13
  31. package/dist/config/loader.mjs +33 -13
  32. package/dist/config/loader.mjs.map +1 -1
  33. package/dist/config/provider-factory.cjs +311 -3
  34. package/dist/config/provider-factory.mjs +311 -3
  35. package/dist/config/provider-factory.mjs.map +1 -1
  36. package/dist/config/schema.cjs +3 -1
  37. package/dist/config/schema.mjs +3 -1
  38. package/dist/config/schema.mjs.map +1 -1
  39. package/dist/config/uri-parser.cjs +195 -2
  40. package/dist/config/uri-parser.mjs +195 -2
  41. package/dist/config/uri-parser.mjs.map +1 -1
  42. package/dist/explorer/actions.cjs +53 -23
  43. package/dist/explorer/actions.mjs +54 -23
  44. package/dist/explorer/actions.mjs.map +1 -1
  45. package/dist/explorer/components/dialog.cjs +163 -10
  46. package/dist/explorer/components/dialog.mjs +163 -10
  47. package/dist/explorer/components/dialog.mjs.map +1 -1
  48. package/dist/explorer/components/file-list.mjs.map +1 -1
  49. package/dist/explorer/components/metadata-panel.cjs +39 -25
  50. package/dist/explorer/components/metadata-panel.mjs +39 -25
  51. package/dist/explorer/components/metadata-panel.mjs.map +1 -1
  52. package/dist/explorer/screen.cjs +23 -8
  53. package/dist/explorer/screen.mjs +24 -9
  54. package/dist/explorer/screen.mjs.map +1 -1
  55. package/dist/explorer/theme.cjs +3 -1
  56. package/dist/explorer/theme.mjs +3 -1
  57. package/dist/explorer/theme.mjs.map +1 -1
  58. package/dist/path-utils.cjs +2 -1
  59. package/dist/path-utils.mjs +1 -1
  60. package/dist/runtime.cjs +24 -0
  61. package/dist/runtime.mjs +24 -0
  62. package/dist/runtime.mjs.map +1 -1
  63. package/dist/ui/header.cjs +0 -9
  64. package/dist/ui/header.mjs +1 -9
  65. package/dist/ui/header.mjs.map +1 -1
  66. package/dist/ui/index.cjs +0 -2
  67. package/dist/ui/index.mjs +2 -3
  68. package/dist/ui/index.mjs.map +1 -1
  69. package/dist/utils/meta.cjs +51 -0
  70. package/dist/utils/meta.mjs +49 -0
  71. package/dist/utils/meta.mjs.map +1 -0
  72. package/package.json +19 -9
@@ -1 +1 @@
1
- {"version":3,"file":"loader.mjs","names":[],"sources":["../../src/config/loader.ts"],"sourcesContent":["import { access, readFile } from \"node:fs/promises\";\nimport { homedir } from \"node:os\";\nimport { dirname, join } from \"node:path\";\nimport { parse } from \"smol-toml\";\nimport { resolveEnvVarsInObject } from \"./env.js\";\nimport { type AFSConfig, ConfigSchema, type MountConfig, type ServeConfig } from \"./schema.js\";\n\nexport const CONFIG_DIR_NAME = \".afs-config\";\nexport const CONFIG_FILE_NAME = \"config.toml\";\n\nexport interface ConfigLoaderOptions {\n /** Custom path to user-level config directory (for testing) */\n userConfigDir?: string;\n}\n\n/**\n * Loads and merges AFS configuration from multiple layers\n *\n * Layer priority (lowest to highest):\n * 1. User-level: ~/.afs-config/config.toml\n * 2. All intermediate directories from project root to cwd\n *\n * Example: if cwd is /project/packages/cli, configs are merged from:\n * ~/.afs-config/config.toml (user)\n * /project/.afs-config/config.toml (project root, has .git)\n * /project/packages/.afs-config/config.toml (intermediate)\n * /project/packages/cli/.afs-config/config.toml (cwd)\n */\nexport class ConfigLoader {\n private userConfigDir: string;\n\n constructor(options: ConfigLoaderOptions = {}) {\n this.userConfigDir = options.userConfigDir ?? 
join(homedir(), CONFIG_DIR_NAME);\n }\n\n /**\n * Load and merge configuration from all layers\n *\n * @param cwd - Current working directory (defaults to process.cwd())\n * @returns Merged configuration\n * @throws Error on invalid config, TOML parse error, or duplicate mount paths\n */\n async load(cwd: string = process.cwd()): Promise<AFSConfig> {\n const configPaths = await this.getConfigPaths(cwd);\n const configs: AFSConfig[] = [];\n\n for (const configPath of configPaths) {\n const config = await this.loadSingleConfig(configPath);\n configs.push(config);\n }\n\n return this.mergeConfigs(configs);\n }\n\n /**\n * Get paths to all existing config files\n *\n * Collects configs from:\n * 1. User-level: ~/.afs-config/config.toml\n * 2. Project root (or topmost .afs-config dir) to cwd: all .afs-config/config.toml files\n */\n async getConfigPaths(cwd: string = process.cwd()): Promise<string[]> {\n const paths: string[] = [];\n\n // 1. User-level config\n const userConfigPath = join(this.userConfigDir, CONFIG_FILE_NAME);\n if (await this.fileExists(userConfigPath)) {\n paths.push(userConfigPath);\n }\n\n // 2. Find project root (look for .git going up)\n const projectRoot = await this.findProjectRoot(cwd);\n\n // 3. Determine start directory\n // If project root found, use it; otherwise find topmost .afs-config directory\n const startDir = projectRoot ?? (await this.findTopmostAfsDir(cwd)) ?? cwd;\n\n // 4. 
Collect all config files from start to cwd\n const intermediatePaths = await this.collectConfigsFromTo(startDir, cwd);\n paths.push(...intermediatePaths);\n\n return paths;\n }\n\n /**\n * Find the topmost directory containing .afs-config from startDir going up\n */\n private async findTopmostAfsDir(startDir: string): Promise<string | null> {\n let currentDir = startDir;\n let topmostAfsDir: string | null = null;\n\n while (true) {\n if (await this.fileExists(join(currentDir, CONFIG_DIR_NAME))) {\n topmostAfsDir = currentDir;\n }\n\n const parentDir = dirname(currentDir);\n if (parentDir === currentDir) {\n // Reached filesystem root\n break;\n }\n currentDir = parentDir;\n }\n\n return topmostAfsDir;\n }\n\n /**\n * Collect all config files from startDir to endDir (inclusive)\n * Returns paths in order from startDir to endDir (parent to child)\n */\n private async collectConfigsFromTo(startDir: string, endDir: string): Promise<string[]> {\n const paths: string[] = [];\n\n // Build list of directories from startDir to endDir\n const dirs: string[] = [];\n let current = endDir;\n\n while (true) {\n dirs.unshift(current); // prepend to maintain parent-to-child order\n\n if (current === startDir) {\n break;\n }\n\n const parent = dirname(current);\n if (parent === current) {\n // Reached filesystem root without finding startDir\n // This shouldn't happen if startDir is an ancestor of endDir\n break;\n }\n current = parent;\n }\n\n // Check each directory for config file\n for (const dir of dirs) {\n const configPath = join(dir, CONFIG_DIR_NAME, CONFIG_FILE_NAME);\n if (await this.fileExists(configPath)) {\n paths.push(configPath);\n }\n }\n\n return paths;\n }\n\n /**\n * Load a single config file\n */\n private async loadSingleConfig(configPath: string): Promise<AFSConfig> {\n const content = await readFile(configPath, \"utf-8\");\n\n let parsed: unknown;\n try {\n parsed = parse(content);\n } catch (error) {\n throw new Error(\n `Failed to parse TOML config at 
${configPath}: ${error instanceof Error ? error.message : String(error)}`,\n );\n }\n\n // Resolve environment variables\n const resolved = resolveEnvVarsInObject(parsed);\n\n // Validate against schema\n const result = ConfigSchema.safeParse(resolved);\n if (!result.success) {\n const errors = result.error.errors.map((e) => `${e.path.join(\".\")}: ${e.message}`).join(\"; \");\n throw new Error(`Invalid config at ${configPath}: ${errors}`);\n }\n\n return result.data;\n }\n\n /**\n * Create a composite key for namespace+path duplicate detection\n * Uses empty string for undefined namespace (default namespace)\n */\n private makeNamespacePathKey(namespace: string | undefined, path: string): string {\n return `${namespace ?? \"\"}:${path}`;\n }\n\n /**\n * Merge multiple configs, checking for duplicate mount paths within same namespace\n * For serve config, later (more specific) configs override earlier ones\n */\n private mergeConfigs(configs: AFSConfig[]): AFSConfig {\n const allMounts: MountConfig[] = [];\n // key = \"namespace:path\", value = uri for error message\n const seenPaths = new Map<string, string>();\n let mergedServe: ServeConfig | undefined;\n\n for (const config of configs) {\n // Merge mounts\n for (const mount of config.mounts) {\n const key = this.makeNamespacePathKey(mount.namespace, mount.path);\n if (seenPaths.has(key)) {\n const nsLabel = mount.namespace ? `namespace '${mount.namespace}'` : \"default namespace\";\n throw new Error(\n `Duplicate mount path \"${mount.path}\" in ${nsLabel} found in configuration. ` +\n `Mount paths must be unique within each namespace.`,\n );\n }\n seenPaths.set(key, mount.uri);\n allMounts.push(mount);\n }\n\n // Merge serve config (later configs override earlier ones)\n if (config.serve) {\n mergedServe = mergedServe ? 
{ ...mergedServe, ...config.serve } : config.serve;\n }\n }\n\n return { mounts: allMounts, serve: mergedServe };\n }\n\n /**\n * Find project root by looking for .git\n * Note: Only .git is used as project root marker, not .afs-config,\n * because .afs-config can exist at multiple levels for hierarchical config\n */\n private async findProjectRoot(startDir: string): Promise<string | null> {\n let currentDir = startDir;\n\n while (true) {\n // Check for .git directory\n if (await this.fileExists(join(currentDir, \".git\"))) {\n return currentDir;\n }\n\n const parentDir = dirname(currentDir);\n if (parentDir === currentDir) {\n // Reached filesystem root\n return null;\n }\n currentDir = parentDir;\n }\n }\n\n /**\n * Check if a file or directory exists\n */\n private async fileExists(path: string): Promise<boolean> {\n try {\n await access(path);\n return true;\n } catch {\n return false;\n }\n }\n}\n\n// Default singleton instance\nexport const configLoader = new ConfigLoader();\n"],"mappings":";;;;;;;;AAOA,MAAa,kBAAkB;AAC/B,MAAa,mBAAmB;;;;;;;;;;;;;;AAoBhC,IAAa,eAAb,MAA0B;CACxB,AAAQ;CAER,YAAY,UAA+B,EAAE,EAAE;AAC7C,OAAK,gBAAgB,QAAQ,iBAAiB,KAAK,SAAS,EAAE,gBAAgB;;;;;;;;;CAUhF,MAAM,KAAK,MAAc,QAAQ,KAAK,EAAsB;EAC1D,MAAM,cAAc,MAAM,KAAK,eAAe,IAAI;EAClD,MAAM,UAAuB,EAAE;AAE/B,OAAK,MAAM,cAAc,aAAa;GACpC,MAAM,SAAS,MAAM,KAAK,iBAAiB,WAAW;AACtD,WAAQ,KAAK,OAAO;;AAGtB,SAAO,KAAK,aAAa,QAAQ;;;;;;;;;CAUnC,MAAM,eAAe,MAAc,QAAQ,KAAK,EAAqB;EACnE,MAAM,QAAkB,EAAE;EAG1B,MAAM,iBAAiB,KAAK,KAAK,eAAe,iBAAiB;AACjE,MAAI,MAAM,KAAK,WAAW,eAAe,CACvC,OAAM,KAAK,eAAe;EAQ5B,MAAM,WAJc,MAAM,KAAK,gBAAgB,IAAI,IAIlB,MAAM,KAAK,kBAAkB,IAAI,IAAK;EAGvE,MAAM,oBAAoB,MAAM,KAAK,qBAAqB,UAAU,IAAI;AACxE,QAAM,KAAK,GAAG,kBAAkB;AAEhC,SAAO;;;;;CAMT,MAAc,kBAAkB,UAA0C;EACxE,IAAI,aAAa;EACjB,IAAI,gBAA+B;AAEnC,SAAO,MAAM;AACX,OAAI,MAAM,KAAK,WAAW,KAAK,YAAY,gBAAgB,CAAC,CAC1D,iBAAgB;GAGlB,MAAM,YAAY,QAAQ,WAAW;AACrC,OAAI,cAAc,WAEhB;AAEF,gBAAa;;AAGf,SAAO;;;;;;CAOT,MAAc,qBAAqB,UAAkB,QAAmC;EACtF,MAAM,QAAkB,EAAE;EAG1B,MAAM,OAAiB,EAAE;EACzB,
IAAI,UAAU;AAEd,SAAO,MAAM;AACX,QAAK,QAAQ,QAAQ;AAErB,OAAI,YAAY,SACd;GAGF,MAAM,SAAS,QAAQ,QAAQ;AAC/B,OAAI,WAAW,QAGb;AAEF,aAAU;;AAIZ,OAAK,MAAM,OAAO,MAAM;GACtB,MAAM,aAAa,KAAK,KAAK,iBAAiB,iBAAiB;AAC/D,OAAI,MAAM,KAAK,WAAW,WAAW,CACnC,OAAM,KAAK,WAAW;;AAI1B,SAAO;;;;;CAMT,MAAc,iBAAiB,YAAwC;EACrE,MAAM,UAAU,MAAM,SAAS,YAAY,QAAQ;EAEnD,IAAI;AACJ,MAAI;AACF,YAAS,MAAM,QAAQ;WAChB,OAAO;AACd,SAAM,IAAI,MACR,kCAAkC,WAAW,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,GACxG;;EAIH,MAAM,WAAW,uBAAuB,OAAO;EAG/C,MAAM,SAAS,aAAa,UAAU,SAAS;AAC/C,MAAI,CAAC,OAAO,SAAS;GACnB,MAAM,SAAS,OAAO,MAAM,OAAO,KAAK,MAAM,GAAG,EAAE,KAAK,KAAK,IAAI,CAAC,IAAI,EAAE,UAAU,CAAC,KAAK,KAAK;AAC7F,SAAM,IAAI,MAAM,qBAAqB,WAAW,IAAI,SAAS;;AAG/D,SAAO,OAAO;;;;;;CAOhB,AAAQ,qBAAqB,WAA+B,MAAsB;AAChF,SAAO,GAAG,aAAa,GAAG,GAAG;;;;;;CAO/B,AAAQ,aAAa,SAAiC;EACpD,MAAM,YAA2B,EAAE;EAEnC,MAAM,4BAAY,IAAI,KAAqB;EAC3C,IAAI;AAEJ,OAAK,MAAM,UAAU,SAAS;AAE5B,QAAK,MAAM,SAAS,OAAO,QAAQ;IACjC,MAAM,MAAM,KAAK,qBAAqB,MAAM,WAAW,MAAM,KAAK;AAClE,QAAI,UAAU,IAAI,IAAI,EAAE;KACtB,MAAM,UAAU,MAAM,YAAY,cAAc,MAAM,UAAU,KAAK;AACrE,WAAM,IAAI,MACR,yBAAyB,MAAM,KAAK,OAAO,QAAQ,4EAEpD;;AAEH,cAAU,IAAI,KAAK,MAAM,IAAI;AAC7B,cAAU,KAAK,MAAM;;AAIvB,OAAI,OAAO,MACT,eAAc,cAAc;IAAE,GAAG;IAAa,GAAG,OAAO;IAAO,GAAG,OAAO;;AAI7E,SAAO;GAAE,QAAQ;GAAW,OAAO;GAAa;;;;;;;CAQlD,MAAc,gBAAgB,UAA0C;EACtE,IAAI,aAAa;AAEjB,SAAO,MAAM;AAEX,OAAI,MAAM,KAAK,WAAW,KAAK,YAAY,OAAO,CAAC,CACjD,QAAO;GAGT,MAAM,YAAY,QAAQ,WAAW;AACrC,OAAI,cAAc,WAEhB,QAAO;AAET,gBAAa;;;;;;CAOjB,MAAc,WAAW,MAAgC;AACvD,MAAI;AACF,SAAM,OAAO,KAAK;AAClB,UAAO;UACD;AACN,UAAO;;;;AAMb,MAAa,eAAe,IAAI,cAAc"}
1
+ {"version":3,"file":"loader.mjs","names":[],"sources":["../../src/config/loader.ts"],"sourcesContent":["import { access, readFile } from \"node:fs/promises\";\nimport { homedir } from \"node:os\";\nimport { dirname, join } from \"node:path\";\nimport { parse } from \"smol-toml\";\nimport { resolveEnvVarsInObject } from \"./env.js\";\nimport { type AFSConfig, ConfigSchema, type MountConfig, type ServeConfig } from \"./schema.js\";\n\nexport const CONFIG_DIR_NAME = \".afs-config\";\nexport const CONFIG_FILE_NAME = \"config.toml\";\n\nexport interface ConfigLoaderOptions {\n /** Custom path to user-level config directory (for testing) */\n userConfigDir?: string;\n}\n\n/**\n * Environment variable to override user config directory.\n * Useful for testing to isolate from real user config.\n */\nexport const AFS_USER_CONFIG_DIR_ENV = \"AFS_USER_CONFIG_DIR\";\n\n/**\n * Loads and merges AFS configuration from multiple layers\n *\n * Layer priority (lowest to highest):\n * 1. User-level: ~/.afs-config/config.toml\n * 2. 
All intermediate directories from project root to cwd\n *\n * Example: if cwd is /project/packages/cli, configs are merged from:\n * ~/.afs-config/config.toml (user)\n * /project/.afs-config/config.toml (project root, has .git)\n * /project/packages/.afs-config/config.toml (intermediate)\n * /project/packages/cli/.afs-config/config.toml (cwd)\n */\nexport class ConfigLoader {\n private userConfigDir: string;\n\n constructor(options: ConfigLoaderOptions = {}) {\n // Priority: options > environment variable > default (~/.afs-config)\n this.userConfigDir =\n options.userConfigDir ??\n process.env[AFS_USER_CONFIG_DIR_ENV] ??\n join(homedir(), CONFIG_DIR_NAME);\n }\n\n /**\n * Load and merge configuration from all layers\n *\n * @param cwd - Current working directory (defaults to process.cwd())\n * @returns Merged configuration\n * @throws Error on invalid config, TOML parse error, or duplicate mount paths\n */\n async load(cwd: string = process.cwd()): Promise<AFSConfig> {\n const configPaths = await this.getConfigPaths(cwd);\n const configs: AFSConfig[] = [];\n\n for (const configPath of configPaths) {\n const config = await this.loadSingleConfig(configPath);\n configs.push(config);\n }\n\n return this.mergeConfigs(configs);\n }\n\n /**\n * Get paths to all existing config files\n *\n * Collects configs from:\n * 1. User-level: ~/.afs-config/config.toml\n * 2. Project root (or topmost .afs-config dir) to cwd: all .afs-config/config.toml files\n */\n async getConfigPaths(cwd: string = process.cwd()): Promise<string[]> {\n const paths: string[] = [];\n\n // 1. User-level config\n const userConfigPath = join(this.userConfigDir, CONFIG_FILE_NAME);\n if (await this.fileExists(userConfigPath)) {\n paths.push(userConfigPath);\n }\n\n // 2. Find project root (look for .git going up)\n const projectRoot = await this.findProjectRoot(cwd);\n\n // 3. 
Determine start directory\n // If project root found, use it; otherwise find topmost .afs-config directory\n const startDir = projectRoot ?? (await this.findTopmostAfsDir(cwd)) ?? cwd;\n\n // 4. Collect all config files from start to cwd\n // Exclude user config directory to avoid loading it twice\n const intermediatePaths = await this.collectConfigsFromTo(startDir, cwd, this.userConfigDir);\n paths.push(...intermediatePaths);\n\n return paths;\n }\n\n /**\n * Find the topmost directory containing .afs-config from startDir going up\n */\n private async findTopmostAfsDir(startDir: string): Promise<string | null> {\n let currentDir = startDir;\n let topmostAfsDir: string | null = null;\n\n while (true) {\n if (await this.fileExists(join(currentDir, CONFIG_DIR_NAME))) {\n topmostAfsDir = currentDir;\n }\n\n const parentDir = dirname(currentDir);\n if (parentDir === currentDir) {\n // Reached filesystem root\n break;\n }\n currentDir = parentDir;\n }\n\n return topmostAfsDir;\n }\n\n /**\n * Collect all config files from startDir to endDir (inclusive)\n * Returns paths in order from startDir to endDir (parent to child)\n *\n * @param excludeConfigDir - Optional config directory to exclude (to avoid duplicates)\n */\n private async collectConfigsFromTo(\n startDir: string,\n endDir: string,\n excludeConfigDir?: string,\n ): Promise<string[]> {\n const paths: string[] = [];\n\n // Build list of directories from startDir to endDir\n const dirs: string[] = [];\n let current = endDir;\n\n while (true) {\n dirs.unshift(current); // prepend to maintain parent-to-child order\n\n if (current === startDir) {\n break;\n }\n\n const parent = dirname(current);\n if (parent === current) {\n // Reached filesystem root without finding startDir\n // This shouldn't happen if startDir is an ancestor of endDir\n break;\n }\n current = parent;\n }\n\n // Check each directory for config file\n for (const dir of dirs) {\n const configDir = join(dir, CONFIG_DIR_NAME);\n // Skip if this is the 
excluded config directory (e.g., user config already loaded)\n if (excludeConfigDir && configDir === excludeConfigDir) {\n continue;\n }\n const configPath = join(configDir, CONFIG_FILE_NAME);\n if (await this.fileExists(configPath)) {\n paths.push(configPath);\n }\n }\n\n return paths;\n }\n\n /**\n * Load a single config file\n */\n private async loadSingleConfig(configPath: string): Promise<AFSConfig> {\n const content = await readFile(configPath, \"utf-8\");\n\n let parsed: unknown;\n try {\n parsed = parse(content);\n } catch (error) {\n throw new Error(\n `Failed to parse TOML config at ${configPath}: ${error instanceof Error ? error.message : String(error)}`,\n );\n }\n\n // Resolve environment variables with friendly error messages\n let resolved: unknown;\n try {\n resolved = resolveEnvVarsInObject(parsed);\n } catch (error) {\n const message = error instanceof Error ? error.message : String(error);\n // Extract variable name from error message like \"Environment variable GITHUB_TOKEN is not defined\"\n const match = message.match(/Environment variable (\\w+) is not defined/);\n if (match) {\n const varName = match[1];\n throw new Error(\n `Missing environment variable ${varName} in ${configPath}.\\n` +\n ` Set it in your shell: export ${varName}=your_value\\n` +\n ` Or add to .env file: ${varName}=your_value`,\n );\n }\n throw new Error(`Failed to resolve environment variables in ${configPath}: ${message}`);\n }\n\n // Validate against schema\n const result = ConfigSchema.safeParse(resolved);\n if (!result.success) {\n const errors = result.error.errors.map((e) => `${e.path.join(\".\")}: ${e.message}`).join(\"; \");\n throw new Error(`Invalid config at ${configPath}: ${errors}`);\n }\n\n return result.data;\n }\n\n /**\n * Create a composite key for namespace+path duplicate detection\n * Uses empty string for undefined namespace (default namespace)\n */\n private makeNamespacePathKey(namespace: string | undefined, path: string): string {\n return 
`${namespace ?? \"\"}:${path}`;\n }\n\n /**\n * Merge multiple configs with child configs overriding parent configs\n * For both mounts and serve, later (more specific) configs override earlier ones\n */\n private mergeConfigs(configs: AFSConfig[]): AFSConfig {\n // key = \"namespace:path\", value = index in allMounts array\n const mountIndexByKey = new Map<string, number>();\n const allMounts: MountConfig[] = [];\n let mergedServe: ServeConfig | undefined;\n\n for (const config of configs) {\n // Merge mounts - later configs override earlier ones with same namespace+path\n for (const mount of config.mounts) {\n const key = this.makeNamespacePathKey(mount.namespace, mount.path);\n const existingIndex = mountIndexByKey.get(key);\n if (existingIndex !== undefined) {\n // Override existing mount with the new one (child overrides parent)\n allMounts[existingIndex] = mount;\n } else {\n // Add new mount\n mountIndexByKey.set(key, allMounts.length);\n allMounts.push(mount);\n }\n }\n\n // Merge serve config (later configs override earlier ones)\n if (config.serve) {\n mergedServe = mergedServe ? 
{ ...mergedServe, ...config.serve } : config.serve;\n }\n }\n\n return { mounts: allMounts, serve: mergedServe };\n }\n\n /**\n * Find project root by looking for .git\n * Note: Only .git is used as project root marker, not .afs-config,\n * because .afs-config can exist at multiple levels for hierarchical config\n */\n private async findProjectRoot(startDir: string): Promise<string | null> {\n let currentDir = startDir;\n\n while (true) {\n // Check for .git directory\n if (await this.fileExists(join(currentDir, \".git\"))) {\n return currentDir;\n }\n\n const parentDir = dirname(currentDir);\n if (parentDir === currentDir) {\n // Reached filesystem root\n return null;\n }\n currentDir = parentDir;\n }\n }\n\n /**\n * Check if a file or directory exists\n */\n private async fileExists(path: string): Promise<boolean> {\n try {\n await access(path);\n return true;\n } catch {\n return false;\n }\n }\n}\n\n// Default singleton instance\nexport const configLoader = new ConfigLoader();\n"],"mappings":";;;;;;;;AAOA,MAAa,kBAAkB;AAC/B,MAAa,mBAAmB;;;;;AAWhC,MAAa,0BAA0B;;;;;;;;;;;;;;AAevC,IAAa,eAAb,MAA0B;CACxB,AAAQ;CAER,YAAY,UAA+B,EAAE,EAAE;AAE7C,OAAK,gBACH,QAAQ,iBACR,QAAQ,IAAI,4BACZ,KAAK,SAAS,EAAE,gBAAgB;;;;;;;;;CAUpC,MAAM,KAAK,MAAc,QAAQ,KAAK,EAAsB;EAC1D,MAAM,cAAc,MAAM,KAAK,eAAe,IAAI;EAClD,MAAM,UAAuB,EAAE;AAE/B,OAAK,MAAM,cAAc,aAAa;GACpC,MAAM,SAAS,MAAM,KAAK,iBAAiB,WAAW;AACtD,WAAQ,KAAK,OAAO;;AAGtB,SAAO,KAAK,aAAa,QAAQ;;;;;;;;;CAUnC,MAAM,eAAe,MAAc,QAAQ,KAAK,EAAqB;EACnE,MAAM,QAAkB,EAAE;EAG1B,MAAM,iBAAiB,KAAK,KAAK,eAAe,iBAAiB;AACjE,MAAI,MAAM,KAAK,WAAW,eAAe,CACvC,OAAM,KAAK,eAAe;EAQ5B,MAAM,WAJc,MAAM,KAAK,gBAAgB,IAAI,IAIlB,MAAM,KAAK,kBAAkB,IAAI,IAAK;EAIvE,MAAM,oBAAoB,MAAM,KAAK,qBAAqB,UAAU,KAAK,KAAK,cAAc;AAC5F,QAAM,KAAK,GAAG,kBAAkB;AAEhC,SAAO;;;;;CAMT,MAAc,kBAAkB,UAA0C;EACxE,IAAI,aAAa;EACjB,IAAI,gBAA+B;AAEnC,SAAO,MAAM;AACX,OAAI,MAAM,KAAK,WAAW,KAAK,YAAY,gBAAgB,CAAC,CAC1D,iBAAgB;GAGlB,MAAM,YAAY,QAAQ,WAAW;AACrC,OAAI,cAAc,WAEhB;AAEF,gBAAa;;AAGf,SAAO;;;;;;;;CAST,MAAc,qBACZ,UACA,QACA,kBACmB
;EACnB,MAAM,QAAkB,EAAE;EAG1B,MAAM,OAAiB,EAAE;EACzB,IAAI,UAAU;AAEd,SAAO,MAAM;AACX,QAAK,QAAQ,QAAQ;AAErB,OAAI,YAAY,SACd;GAGF,MAAM,SAAS,QAAQ,QAAQ;AAC/B,OAAI,WAAW,QAGb;AAEF,aAAU;;AAIZ,OAAK,MAAM,OAAO,MAAM;GACtB,MAAM,YAAY,KAAK,KAAK,gBAAgB;AAE5C,OAAI,oBAAoB,cAAc,iBACpC;GAEF,MAAM,aAAa,KAAK,WAAW,iBAAiB;AACpD,OAAI,MAAM,KAAK,WAAW,WAAW,CACnC,OAAM,KAAK,WAAW;;AAI1B,SAAO;;;;;CAMT,MAAc,iBAAiB,YAAwC;EACrE,MAAM,UAAU,MAAM,SAAS,YAAY,QAAQ;EAEnD,IAAI;AACJ,MAAI;AACF,YAAS,MAAM,QAAQ;WAChB,OAAO;AACd,SAAM,IAAI,MACR,kCAAkC,WAAW,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,GACxG;;EAIH,IAAI;AACJ,MAAI;AACF,cAAW,uBAAuB,OAAO;WAClC,OAAO;GACd,MAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;GAEtE,MAAM,QAAQ,QAAQ,MAAM,4CAA4C;AACxE,OAAI,OAAO;IACT,MAAM,UAAU,MAAM;AACtB,UAAM,IAAI,MACR,gCAAgC,QAAQ,MAAM,WAAW,oCACrB,QAAQ,sCAChB,QAAQ,aACrC;;AAEH,SAAM,IAAI,MAAM,8CAA8C,WAAW,IAAI,UAAU;;EAIzF,MAAM,SAAS,aAAa,UAAU,SAAS;AAC/C,MAAI,CAAC,OAAO,SAAS;GACnB,MAAM,SAAS,OAAO,MAAM,OAAO,KAAK,MAAM,GAAG,EAAE,KAAK,KAAK,IAAI,CAAC,IAAI,EAAE,UAAU,CAAC,KAAK,KAAK;AAC7F,SAAM,IAAI,MAAM,qBAAqB,WAAW,IAAI,SAAS;;AAG/D,SAAO,OAAO;;;;;;CAOhB,AAAQ,qBAAqB,WAA+B,MAAsB;AAChF,SAAO,GAAG,aAAa,GAAG,GAAG;;;;;;CAO/B,AAAQ,aAAa,SAAiC;EAEpD,MAAM,kCAAkB,IAAI,KAAqB;EACjD,MAAM,YAA2B,EAAE;EACnC,IAAI;AAEJ,OAAK,MAAM,UAAU,SAAS;AAE5B,QAAK,MAAM,SAAS,OAAO,QAAQ;IACjC,MAAM,MAAM,KAAK,qBAAqB,MAAM,WAAW,MAAM,KAAK;IAClE,MAAM,gBAAgB,gBAAgB,IAAI,IAAI;AAC9C,QAAI,kBAAkB,OAEpB,WAAU,iBAAiB;SACtB;AAEL,qBAAgB,IAAI,KAAK,UAAU,OAAO;AAC1C,eAAU,KAAK,MAAM;;;AAKzB,OAAI,OAAO,MACT,eAAc,cAAc;IAAE,GAAG;IAAa,GAAG,OAAO;IAAO,GAAG,OAAO;;AAI7E,SAAO;GAAE,QAAQ;GAAW,OAAO;GAAa;;;;;;;CAQlD,MAAc,gBAAgB,UAA0C;EACtE,IAAI,aAAa;AAEjB,SAAO,MAAM;AAEX,OAAI,MAAM,KAAK,WAAW,KAAK,YAAY,OAAO,CAAC,CACjD,QAAO;GAGT,MAAM,YAAY,QAAQ,WAAW;AACrC,OAAI,cAAc,WAEhB,QAAO;AAET,gBAAa;;;;;;CAOjB,MAAc,WAAW,MAAgC;AACvD,MAAI;AACF,SAAM,OAAO,KAAK;AAClB,UAAO;UACD;AACN,UAAO;;;;AAMb,MAAa,eAAe,IAAI,cAAc"}
@@ -15,8 +15,20 @@ async function createProvider(mount) {
15
15
  case "git": return createGitProvider(mount, parsed.path, parsed.params, parsed.host);
16
16
  case "sqlite": return createSQLiteProvider(mount, parsed.path);
17
17
  case "json": return createJSONProvider(mount, parsed.path);
18
+ case "toml": return createTOMLProvider(mount, parsed.path);
19
+ case "s3": return createS3Provider(mount, parsed.path, parsed.params);
20
+ case "gs": return createGCSProvider(mount, parsed.path, parsed.params);
21
+ case "ec2": return createEC2Provider(mount, parsed.path, parsed.params);
22
+ case "gce": return createGCEProvider(mount, parsed.path, parsed.params);
23
+ case "dns": return createDNSProvider(mount, parsed.path, parsed.params);
24
+ case "github": return createGitHubProvider(mount, parsed.path, parsed.params);
25
+ case "sandbox": return createSandboxProvider(mount);
18
26
  case "http":
19
27
  case "https": return createHttpProvider(mount);
28
+ case "mcp":
29
+ case "mcp+stdio":
30
+ case "mcp+http":
31
+ case "mcp+sse": return createMCPProvider(mount, parsed);
20
32
  default: throw new Error(`Unknown URI scheme: ${parsed.scheme}`);
21
33
  }
22
34
  }
@@ -32,7 +44,8 @@ async function createAFSFSProvider(mount, localPath) {
32
44
  }
33
45
  async function createGitProvider(mount, repoPath, params, host) {
34
46
  const { AFSGit } = await import("@aigne/afs-git");
35
- if (host) return new AFSGit({
47
+ let provider;
48
+ if (host) provider = new AFSGit({
36
49
  remoteUrl: `git@${host}:${repoPath}`,
37
50
  name: mount.path.slice(1).replace(/\//g, "-") || "git",
38
51
  description: mount.description,
@@ -40,7 +53,7 @@ async function createGitProvider(mount, repoPath, params, host) {
40
53
  branches: params.branch ? [params.branch] : void 0,
41
54
  ...mount.options
42
55
  });
43
- if (repoPath.startsWith("https://") || repoPath.startsWith("http://")) return new AFSGit({
56
+ else if (repoPath.startsWith("https://") || repoPath.startsWith("http://")) provider = new AFSGit({
44
57
  remoteUrl: repoPath,
45
58
  name: mount.path.slice(1).replace(/\//g, "-") || "git",
46
59
  description: mount.description,
@@ -48,7 +61,7 @@ async function createGitProvider(mount, repoPath, params, host) {
48
61
  branches: params.branch ? [params.branch] : void 0,
49
62
  ...mount.options
50
63
  });
51
- return new AFSGit({
64
+ else provider = new AFSGit({
52
65
  repoPath,
53
66
  name: mount.path.slice(1).replace(/\//g, "-") || "git",
54
67
  description: mount.description,
@@ -56,6 +69,8 @@ async function createGitProvider(mount, repoPath, params, host) {
56
69
  branches: params.branch ? [params.branch] : void 0,
57
70
  ...mount.options
58
71
  });
72
+ await provider.ready();
73
+ return provider;
59
74
  }
60
75
  async function createSQLiteProvider(mount, dbPath) {
61
76
  const { SQLiteAFS } = await import("@aigne/afs-sqlite");
@@ -77,6 +92,126 @@ async function createJSONProvider(mount, jsonPath) {
77
92
  ...mount.options
78
93
  });
79
94
  }
95
+ async function createTOMLProvider(mount, tomlPath) {
96
+ const { AFSTOML } = await import("@aigne/afs-toml");
97
+ return new AFSTOML({
98
+ tomlPath,
99
+ name: mount.path.slice(1).replace(/\//g, "-") || "toml",
100
+ description: mount.description,
101
+ accessMode: mount.access_mode,
102
+ ...mount.options
103
+ });
104
+ }
105
+ /**
106
+ * Create an S3 provider from mount configuration
107
+ *
108
+ * Supported URI formats:
109
+ * - s3://bucket (bucket only)
110
+ * - s3://bucket/prefix (bucket with prefix)
111
+ *
112
+ * Options:
113
+ * - endpoint: S3-compatible endpoint URL (for MinIO, R2, B2, etc.)
114
+ * - forcePathStyle: Use path-style URLs instead of virtual-hosted style
115
+ * - region: AWS region
116
+ * - profile: AWS profile name
117
+ */
118
+ async function createS3Provider(mount, path, params) {
119
+ const { AFSS3 } = await import("@aigne/afs-s3");
120
+ const parts = path.split("/");
121
+ const bucket = parts[0] || "";
122
+ const prefix = parts.slice(1).join("/") || void 0;
123
+ if (!bucket) throw new Error("S3 URI requires a bucket name: s3://bucket/prefix");
124
+ return new AFSS3({
125
+ bucket,
126
+ prefix,
127
+ name: mount.path.slice(1).replace(/\//g, "-") || bucket,
128
+ description: mount.description,
129
+ accessMode: mount.access_mode,
130
+ region: params.region ?? mount.options?.region,
131
+ endpoint: params.endpoint ?? mount.options?.endpoint,
132
+ forcePathStyle: params.forcePathStyle === "true" || mount.options?.forcePathStyle,
133
+ profile: params.profile ?? mount.options?.profile,
134
+ ...mount.options
135
+ });
136
+ }
137
+ /**
138
+ * Create a GCS provider from mount configuration
139
+ *
140
+ * Supported URI formats:
141
+ * - gs://bucket (bucket only)
142
+ * - gs://bucket/prefix (bucket with prefix)
143
+ *
144
+ * Options:
145
+ * - projectId: Google Cloud project ID
146
+ * - keyFilename: Path to service account key file
147
+ * - apiEndpoint: Custom API endpoint (for emulators)
148
+ */
149
+ async function createGCSProvider(mount, path, params) {
150
+ const { AFSGCS } = await import("@aigne/afs-gcs");
151
+ const parts = path.split("/");
152
+ const bucket = parts[0] || "";
153
+ const prefix = parts.slice(1).join("/") || void 0;
154
+ if (!bucket) throw new Error("GCS URI requires a bucket name: gs://bucket/prefix");
155
+ return new AFSGCS({
156
+ bucket,
157
+ prefix,
158
+ name: mount.path.slice(1).replace(/\//g, "-") || bucket,
159
+ description: mount.description,
160
+ accessMode: mount.access_mode,
161
+ projectId: params.projectId ?? mount.options?.projectId,
162
+ keyFilename: params.keyFilename ?? mount.options?.keyFilename,
163
+ endpoint: params.endpoint ?? mount.options?.endpoint,
164
+ ...mount.options
165
+ });
166
+ }
167
+ /**
168
+ * Create a GitHub provider from mount configuration
169
+ *
170
+ * Supported URI formats:
171
+ * - github://owner/repo (single-repo mode)
172
+ * - github://owner (org mode - list all repos in organization)
173
+ * - github:// (multi-repo mode, requires options.mode = "multi-repo")
174
+ *
175
+ * Options:
176
+ * - mode: "single-repo" | "multi-repo" | "org" (default: auto-detected from URI)
177
+ * - baseUrl: GitHub API base URL (for GitHub Enterprise)
178
+ * - cache: { enabled: boolean; ttl: number }
179
+ * - rateLimit: { autoRetry: boolean; maxRetries: number }
180
+ */
181
+ async function createGitHubProvider(mount, repoPath, params) {
182
+ const { AFSGitHub } = await import("@aigne/afs-github");
183
+ const parts = repoPath.split("/").filter(Boolean);
184
+ const owner = parts[0];
185
+ const repo = parts[1];
186
+ let mode = params.mode ?? mount.options?.mode;
187
+ if (!mode) if (owner && repo) mode = "single-repo";
188
+ else if (owner && !repo) mode = "org";
189
+ else mode = "multi-repo";
190
+ if (mode === "single-repo" && (!owner || !repo)) throw new Error("GitHub single-repo mode requires owner/repo in URI: github://owner/repo");
191
+ if (mode === "org" && !owner) throw new Error("GitHub org mode requires owner in URI: github://owner");
192
+ const authToken = mount.auth ?? mount.token;
193
+ const ownerType = mount.options?.owner_type ?? mount.options?.ownerType;
194
+ return new AFSGitHub({
195
+ name: mount.path.slice(1).replace(/\//g, "-") || "github",
196
+ description: mount.description,
197
+ owner,
198
+ repo,
199
+ auth: authToken ? { token: authToken } : void 0,
200
+ mode,
201
+ ownerType,
202
+ accessMode: mount.access_mode ?? "readonly",
203
+ ...mount.options
204
+ });
205
+ }
206
+ async function createSandboxProvider(mount) {
207
+ const { AFSSandbox } = await import("@aigne/afs-sandbox");
208
+ return new AFSSandbox({
209
+ name: mount.path.slice(1).replace(/\//g, "-") || "sandbox",
210
+ description: mount.description,
211
+ accessMode: mount.access_mode,
212
+ ...mount.options
213
+ });
214
+ }
80
215
  async function createHttpProvider(mount) {
81
216
  const { AFSHttpClient } = await import("@aigne/afs-http");
82
217
  return new AFSHttpClient({
@@ -84,9 +219,182 @@ async function createHttpProvider(mount) {
84
219
  name: mount.path.slice(1).replace(/\//g, "-") || "http",
85
220
  description: mount.description,
86
221
  accessMode: mount.access_mode,
222
+ token: mount.token,
87
223
  ...mount.options
88
224
  });
89
225
  }
226
/**
 * Create an MCP provider from mount configuration.
 *
 * Supported URI formats:
 * - mcp://name (requires options.transport, options.command/url)
 * - mcp+stdio://command/args... (e.g., mcp+stdio://npx/-y/@modelcontextprotocol/server-sqlite/test.db)
 * - mcp+http://host/path (e.g., mcp+http://mcp.notion.com/mcp)
 * - mcp+sse://host/path (e.g., mcp+sse://api.example.com/sse)
 *
 * Options:
 * - transport: "stdio" | "http" | "sse" (required for the bare mcp:// scheme)
 * - command: string (stdio transport)
 * - args: string[] (stdio transport)
 * - env: Record<string, string> (stdio transport)
 * - url: string (http/sse transport)
 * - headers: Record<string, string> (http/sse transport)
 */
async function createMCPProvider(mount, parsed) {
	const { AFSMCP } = await import("@aigne/afs-mcp");
	const name = mount.path.slice(1).replace(/\//g, "-") || "mcp";
	const options = mount.options || {};
	let transport;
	let command;
	let args;
	let url;
	switch (parsed.scheme) {
		case "mcp+stdio": {
			// URI path segments encode the command followed by its arguments.
			transport = "stdio";
			const segments = parsed.path.split("/").filter(Boolean).map(decodeURIComponent);
			command = segments[0];
			args = segments.slice(1);
			break;
		}
		case "mcp+http":
			transport = "http";
			url = `https://${parsed.host || ""}${parsed.path}`;
			break;
		case "mcp+sse":
			transport = "sse";
			url = `https://${parsed.host || ""}${parsed.path}`;
			break;
		default:
			// Bare mcp:// scheme — everything comes from mount options.
			transport = options.transport || "stdio";
			command = options.command;
			args = options.args;
			url = options.url;
	}
	const mcpOptions = {
		name,
		description: mount.description,
		transport
	};
	if (transport === "stdio") {
		if (!command) throw new Error("MCP stdio transport requires 'command' option");
		mcpOptions.command = command;
		mcpOptions.args = args || [];
		if (options.env) mcpOptions.env = options.env;
	} else {
		if (!url) throw new Error(`MCP ${transport} transport requires 'url' option`);
		mcpOptions.url = url;
		if (options.headers) mcpOptions.headers = options.headers;
	}
	if (options.timeout) mcpOptions.timeout = options.timeout;
	if (options.maxReconnects) mcpOptions.maxReconnects = options.maxReconnects;
	return new AFSMCP(mcpOptions);
}
287
/**
 * Create an EC2 provider from mount configuration.
 *
 * Supported URI formats:
 * - ec2://us-east-1 (single region)
 * - ec2://us-east-1,us-west-2 (multi-region)
 * - ec2://?profile=myprofile (use default region from profile)
 *
 * Options:
 * - endpoint: Custom endpoint URL (for LocalStack, etc.)
 * - profile: AWS profile name
 * - filters: Array of { name, values } filters
 * - cache: { ttl: number, instanceTtl: number, staticTtl: number }
 */
async function createEC2Provider(mount, path, params) {
	const { AFSEC2 } = await import("@aigne/afs-ec2");
	// The URI path is a comma-separated region list; blanks are dropped.
	const regions = path.split(",").map((region) => region.trim()).filter(Boolean);
	const config = {
		name: mount.path.slice(1).replace(/\//g, "-") || "ec2",
		description: mount.description,
		accessMode: mount.access_mode ?? "readonly"
	};
	if (regions.length === 1) {
		config.region = regions[0];
	} else if (regions.length > 1) {
		config.regions = regions;
	}
	// Query params take precedence over mount options for endpoint/profile.
	const endpoint = params.endpoint ?? mount.options?.endpoint;
	if (endpoint) config.endpoint = endpoint;
	const profile = params.profile ?? mount.options?.profile;
	if (profile) config.profile = profile;
	const extra = mount.options;
	if (extra?.credentials) config.credentials = extra.credentials;
	if (extra?.filters) config.filters = extra.filters;
	if (extra?.cache) config.cache = extra.cache;
	return new AFSEC2(config);
}
320
/**
 * Create a GCE provider from mount configuration.
 *
 * Supported URI formats:
 * - gce://project-id/zone (project and zone)
 * - gce://project-id (project only, zone from options)
 *
 * Options:
 * - zone: GCE zone
 * - keyFilename: Path to service account key file
 * - credentials: Service account credentials object
 * - cache: { ttl: number, instanceTtl: number, staticTtl: number }
 */
async function createGCEProvider(mount, path, params) {
	const { AFSGCE } = await import("@aigne/afs-gce");
	const segments = path.split("/").filter(Boolean);
	const projectId = segments[0] || "";
	if (!projectId) {
		throw new Error("GCE URI requires a project ID: gce://project-id/zone");
	}
	// Zone resolution order: URI path segment, query param, mount options.
	const zone = segments[1] ?? params.zone ?? mount.options?.zone;
	if (!zone) {
		throw new Error("GCE requires a zone: gce://project-id/zone or use ?zone= parameter");
	}
	const config = {
		name: mount.path.slice(1).replace(/\//g, "-") || "gce",
		description: mount.description,
		projectId,
		zone,
		accessMode: mount.access_mode ?? "readonly"
	};
	const keyFilename = params.keyFilename ?? mount.options?.keyFilename;
	if (keyFilename) config.keyFilename = keyFilename;
	if (mount.options?.credentials) config.credentials = mount.options.credentials;
	if (mount.options?.cache) config.cache = mount.options.cache;
	return new AFSGCE(config);
}
354
/**
 * Create a DNS provider from mount configuration.
 *
 * Supported URI formats:
 * - dns://zone.domain.com (single zone)
 *
 * Options:
 * - provider: DNS provider type ("route53" | "clouddns"; defaults to "route53")
 * - endpoint: Custom endpoint URL (for LocalStack, etc.)
 * - region: AWS region (for Route53)
 * - credentials: { accessKeyId, secretAccessKey } (for Route53)
 * - projectId: Google Cloud project ID (for Cloud DNS)
 * - keyFilename: Path to service account key file (for Cloud DNS)
 * - permissions: { preset, dangerous }
 */
async function createDNSProvider(mount, zoneDomain, params) {
	const { DNSProvider, Route53Adapter, CloudDNSAdapter } = await import("@aigne/afs-dns");
	// Query param wins over mount options; Route53 is the default backend.
	const providerType = params.provider ?? mount.options?.provider ?? "route53";
	let adapter;
	if (providerType === "route53") {
		const route53Config = {};
		const region = params.region ?? mount.options?.region;
		if (region) route53Config.region = region;
		const endpoint = params.endpoint ?? mount.options?.endpoint;
		if (endpoint) route53Config.endpoint = endpoint;
		if (mount.options?.credentials) route53Config.credentials = mount.options.credentials;
		adapter = new Route53Adapter(route53Config);
	} else if (providerType === "clouddns") {
		const cloudDNSConfig = {};
		const projectId = params.projectId ?? mount.options?.projectId;
		if (projectId) cloudDNSConfig.projectId = projectId;
		const keyFilename = params.keyFilename ?? mount.options?.keyFilename;
		if (keyFilename) cloudDNSConfig.keyFilename = keyFilename;
		adapter = new CloudDNSAdapter(cloudDNSConfig);
	} else {
		throw new Error(`Unsupported DNS provider: ${providerType}. Supported providers: 'route53', 'clouddns'.`);
	}
	return new DNSProvider({
		zone: zoneDomain,
		adapter,
		accessMode: mount.access_mode ?? "readonly",
		permissions: mount.options?.permissions,
		auditLog: mount.options?.auditLog,
		rateLimiting: mount.options?.rateLimiting
	});
}
90
398
 
91
399
  //#endregion
92
400
  exports.createProvider = createProvider;