cryptic-md 1.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,22 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2024-2026 Camille Scott
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
22
+
@@ -0,0 +1,127 @@
1
+ Metadata-Version: 2.4
2
+ Name: cryptic-md
3
+ Version: 1.0.2
4
+ Summary: LLM tools for summarizing web content into structured Obsidian notes.
5
+ License-Expression: MIT
6
+ License-File: LICENSE.md
7
+ Keywords: obsidian,openai,llm,summarization,notes,markdown
8
+ Author: Camille Scott
9
+ Author-email: camille.scott.w@gmail.com
10
+ Requires-Python: >=3.12,<4
11
+ Classifier: Development Status :: 5 - Production/Stable
12
+ Classifier: Intended Audience :: End Users/Desktop
13
+ Classifier: Operating System :: POSIX :: Linux
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Programming Language :: Python :: 3.13
17
+ Classifier: Topic :: Text Processing :: Markup :: Markdown
18
+ Classifier: Topic :: Utilities
19
+ Requires-Dist: asyncinotify (>=4.0,<5.0)
20
+ Requires-Dist: openai (>=2.36.0,<3.0.0)
21
+ Requires-Dist: ponderosa[rich] (>=0.6.0,<0.7.0)
22
+ Requires-Dist: python-dotenv (>=1.2.2,<2.0.0)
23
+ Requires-Dist: python-frontmatter (>=1.1.0,<2.0.0)
24
+ Requires-Dist: pyyaml (>=6.0,<7.0)
25
+ Requires-Dist: rich (>=13.9.3,<14.0.0)
26
+ Project-URL: Homepage, https://github.com/camillescott/cryptic
27
+ Project-URL: Issues, https://github.com/camillescott/cryptic/issues
28
+ Project-URL: Repository, https://github.com/camillescott/cryptic
29
+ Description-Content-Type: text/markdown
30
+
31
+ # cryptic
32
+
33
+ ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/camillescott/cryptic/docker-publish.yml)
34
+ ![Docker Image Version](https://img.shields.io/docker/v/camillescott/cryptic?logo=docker)
35
+
36
+ LLM tools for summarizing web content into structured Obsidian notes.
37
+
38
+ ## Features
39
+
40
+ - Structured-output summarization via OpenAI, with per-category schemas (papers, articles, events, products, discussions, media, software, references).
41
+ - Two-pane note generation: YAML frontmatter for metadata, Markdown body for content. Section layout and frontmatter mapping are declared via Pydantic field annotations.
42
+ - Long-running service that watches a directory for new notes, processes them concurrently, and moves results to an output directory. Unmodified copies of each input are archived to a separate directory.
43
+ - Persistent retry bookkeeping via a `cryptic_tries` frontmatter field, capped at a configurable `max_tries`.
44
+ - Settle delay before reading new files so sources that write incrementally are picked up only once.
45
+ - YAML configuration for model list, default model, reasoning effort, prompt text, and service directories.
46
+
47
+ ## Installation
48
+
49
+ ```sh
50
+ poetry install
51
+ ```
52
+
53
+ Set `OPENAI_API_KEY` in the environment or in a `.env` file in the project root.
54
+
55
+ ## Configuration
56
+
57
+ Create `~/.config/cryptic/config.yaml`:
58
+
59
+ ```yaml
60
+ openai:
61
+ models:
62
+ - gpt-5.4-mini
63
+ default_model: gpt-5.4-mini
64
+ default_reasoning: medium
65
+
66
+ service:
67
+ vaults:
68
+ personal:
69
+ input_dir: ~/Obsidian/Personal/cryptic-staging
70
+ output_dir: ~/Obsidian/Personal/cryptic-processed
71
+ originals_dir: ~/Obsidian/Personal/cryptic-originals
72
+ max_concurrent: 3
73
+ max_tries: 3
74
+ pickup_delay_seconds: 3.0
75
+ ```
76
+
77
+ Override the config path per-invocation with `--config /path/to/config.yaml`.
78
+
79
+ ## Usage
80
+
81
+ Process a single note in place:
82
+
83
+ ```sh
84
+ cryptic process note --note path/to/note.md
85
+ ```
86
+
87
+ Run the service against the configured directories:
88
+
89
+ ```sh
90
+ cryptic service
91
+ ```
92
+
93
+ Drain the input directory once and exit (useful for batch runs):
94
+
95
+ ```sh
96
+ cryptic service --once
97
+ ```
98
+
99
+ Common flags available on both commands:
100
+
101
+ - `--model NAME` — pick a model from `openai.models`.
102
+ - `--reasoning {low,medium,high,xhigh}` — set reasoning effort.
103
+ - `--config PATH` — use an alternate config file.
104
+
105
+ ## Docker
106
+
107
+ Pre-built images are published to Docker Hub at `camillescott/cryptic`. The included `compose.yaml` is the simplest way to run the service:
108
+
109
+ ```sh
110
+ export OPENAI_API_KEY=sk-...
111
+ docker compose up -d
112
+ ```
113
+
114
+ It bind-mounts `./vaults` → `/vaults` (your Obsidian tree) and `./config` → `/config` (a directory containing `config.yaml`). Paths inside `config.yaml` must be rooted at `/vaults`, for example `/vaults/personal/cryptic-staging`.
115
+
116
+ To build the image locally instead of pulling:
117
+
118
+ ```sh
119
+ docker build -t cryptic .
120
+ ```
121
+
122
+ inotify works across bind mounts on Linux hosts. On Docker Desktop for macOS or Windows, host filesystem events don't propagate into the container.
123
+
124
+ ---
125
+
126
+ Portions of this project's code have been written with agentic coding tools.
127
+
@@ -0,0 +1,96 @@
1
+ # cryptic
2
+
3
+ ![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/camillescott/cryptic/docker-publish.yml)
4
+ ![Docker Image Version](https://img.shields.io/docker/v/camillescott/cryptic?logo=docker)
5
+
6
+ LLM tools for summarizing web content into structured Obsidian notes.
7
+
8
+ ## Features
9
+
10
+ - Structured-output summarization via OpenAI, with per-category schemas (papers, articles, events, products, discussions, media, software, references).
11
+ - Two-pane note generation: YAML frontmatter for metadata, Markdown body for content. Section layout and frontmatter mapping are declared via Pydantic field annotations.
12
+ - Long-running service that watches a directory for new notes, processes them concurrently, and moves results to an output directory. Unmodified copies of each input are archived to a separate directory.
13
+ - Persistent retry bookkeeping via a `cryptic_tries` frontmatter field, capped at a configurable `max_tries`.
14
+ - Settle delay before reading new files so sources that write incrementally are picked up only once.
15
+ - YAML configuration for model list, default model, reasoning effort, prompt text, and service directories.
16
+
17
+ ## Installation
18
+
19
+ ```sh
20
+ poetry install
21
+ ```
22
+
23
+ Set `OPENAI_API_KEY` in the environment or in a `.env` file in the project root.
24
+
25
+ ## Configuration
26
+
27
+ Create `~/.config/cryptic/config.yaml`:
28
+
29
+ ```yaml
30
+ openai:
31
+ models:
32
+ - gpt-5.4-mini
33
+ default_model: gpt-5.4-mini
34
+ default_reasoning: medium
35
+
36
+ service:
37
+ vaults:
38
+ personal:
39
+ input_dir: ~/Obsidian/Personal/cryptic-staging
40
+ output_dir: ~/Obsidian/Personal/cryptic-processed
41
+ originals_dir: ~/Obsidian/Personal/cryptic-originals
42
+ max_concurrent: 3
43
+ max_tries: 3
44
+ pickup_delay_seconds: 3.0
45
+ ```
46
+
47
+ Override the config path per-invocation with `--config /path/to/config.yaml`.
48
+
49
+ ## Usage
50
+
51
+ Process a single note in place:
52
+
53
+ ```sh
54
+ cryptic process note --note path/to/note.md
55
+ ```
56
+
57
+ Run the service against the configured directories:
58
+
59
+ ```sh
60
+ cryptic service
61
+ ```
62
+
63
+ Drain the input directory once and exit (useful for batch runs):
64
+
65
+ ```sh
66
+ cryptic service --once
67
+ ```
68
+
69
+ Common flags available on both commands:
70
+
71
+ - `--model NAME` — pick a model from `openai.models`.
72
+ - `--reasoning {low,medium,high,xhigh}` — set reasoning effort.
73
+ - `--config PATH` — use an alternate config file.
74
+
75
+ ## Docker
76
+
77
+ Pre-built images are published to Docker Hub at `camillescott/cryptic`. The included `compose.yaml` is the simplest way to run the service:
78
+
79
+ ```sh
80
+ export OPENAI_API_KEY=sk-...
81
+ docker compose up -d
82
+ ```
83
+
84
+ It bind-mounts `./vaults` → `/vaults` (your Obsidian tree) and `./config` → `/config` (a directory containing `config.yaml`). Paths inside `config.yaml` must be rooted at `/vaults`, for example `/vaults/personal/cryptic-staging`.
85
+
86
+ To build the image locally instead of pulling:
87
+
88
+ ```sh
89
+ docker build -t cryptic .
90
+ ```
91
+
92
+ inotify works across bind mounts on Linux hosts. On Docker Desktop for macOS or Windows, host filesystem events don't propagate into the container.
93
+
94
+ ---
95
+
96
+ Portions of this project's code have been written with agentic coding tools.
@@ -0,0 +1 @@
1
# Distribution version string; matches the packaged metadata (Version: 1.0.2).
__version__ = '1.0.2'
@@ -0,0 +1,20 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # File : __main__.py
4
+ # License: MIT
5
+ # Author : Camille Scott <camille.scott.w@gmail.com>
6
+ # Date : 29.10.2024
7
+ # (c) Camille Scott, 2024
8
+
9
+
10
+ import sys
11
+
12
+ from .cmds import commands
13
+
14
+
15
def main():
    """CLI entry point: dispatch to the registered command tree.

    Returns whatever exit status the selected command produces.
    """
    status = commands.run()
    return status


if __name__ == '__main__':
    sys.exit(main())
@@ -0,0 +1,55 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # File : args.py
4
+ # License: MIT
5
+ # Author : Camille Scott <camille.scott.w@gmail.com>
6
+ # Date : 31.10.2024
7
+ # (c) Camille Scott, 2024
8
+
9
+ from argparse import Action
10
+ from enum import Enum
11
+ from pathlib import Path
12
+
13
+ from ponderosa import CmdTree, ArgParser, arggroup
14
+
15
+
16
+ class EnumAction(Action):
17
+ """
18
+ Argparse action for handling Enums
19
+ """
20
+ def __init__(self, **kwargs):
21
+ # Pop off the type value
22
+ enum = kwargs.pop("type", None)
23
+
24
+ # Ensure an Enum subclass is provided
25
+ if enum is None:
26
+ raise ValueError("type must be assigned an Enum when using EnumAction")
27
+ if not issubclass(enum, Enum):
28
+ raise TypeError("type must be an Enum when using EnumAction")
29
+
30
+ # Generate choices from the Enum
31
+ kwargs.setdefault("choices", tuple(e.name for e in enum))
32
+
33
+ super(EnumAction, self).__init__(**kwargs)
34
+
35
+ self._enum = enum
36
+
37
+ def __call__(self, parser, namespace, values, option_string=None):
38
+ # Convert value back into an Enum
39
+ enum = self._enum[values]
40
+ setattr(namespace, self.dest, enum)
41
+
42
+
43
commands = CmdTree()


@commands.root.args("Config", common=True)
def common_args(parser: ArgParser):
    """Attach the config/model/reasoning flags shared by every command."""
    parser.add_argument(
        '--config',
        type=Path,
        default=None,
        help='Path to YAML config file.',
    )
    parser.add_argument(
        '--model',
        type=str,
        default=None,
        help='Override the configured default model.',
    )
    parser.add_argument(
        '--reasoning',
        type=str,
        default=None,
        choices=['low', 'medium', 'high', 'xhigh'],
        help='Override the configured reasoning effort.',
    )
55
+
@@ -0,0 +1,35 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # File : chat.py
4
+ # License: MIT
5
+ # Author : Camille Scott <camille.scott.w@gmail.com>
6
+ # Date : 23.10.2024
7
+ # (c) Camille Scott, 2024
8
+
9
+ from typing import Type
10
+
11
+ from openai import AsyncOpenAI
12
+ from openai.types.chat import ChatCompletion
13
+
14
+ from .models import BaseNoteSummary, NoteSummary
15
+
16
+
17
async def summarize_page(
    client: AsyncOpenAI,
    content: str,
    *,
    model: str,
    system_prompt: str,
    reasoning: str,
    schema: Type[BaseNoteSummary] = NoteSummary,
) -> tuple[BaseNoteSummary | None, ChatCompletion]:
    """Request a structured summary of *content* from the OpenAI API.

    Returns the parsed summary (``None`` when the response could not be
    parsed into *schema*) together with the raw completion so callers
    can inspect usage/token accounting.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": content},
    ]
    completion = await client.chat.completions.parse(
        model=model,
        messages=messages,
        response_format=schema,
        reasoning_effort=reasoning,
    )
    parsed = completion.choices[0].message.parsed
    return parsed, completion
@@ -0,0 +1,150 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # File : cmds.py
4
+ # License: MIT
5
+ # Author : Camille Scott <camille.scott.w@gmail.com>
6
+ # Date : 28.10.2024
7
+ # (c) Camille Scott, 2024
8
+
9
+ from argparse import Namespace
10
+ from pathlib import Path
11
+ import shutil
12
+
13
+ from dotenv import load_dotenv
14
+ from openai import AsyncOpenAI
15
+ from rich.console import Console
16
+
17
+ from .args import ArgParser, arggroup, commands, common_args, EnumAction
18
+ from .chat import summarize_page
19
+ from .config import AppConfig
20
+ from .models import NoteSummary, PageCategory, summary_schema_from_category
21
+ from .note import WebNote
22
+ from . import service as service_mod
23
+
24
+
25
+ def _resolve_model(args: Namespace, cfg: AppConfig, console: Console) -> str | None:
26
+ requested = args.model or cfg.openai.default_model
27
+ if requested not in cfg.openai.models:
28
+ console.print(
29
+ f'[red]Model {requested!r} is not in configured models: '
30
+ f'{cfg.openai.models}[/red]'
31
+ )
32
+ return None
33
+ return requested
34
+
35
+
36
+ def _resolve_reasoning(args: Namespace, cfg: AppConfig) -> str:
37
+ return args.reasoning or cfg.openai.default_reasoning
38
+
39
+
40
@common_args.postprocessor()
def resolve_config(args: Namespace):
    """Load the app config and resolve model/reasoning onto *args*.

    Re-raises config load errors (after printing them) and raises
    ValueError when the requested model is not configured, so the
    command aborts before doing any work. Also loads `.env` so the
    OpenAI key is available downstream.
    """
    console = Console(stderr=True)
    try:
        cfg = AppConfig.load(args.config)
    except (FileNotFoundError, ValueError) as err:
        console.print(f'[red]{err}[/red]')
        raise

    args.model = _resolve_model(args, cfg, console)
    if args.model is None:
        raise ValueError('No model specified')

    args.reasoning = _resolve_reasoning(args, cfg)
    args.cfg = cfg

    load_dotenv()
57
+
58
+
59
@arggroup('Category')
def category_args(parser: ArgParser):
    """Optional page-category override shared by processing commands."""
    parser.add_argument(
        '--category', '-c',
        type=PageCategory,
        action=EnumAction,
    )
62
+
63
+
64
@category_args.apply()
@commands.register('process', 'note',
                   help='Process a note with the LLM and rewrite it.')
async def process_note(args: Namespace):
    """Summarize a single note with the LLM and rewrite it in place.

    Returns 0 on success; 1 when the note is already processed (and
    ``--force`` was not given) or the model produced no parsed summary.
    """
    console = Console(stderr=True)

    console.log(f'Load {args.note}...')
    note = WebNote(args.note)

    if note.cryptic_processed and not args.force:
        console.log('[red] Note already processed and not --force, exiting.')
        return 1

    if args.category:
        # An explicit --category pins the schema; otherwise fall back to
        # the generic NoteSummary schema.
        schema = summary_schema_from_category(args.category)
        console.log(f'[yellow] Forcing {schema} as Schema')
    else:
        schema = NoteSummary

    client = AsyncOpenAI()
    try:
        # Plain string: the original used an f-string with no placeholders.
        with console.status('[bold blue]Wait for OpenAI response...'):
            summary, completion = await summarize_page(
                client,
                note.content,
                model=args.model,
                system_prompt=args.cfg.prompt.text,
                reasoning=args.reasoning,
                schema=schema,
            )
    finally:
        # Always release the HTTP client, even on API errors.
        await client.close()

    if summary is None:
        console.print('[red] Error processing note!')
        return 1

    console.log(f'Processed note using {completion.usage.total_tokens} tokens.')
    console.print(summary)

    if args.backup:
        console.log('Backup note...')
        shutil.copy(args.note, args.note.with_suffix('.bak'))

    console.log('Update and save note...')
    note.process_summary(summary)
    note.save()

    console.rule('Processed Note')
    note.to_console(console)

    return 0
117
+
118
+
119
@process_note.args()
def _(parser: ArgParser):
    """Register flags specific to `process note`."""
    # help= text added for parity with the `service` command's flags.
    parser.add_argument('--note', '-i', type=Path, required=True,
                        help='Path to the note to process.')
    parser.add_argument('--force', '-f', default=False, action='store_true',
                        help='Reprocess even if the note is marked processed.')
    parser.add_argument('--backup', '-b', default=False, action='store_true',
                        help='Write a .bak copy before rewriting the note.')
124
+
125
+
126
@commands.register('service',
                   help='Watch configured vault directories and process new notes.')
async def service_cmd(args: Namespace):
    """Run the directory-watching service using the loaded config."""
    console = Console(stderr=True)

    svc = args.cfg.require_service()
    # A CLI override beats the configured concurrency limit.
    if args.max_concurrent is not None:
        svc.max_concurrent = args.max_concurrent

    run_kwargs = dict(
        console=console,
        cfg=args.cfg,
        svc=svc,
        model=args.model,
        reasoning=args.reasoning,
        once=args.once,
    )
    return await service_mod.run(**run_kwargs)
143
+
144
+
145
@service_cmd.args()
def _(parser: ArgParser):
    """Register flags specific to `service`."""
    parser.add_argument(
        '--max-concurrent',
        type=int,
        default=None,
        help='Override service.max_concurrent from config.',
    )
    parser.add_argument(
        '--once',
        default=False,
        action='store_true',
        help='Drain existing files and exit instead of watching.',
    )
@@ -0,0 +1,146 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # File : config.py
4
+ # License: MIT
5
+ # Author : Camille Scott <camille.scott.w@gmail.com>
6
+
7
+ from __future__ import annotations
8
+
9
+ from importlib.resources import files as _pkg_files
10
+ import os
11
+ from pathlib import Path
12
+ from typing import Literal, Self
13
+
14
+ import yaml
15
+ from pydantic import BaseModel, Field, ValidationError, model_validator
16
+
17
+
18
+ DEFAULT_MODELS = ['gpt-5.4-mini', 'gpt-5.4-nano-2026-03-17']
19
+ DEFAULT_MODEL = 'gpt-5.4-mini'
20
+
21
+ ReasoningLevel = Literal['low', 'medium', 'high', 'xhigh']
22
+ REASONING_LEVELS: tuple[ReasoningLevel, ...] = ('low', 'medium', 'high', 'xhigh')
23
+ DEFAULT_REASONING: ReasoningLevel = 'medium'
24
+
25
+
26
+ def _xdg_default_config_path() -> Path:
27
+ base = os.environ.get('XDG_CONFIG_HOME')
28
+ root = Path(base) if base else Path.home() / '.config'
29
+ return root / 'cryptic' / 'config.yaml'
30
+
31
+
32
def _packaged_prompt_text() -> str:
    """Read the categorization prompt bundled inside the package."""
    prompt_file = _pkg_files('cryptic') / 'prompts' / 'categorize.txt'
    return prompt_file.read_text(encoding='utf-8').strip()
34
+
35
+
36
class OpenAICfg(BaseModel):
    """OpenAI settings: model allow-list, default model, reasoning level."""

    models: list[str] = Field(default_factory=lambda: list(DEFAULT_MODELS))
    default_model: str = DEFAULT_MODEL
    default_reasoning: ReasoningLevel = DEFAULT_REASONING

    @model_validator(mode='after')
    def _default_in_models(self) -> Self:
        """Reject configs whose default model is missing from the list."""
        if self.default_model in self.models:
            return self
        raise ValueError(
            f'default_model {self.default_model!r} is not in openai.models {self.models}'
        )
48
+
49
+
50
class PromptCfg(BaseModel):
    """System prompt source: inline ``text`` or a file ``path``, not both."""

    path: Path | None = None
    text: str | None = None

    @model_validator(mode='after')
    def _resolve(self) -> Self:
        """Load ``text`` from ``path`` when only a path was given."""
        if self.path is not None and self.text is not None:
            raise ValueError('prompt: set exactly one of `path` or `text`, not both')
        if self.path is not None:
            prompt_path = Path(self.path).expanduser().resolve()
            self.text = prompt_path.read_text(encoding='utf-8').strip()
        return self
62
+
63
+
64
class VaultCfg(BaseModel):
    """Per-vault directory layout for the watcher service."""

    input_dir: Path
    output_dir: Path
    originals_dir: Path
    # Populated from the vault's key during ServiceCfg validation.
    name: str | None = None

    @model_validator(mode='after')
    def _expand(self) -> Self:
        """Normalize every directory to an absolute, user-expanded path."""
        for attr in ('input_dir', 'output_dir', 'originals_dir'):
            raw = getattr(self, attr)
            setattr(self, attr, Path(raw).expanduser().resolve())
        return self
76
+
77
+
78
class ServiceCfg(BaseModel):
    """Watcher-service settings: vault map plus concurrency/retry knobs."""

    vaults: dict[str, VaultCfg]
    max_concurrent: int = 3
    max_tries: int = 3
    pickup_delay_seconds: float = 3.0

    @model_validator(mode='after')
    def _check(self) -> Self:
        """Name each vault, require distinct input dirs, bound the knobs."""
        if not self.vaults:
            raise ValueError('service.vaults must define at least one vault')
        claimed: dict[Path, str] = {}
        for vault_name, vault in self.vaults.items():
            vault.name = vault_name
            owner = claimed.get(vault.input_dir)
            if owner is not None:
                # Two vaults watching one directory would double-process notes.
                raise ValueError(
                    f'vaults {owner!r} and {vault_name!r} share '
                    f'input_dir {vault.input_dir}; input_dirs must be distinct'
                )
            claimed[vault.input_dir] = vault_name
        if self.max_concurrent < 1:
            raise ValueError('service.max_concurrent must be >= 1')
        if self.max_tries < 1:
            raise ValueError('service.max_tries must be >= 1')
        if self.pickup_delay_seconds < 0:
            raise ValueError('service.pickup_delay_seconds must be >= 0')
        return self
104
+
105
+
106
class AppConfig(BaseModel):
    """Top-level application configuration loaded from YAML."""

    openai: OpenAICfg = Field(default_factory=OpenAICfg)
    prompt: PromptCfg = Field(default_factory=PromptCfg)
    service: ServiceCfg | None = None

    @model_validator(mode='after')
    def _default_prompt(self) -> Self:
        """Fall back to the packaged prompt when none was configured."""
        if self.prompt.text is None and self.prompt.path is None:
            self.prompt = PromptCfg(text=_packaged_prompt_text())
        return self

    @classmethod
    def load(cls, path: Path | None) -> AppConfig:
        """Load config from *path*, else the XDG default, else built-ins.

        Raises:
            FileNotFoundError: when an explicit *path* does not exist.
            ValueError: when the file fails validation (via _from_file).
        """
        if path is not None:
            explicit = Path(path).expanduser().resolve()
            if not explicit.exists():
                raise FileNotFoundError(f'config file not found: {explicit}')
            return cls._from_file(explicit)

        default = _xdg_default_config_path()
        if default.exists():
            return cls._from_file(default)

        # No file anywhere: run entirely on built-in defaults.
        return cls()

    @classmethod
    def _from_file(cls, path: Path) -> AppConfig:
        """Parse and validate a YAML config file, wrapping pydantic errors."""
        with path.open('r', encoding='utf-8') as fp:
            raw = yaml.safe_load(fp) or {}
        try:
            return cls.model_validate(raw)
        except ValidationError as e:
            raise ValueError(f'invalid config at {path}:\n{e}') from e

    def require_service(self) -> ServiceCfg:
        """Return the service section, raising with guidance when absent."""
        if self.service is None:
            raise ValueError(
                'service config required: add a `service:` section to your config.yaml '
                f'(default location: {_xdg_default_config_path()})'
            )
        return self.service