textprompts 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,29 @@
1
+ from .config import MetadataMode, get_metadata, set_metadata
2
+ from .errors import (
3
+ FileMissingError,
4
+ InvalidMetadataError,
5
+ MalformedHeaderError,
6
+ MissingMetadataError,
7
+ TextPromptsError,
8
+ )
9
+ from .loaders import load_prompt, load_prompts
10
+ from .models import Prompt, PromptMeta
11
+ from .safe_string import SafeString
12
+ from .savers import save_prompt
13
+
14
# Public API surface re-exported at the package top level: loaders, saver,
# data models, metadata-mode configuration, and the exception hierarchy.
__all__ = [
    "load_prompt",
    "load_prompts",
    "save_prompt",
    "Prompt",
    "PromptMeta",
    "SafeString",
    "MetadataMode",
    "set_metadata",
    "get_metadata",
    "TextPromptsError",
    "FileMissingError",
    "MissingMetadataError",
    "InvalidMetadataError",
    "MalformedHeaderError",
]
textprompts/_parser.py ADDED
@@ -0,0 +1,139 @@
1
+ import textwrap
2
+ from pathlib import Path
3
+ from typing import Optional
4
+
5
+ try:
6
+ import tomllib
7
+ except ImportError:
8
+ import tomli as tomllib # type: ignore[import-not-found, no-redef]
9
+
10
+ from .config import MetadataMode
11
+ from .errors import InvalidMetadataError, MalformedHeaderError, MissingMetadataError
12
+ from .models import Prompt, PromptMeta
13
+ from .safe_string import SafeString
14
+
15
+ DELIM = "---"
16
+
17
+
18
+ def _split_front_matter(text: str) -> tuple[Optional[str], str]:
19
+ """
20
+ Returns (header, body). Header may be None.
21
+
22
+ Strict parsing: only considers "---" at the very beginning of the file
23
+ (no leading whitespace) as valid front matter delimiter.
24
+ """
25
+ # Must start exactly with "---" (no leading whitespace)
26
+ if not text.startswith(DELIM):
27
+ return None, text
28
+
29
+ # Find second delimiter after the first
30
+ second_delim = text.find(DELIM, len(DELIM))
31
+ if second_delim == -1:
32
+ raise MalformedHeaderError("Missing closing delimiter '---' for front matter")
33
+
34
+ # Extract header and body
35
+ header = text[len(DELIM) : second_delim].strip()
36
+ body = text[second_delim + len(DELIM) :].lstrip("\n")
37
+
38
+ return header, body
39
+
40
+
41
def parse_file(path: Path, *, metadata_mode: MetadataMode) -> Prompt:
    """
    Parse a prompt file according to the specified metadata mode.

    Parameters
    ----------
    path : Path
        The file to parse.
    metadata_mode : MetadataMode
        The metadata handling mode.

    Returns
    -------
    Prompt
        The parsed prompt; ``meta.title`` falls back to the file stem when
        no title is supplied.

    Raises
    ------
    TextPromptsError
        If the file is not valid UTF-8.
    MissingMetadataError
        In STRICT mode when the file has no front matter.
    InvalidMetadataError
        If front matter is present but invalid (bad TOML, missing/empty
        required fields in STRICT mode, or model validation failure).
    """

    try:
        raw = path.read_text(encoding="utf-8")
    except UnicodeDecodeError as e:
        from .errors import TextPromptsError

        raise TextPromptsError(f"Cannot decode {path} as UTF-8: {e}") from e

    # Handle IGNORE mode - treat entire file as body, filename stem as title.
    # NOTE(review): textwrap.dedent here strips common leading whitespace
    # from the whole file — confirm this is intended for prompt bodies.
    if metadata_mode == MetadataMode.IGNORE:
        ignore_meta = PromptMeta(title=path.stem)
        return Prompt(path=path, meta=ignore_meta, body=SafeString(textwrap.dedent(raw)))

    # For STRICT and ALLOW modes, try to parse front matter
    try:
        header_txt, body = _split_front_matter(raw)
    except MalformedHeaderError as e:
        # If parsing fails and file starts with "---", suggest using IGNORE mode
        if raw.startswith(DELIM):
            raise InvalidMetadataError(
                f"{e}. If this file has no metadata and starts with '---', "
                f"use meta=MetadataMode.IGNORE to skip metadata parsing."
            ) from e
        raise

    meta: Optional[PromptMeta] = None
    if header_txt is not None:
        # We have front matter - parse it as TOML.
        try:
            data = tomllib.loads(header_txt)

            if metadata_mode == MetadataMode.STRICT:
                # STRICT mode: require title, description, version fields and they must not be empty
                required_fields = {"title", "description", "version"}
                missing_fields = required_fields - set(data.keys())
                if missing_fields:
                    raise InvalidMetadataError(
                        f"Missing required metadata fields: {', '.join(sorted(missing_fields))}. "
                        f"STRICT mode requires 'title', 'description', and 'version' fields. "
                        f"Use meta=MetadataMode.ALLOW for less strict validation."
                    )

                # Check for empty required fields (None, "", or whitespace-only).
                empty_fields = [
                    field
                    for field in required_fields
                    if not data.get(field) or str(data.get(field)).strip() == ""
                ]
                if empty_fields:
                    raise InvalidMetadataError(
                        f"Empty required metadata fields: {', '.join(sorted(empty_fields))}. "
                        f"STRICT mode requires non-empty 'title', 'description', and 'version' fields. "
                        f"Use meta=MetadataMode.ALLOW for less strict validation."
                    )

            # For both STRICT and ALLOW modes, validate the data structure
            meta = PromptMeta.model_validate(data)

        except tomllib.TOMLDecodeError as e:
            raise InvalidMetadataError(
                f"Invalid TOML in front matter: {e}. "
                f"Use meta=MetadataMode.IGNORE to skip metadata parsing."
            ) from e
        except InvalidMetadataError:
            # Re-raise our own errors unchanged so messages above survive.
            raise
        except Exception as e:
            # Anything else (e.g. model validation failure) is reported
            # uniformly as invalid metadata.
            raise InvalidMetadataError(f"Invalid metadata: {e}") from e

    else:
        # No front matter found
        if metadata_mode == MetadataMode.STRICT:
            raise MissingMetadataError(
                f"No metadata found in {path}. "
                f"STRICT mode requires metadata with title, description, and version fields. "
                f"Use meta=MetadataMode.ALLOW or meta=MetadataMode.IGNORE for less strict validation."
            )
        # ALLOW mode: create empty metadata
        meta = PromptMeta()

    # Defensive fallback — presumably PromptMeta instances are always
    # truthy, so this is belt-and-braces; TODO confirm against pydantic.
    if not meta:
        meta = PromptMeta()

    # Use filename as title if not provided
    if meta.title is None:
        meta.title = path.stem

    return Prompt(path=path, meta=meta, body=SafeString(textwrap.dedent(body)))
textprompts/cli.py ADDED
@@ -0,0 +1,31 @@
1
+ import argparse
2
+ import json
3
+ import sys
4
+ from pathlib import Path
5
+
6
+ from .errors import TextPromptsError
7
+ from .loaders import load_prompt
8
+
9
+
10
+ def _make_parser() -> argparse.ArgumentParser:
11
+ p = argparse.ArgumentParser(description="Show prompt metadata/body")
12
+ p.add_argument("file", type=Path)
13
+ p.add_argument("--json", action="store_true", help="Print metadata as JSON")
14
+ return p
15
+
16
+
17
def main() -> None:
    """CLI entry point: print a prompt's body, or its metadata as JSON."""
    args = _make_parser().parse_args()
    try:
        # Metadata is never required just to display a file, so load
        # with "ignore" semantics.
        prompt = load_prompt(args.file, meta="ignore")
        if args.json:
            meta_dict = prompt.meta.model_dump() if prompt.meta else {}
            print(json.dumps(meta_dict, indent=2))
        else:
            print(prompt.body)
    except TextPromptsError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
textprompts/config.py ADDED
@@ -0,0 +1,110 @@
1
+ """
2
+ Global configuration for textprompts metadata handling.
3
+ """
4
+
5
+ from enum import Enum
6
+ from typing import Union
7
+
8
+
9
class MetadataMode(Enum):
    """
    How TOML front matter is handled when loading a prompt file.

    Members
    -------
    STRICT
        Front matter must exist and contain non-empty ``title``,
        ``description``, and ``version`` fields; anything else is an error.
    ALLOW
        Whatever metadata exists is loaded; fields may be absent or empty.
        Only invalid TOML syntax raises.
    IGNORE
        Front matter is not parsed at all — the whole file is the prompt
        body and the filename (without extension) becomes the title.
    """

    STRICT = "strict"
    ALLOW = "allow"
    IGNORE = "ignore"
29
+
30
+
31
# Global configuration variable holding the default metadata mode.
_METADATA_MODE: MetadataMode = MetadataMode.IGNORE


def set_metadata(mode: Union[MetadataMode, str]) -> None:
    """
    Set the global metadata handling mode.

    Parameters
    ----------
    mode : MetadataMode or str
        The metadata handling mode to use globally.
        Can be MetadataMode enum or string: "strict", "allow", or "ignore".

    Examples
    --------
    >>> import textprompts
    >>> textprompts.set_metadata(textprompts.MetadataMode.STRICT)
    >>> textprompts.set_metadata("allow")  # Also works with strings

    Raises
    ------
    ValueError
        If mode is not a valid MetadataMode or string.
    """
    global _METADATA_MODE

    if isinstance(mode, str):
        try:
            mode = MetadataMode(mode.lower())
        except ValueError:
            valid_modes = [m.value for m in MetadataMode]
            # Chain from None: the enum's own ValueError adds nothing beyond
            # the message raised here (fixes implicit re-raise context / B904).
            raise ValueError(
                f"Invalid metadata mode: {mode}. Valid modes: {valid_modes}"
            ) from None

    if not isinstance(mode, MetadataMode):
        raise ValueError(f"Mode must be MetadataMode enum or string, got {type(mode)}")

    _METADATA_MODE = mode
71
+
72
+
73
def get_metadata() -> MetadataMode:
    """
    Get the current global metadata handling mode.

    Returns
    -------
    MetadataMode
        The current global metadata handling mode, as last set via
        ``set_metadata`` (defaults to ``MetadataMode.IGNORE``).
    """
    # Simple accessor for the module-level configuration variable.
    return _METADATA_MODE
83
+
84
+
85
def _resolve_metadata_mode(meta: Union[MetadataMode, str, None]) -> MetadataMode:
    """
    Resolve the effective metadata mode.

    An explicit ``meta`` argument takes precedence; otherwise the global
    configuration (see ``set_metadata``) is used.

    Parameters
    ----------
    meta : MetadataMode, str, or None
        Explicit metadata mode override, or None to defer to the global
        setting. Strings are matched case-insensitively.

    Returns
    -------
    MetadataMode
        The resolved metadata mode to use.
    """
    if meta is None:
        # No override: fall back to the module-level configuration.
        return _METADATA_MODE
    if isinstance(meta, str):
        return MetadataMode(meta.lower())
    return meta
textprompts/errors.py ADDED
@@ -0,0 +1,18 @@
1
+ from pathlib import Path
2
+
3
+
4
class TextPromptsError(Exception):
    """Base class for every error raised by textprompts."""


class FileMissingError(TextPromptsError):
    """Raised when a requested prompt file does not exist."""

    def __init__(self, path: Path):
        super().__init__(f"File not found: {path}")


class MissingMetadataError(TextPromptsError):
    """Raised in STRICT mode when a file has no front matter."""


class InvalidMetadataError(TextPromptsError):
    """Raised when front matter exists but fails parsing or validation."""


class MalformedHeaderError(TextPromptsError):
    """Raised when a front-matter block is structurally broken."""
textprompts/loaders.py ADDED
@@ -0,0 +1,113 @@
1
+ from pathlib import Path
2
+ from typing import Iterable, Union
3
+
4
+ from ._parser import parse_file
5
+ from .config import MetadataMode, _resolve_metadata_mode
6
+ from .errors import FileMissingError
7
+ from .models import Prompt
8
+
9
+
10
def load_prompt(
    path: Union[str, Path], *, meta: Union[MetadataMode, str, None] = None
) -> Prompt:
    """
    Load a single prompt file.

    Parameters
    ----------
    path : str | Path
        File to load.
    meta : MetadataMode, str, or None, default None
        Metadata handling mode. Can be:
        - MetadataMode.STRICT: Requires metadata with title, description, version (not empty)
        - MetadataMode.ALLOW: Loads any metadata, can be empty, only errors on TOML parse failure
        - MetadataMode.IGNORE: No metadata parsing, uses filename as title
        - String: "strict", "allow", or "ignore"
        - None: Use global configuration

    Returns
    -------
    Prompt
        The parsed prompt (path, metadata, and body).

    Raises
    ------
    TextPromptsError subclasses on any failure (FileMissingError when the
    path is not an existing regular file).

    Examples
    --------
    >>> # Using global configuration
    >>> import textprompts
    >>> textprompts.set_metadata(textprompts.MetadataMode.STRICT)
    >>> prompt = textprompts.load_prompt("example.txt")

    >>> # Override with parameter
    >>> prompt = textprompts.load_prompt("example.txt", meta=textprompts.MetadataMode.ALLOW)
    >>> prompt = textprompts.load_prompt("example.txt", meta="ignore")  # String also works
    """
    fp = Path(path)
    # is_file() also rejects directories, not just missing paths.
    if not fp.is_file():
        raise FileMissingError(fp)

    # Resolve metadata mode from parameters and global config
    mode = _resolve_metadata_mode(meta)

    return parse_file(fp, metadata_mode=mode)
51
+
52
+
53
def load_prompts(
    *paths: Union[str, Path],
    recursive: bool = False,
    glob: str = "*.txt",
    meta: Union[MetadataMode, str, None] = None,
    max_files: Union[int, None] = 1000,
) -> list[Prompt]:
    """
    Convenience loader for many files / directories.

    Parameters
    ----------
    *paths : str | Path
        Files and directories to load.
    recursive : bool, default False
        If True, search directories recursively.
    glob : str, default "*.txt"
        Glob pattern for finding files in directories.
    meta : MetadataMode, str, or None, default None
        Metadata handling mode. Can be:
        - MetadataMode.STRICT: Requires metadata with title, description, version (not empty)
        - MetadataMode.ALLOW: Loads any metadata, can be empty, only errors on TOML parse failure
        - MetadataMode.IGNORE: No metadata parsing, uses filename as title
        - String: "strict", "allow", or "ignore"
        - None: Use global configuration
    max_files : int | None, default 1000
        Maximum number of files to process. None for no limit.

    Raises
    ------
    TextPromptsError
        If more than ``max_files`` files would be loaded.

    Examples
    --------
    >>> # Using global configuration
    >>> import textprompts
    >>> textprompts.set_metadata(textprompts.MetadataMode.ALLOW)
    >>> prompts = textprompts.load_prompts("prompts/", recursive=True)

    >>> # Override with parameter
    >>> prompts = textprompts.load_prompts("prompts/", meta="strict")
    """
    from .errors import TextPromptsError

    def _check_limit(count: int) -> None:
        # Compare against None explicitly: the original truthiness test
        # silently treated max_files=0 as "no limit".
        if max_files is not None and count >= max_files:
            raise TextPromptsError(f"Exceeded max_files limit of {max_files}")

    collected: list[Prompt] = []
    file_count = 0

    for p in paths:
        pth = Path(p)
        if pth.is_dir():
            itr: Iterable[Path] = pth.rglob(glob) if recursive else pth.glob(glob)
            for f in itr:
                _check_limit(file_count)
                collected.append(load_prompt(f, meta=meta))
                file_count += 1
        else:
            _check_limit(file_count)
            collected.append(load_prompt(pth, meta=meta))
            file_count += 1

    return collected
textprompts/models.py ADDED
@@ -0,0 +1,42 @@
1
+ from datetime import date
2
+ from pathlib import Path
3
+ from typing import Union
4
+
5
+ from pydantic import BaseModel, Field, field_validator
6
+
7
+ from .safe_string import SafeString
8
+
9
+
10
class PromptMeta(BaseModel):
    """
    Metadata parsed from a prompt file's TOML front matter.

    All fields are optional; loaders fall back to the filename stem for
    ``title`` when it is absent.
    """

    # Human-readable name shown in reprs; also the STRICT-mode required field.
    title: Union[str, None] = Field(default=None, description="Human-readable name")
    # Free-form version string (STRICT mode requires it to be non-empty).
    version: Union[str, None] = Field(default=None)
    # Optional author attribution.
    author: Union[str, None] = Field(default=None)
    # Creation date; TOML date values parse directly into datetime.date.
    created: Union[date, None] = Field(default=None)
    # Short description of the prompt (STRICT mode requires it non-empty).
    description: Union[str, None] = Field(default=None)
16
+
17
+
18
class Prompt(BaseModel):
    """
    A loaded prompt: source path, optional metadata, and the body text.

    ``str(prompt)`` yields the body, so a Prompt can be dropped directly
    into string contexts.
    """

    # Filesystem location the prompt was loaded from.
    path: Path
    # Parsed front matter, or None when unavailable.
    meta: Union[PromptMeta, None]
    # Prompt text; SafeString validates .format() calls against placeholders.
    body: SafeString

    @field_validator("body")
    @classmethod
    def body_not_empty(cls, v: str) -> SafeString:
        """Reject bodies that are empty or whitespace-only."""
        if not v.strip():
            raise ValueError("Prompt body is empty")
        # Re-wrap to guarantee the stored value is a SafeString even if a
        # plain str was supplied.
        return SafeString(v)

    def __repr__(self) -> str:
        # Prefer the most informative compact form available:
        # title+version, then title alone, then the file path.
        if self.meta and self.meta.title:
            if self.meta.version:
                return (
                    f"Prompt(title='{self.meta.title}', version='{self.meta.version}')"
                )
            else:
                return f"Prompt(title='{self.meta.title}')"
        else:
            return f"Prompt(path='{self.path}')"

    def __str__(self) -> str:
        # Delegate to the body so prompts can be used as strings directly.
        return str(self.body)
@@ -0,0 +1,138 @@
1
+ """
2
+ Utility functions for extracting and validating placeholders in format strings.
3
+
4
+ This module provides robust placeholder extraction and validation for SafeString
5
+ formatting operations.
6
+ """
7
+
8
+ import re
9
+ from typing import Any, Dict, Set, Tuple
10
+
11
+
12
def extract_placeholders(text: str) -> Set[str]:
    """
    Extract all placeholder names from a format string.

    Handles various placeholder formats including:
    - Named placeholders: {name}
    - Positional placeholders: {0}, {1}
    - Format specifiers: {value:02d}, {price:.2f}
    - Conversions: {obj!r}, {obj!s:>10}
    - Ignores escaped braces: {{literal}}

    Args:
        text: The format string to extract placeholders from

    Returns:
        Set of placeholder names found in the string

    Examples:
        >>> extract_placeholders("Hello {name}!")
        {'name'}
        >>> extract_placeholders("Escaped {{braces}} but {real} placeholder")
        {'real'}
        >>> extract_placeholders("{obj!r}")
        {'obj'}
    """
    # Hide escaped braces behind sentinel markers so they are never
    # mistaken for real placeholders.
    temp_text = text.replace("{{", "\x00ESCAPED_OPEN\x00").replace(
        "}}", "\x00ESCAPED_CLOSE\x00"
    )

    # Pattern explanation:
    # \{            - literal opening brace
    # ([^}!:]*)     - capture group 1: field name; stops at '!' (conversion),
    #                 ':' (format spec), or '}' (end). The original pattern
    #                 did not stop at '!', so "{name!r}" produced the bogus
    #                 placeholder "name!r".
    # (?:![^}:]*)?  - optional conversion such as !r / !s / !a
    # (?::[^}]*)?   - optional format specifier
    # \}            - literal closing brace
    pattern = r"\{([^}!:]*)(?:![^}:]*)?(?::[^}]*)?\}"

    matches = re.findall(pattern, temp_text)
    return set(matches)
53
+
54
+
55
def validate_format_args(
    placeholders: Set[str],
    args: Tuple[Any, ...],
    kwargs: Dict[str, Any],
    skip_validation: bool = False,
) -> None:
    """
    Check that the supplied format arguments cover every placeholder.

    Args:
        placeholders: Placeholder names expected by the template
        args: Positional arguments passed to format()
        kwargs: Keyword arguments passed to format()
        skip_validation: If True, bypass all checks

    Raises:
        ValueError: If any placeholder has no corresponding argument

    Examples:
        >>> validate_format_args({'name'}, (), {'name': 'Alice'})  # OK
        >>> validate_format_args({'name'}, (), {}, skip_validation=True)  # OK
    """
    if skip_validation:
        return

    # Merge positional arguments in under their string indices ("0", "1", ...).
    provided: Dict[str, Any] = dict(kwargs)
    for index, value in enumerate(args):
        provided[str(index)] = value

    # An empty placeholder ("{}") is satisfied by the first positional arg.
    if args and "" in placeholders:
        provided[""] = args[0]

    missing = placeholders - {str(key) for key in provided}
    if missing:
        raise ValueError(f"Missing format variables: {sorted(missing)}")
94
+
95
+
96
def should_ignore_validation(ignore_flag: bool) -> bool:
    """
    Report whether placeholder validation should be bypassed.

    Kept as a seam for future, more elaborate logic (e.g. honoring a
    global setting); today it simply echoes the flag.

    Args:
        ignore_flag: The _ignore_placeholders flag value

    Returns:
        True if validation should be bypassed, False otherwise
    """
    return ignore_flag
110
+
111
+
112
def get_placeholder_info(text: str) -> Dict[str, Any]:
    """
    Analyze the placeholders in a format string.

    Args:
        text: The format string to analyze

    Returns:
        Dictionary with keys ``count``, ``names``, ``has_positional``,
        ``has_named``, and ``is_mixed``

    Examples:
        >>> info = get_placeholder_info("Hello {name}, you have {count:d} items")
        >>> info['count']
        2
    """
    names = extract_placeholders(text)
    # Digit-only placeholders ({0}, {1}, ...) are positional; everything
    # else counts as named.
    positional = {name for name in names if name.isdigit()}
    named = names - positional

    return {
        "count": len(names),
        "names": names,
        "has_positional": bool(positional),
        "has_named": bool(named),
        "is_mixed": bool(positional) and bool(named),
    }
textprompts/py.typed ADDED
File without changes
@@ -0,0 +1,110 @@
1
+ from typing import Any, Set
2
+
3
+ from pydantic import GetCoreSchemaHandler
4
+ from pydantic_core import core_schema
5
+
6
+ from .placeholder_utils import extract_placeholders, validate_format_args
7
+
8
+
9
class SafeString(str):
    """
    A string subclass that validates format() calls to ensure all placeholders are provided.

    This prevents common errors where format variables are missing, making prompt templates
    more reliable and easier to debug.

    Attributes:
        placeholders: Set of placeholder names found in the string
    """

    placeholders: Set[str]

    def __new__(cls, value: str) -> "SafeString":
        """Create a new SafeString instance with extracted placeholders."""
        # str is immutable, so construction happens in __new__, and the
        # placeholder set is computed once and cached on the instance.
        instance = str.__new__(cls, value)
        instance.placeholders = extract_placeholders(value)
        return instance

    def format(self, *args: Any, **kwargs: Any) -> str:
        """
        Format the string with configurable validation behavior.

        By default (skip_validation=False), this method validates that all placeholders
        have corresponding values and raises ValueError if any are missing.

        When skip_validation=True, it performs partial formatting, replacing only
        the placeholders for which values are provided.

        Args:
            *args: Positional arguments for formatting
            **kwargs: Keyword arguments for formatting; the special key
                ``skip_validation`` (popped, not formatted) selects partial
                formatting. NOTE(review): a template with a literal
                ``{skip_validation}`` placeholder therefore cannot receive
                that value — confirm this trade-off is acceptable.

        Returns:
            The formatted string

        Raises:
            ValueError: If skip_validation=False and any placeholder is missing
        """
        skip_validation = kwargs.pop('skip_validation', False)
        if skip_validation:
            # Partial formatting - replace only available placeholders
            return self._partial_format(*args, **kwargs)
        else:
            # Strict formatting - validate first, then delegate to str.format
            validate_format_args(self.placeholders, args, kwargs, skip_validation=False)
            return str.format(self, *args, **kwargs)

    def _partial_format(self, *args: Any, **kwargs: Any) -> str:
        """
        Perform partial formatting, replacing only the placeholders that have values.

        Only bare ``{name}`` occurrences are substituted; placeholders with
        format specs or conversions (e.g. ``{x:.2f}``, ``{x!r}``) are left
        intact, as are placeholders with no supplied value.

        Args:
            *args: Positional arguments for formatting
            **kwargs: Keyword arguments for formatting

        Returns:
            The partially formatted string
        """
        # Convert positional args to keyword args keyed by string index.
        all_kwargs = kwargs.copy()
        for i, arg in enumerate(args):
            all_kwargs[str(i)] = arg

        result = str(self)

        # Replace placeholders one by one if they have values
        for placeholder in self.placeholders:
            if placeholder in all_kwargs:
                # Only the exact "{name}" spelling is matched here.
                placeholder_pattern = f"{{{placeholder}}}"
                if placeholder_pattern in result:
                    try:
                        result = result.replace(
                            placeholder_pattern, str(all_kwargs[placeholder])
                        )
                    except (KeyError, ValueError):
                        # If replacement fails, leave the placeholder as is
                        pass

        return result

    def __str__(self) -> str:
        """Return the string representation."""
        return str.__str__(self)

    def __repr__(self) -> str:
        """Return the string representation for debugging."""
        return f"SafeString({str.__repr__(self)}, placeholders={self.placeholders})"

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type: Any, handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        """Support for Pydantic v2 schema generation."""
        # Validate as a plain str, then wrap the result in SafeString.
        return core_schema.no_info_after_validator_function(
            cls,
            core_schema.str_schema(),
        )
textprompts/savers.py ADDED
@@ -0,0 +1,66 @@
1
+ from pathlib import Path
2
+ from typing import Union
3
+
4
+ from .models import Prompt, PromptMeta
5
+
6
+
7
+ def save_prompt(path: Union[str, Path], content: Union[str, Prompt]) -> None:
8
+ """
9
+ Save a prompt to a file.
10
+
11
+ Parameters
12
+ ----------
13
+ path : str | Path
14
+ File path to save the prompt to.
15
+ content : str | Prompt
16
+ Either a string (prompt text only) or a Prompt object with metadata.
17
+ If a string is provided, a template with required metadata fields will be created.
18
+
19
+ Examples
20
+ --------
21
+ >>> # Save a simple prompt with metadata template
22
+ >>> save_prompt("my_prompt.txt", "You are a helpful assistant.")
23
+
24
+ >>> # Save a Prompt object with full metadata
25
+ >>> prompt = Prompt(
26
+ ... path=Path("my_prompt.txt"),
27
+ ... meta=PromptMeta(title="Assistant", version="1.0.0", description="A helpful AI"),
28
+ ... body="You are a helpful assistant."
29
+ ... )
30
+ >>> save_prompt("my_prompt.txt", prompt)
31
+ """
32
+ path = Path(path)
33
+
34
+ if isinstance(content, str):
35
+ # Create template with required fields
36
+ template = f"""---
37
+ title = ""
38
+ description = ""
39
+ version = ""
40
+ ---
41
+
42
+ {content}"""
43
+ path.write_text(template, encoding="utf-8")
44
+ elif isinstance(content, Prompt):
45
+ # Build the front matter
46
+ lines = ["---"]
47
+
48
+ # Always include required fields
49
+ meta = content.meta or PromptMeta()
50
+ lines.append(f'title = "{meta.title or ""}"')
51
+ lines.append(f'description = "{meta.description or ""}"')
52
+ lines.append(f'version = "{meta.version or ""}"')
53
+
54
+ # Include optional fields if present
55
+ if meta.author:
56
+ lines.append(f'author = "{meta.author}"')
57
+ if meta.created:
58
+ lines.append(f'created = "{meta.created.isoformat()}"')
59
+
60
+ lines.append("---")
61
+ lines.append("")
62
+ lines.append(str(content.body))
63
+
64
+ path.write_text("\n".join(lines), encoding="utf-8")
65
+ else:
66
+ raise TypeError(f"content must be str or Prompt, not {type(content).__name__}")
@@ -0,0 +1,493 @@
1
+ Metadata-Version: 2.4
2
+ Name: textprompts
3
+ Version: 0.0.1
4
+ Summary: Minimal text-based prompt-loader with TOML front-matter
5
+ Keywords: prompts,toml,frontmatter,template
6
+ Author: Jan Siml
7
+ Author-email: Jan Siml <49557684+svilupp@users.noreply.github.com>
8
+ License-Expression: MIT
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: MIT License
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python :: 3.13
16
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
17
+ Requires-Dist: pydantic~=2.7
18
+ Requires-Dist: tomli>=1.0.0 ; python_full_version < '3.11'
19
+ Requires-Python: >=3.11
20
+ Project-URL: Bug Tracker, https://github.com/svilupp/textprompts/issues
21
+ Project-URL: Documentation, https://github.com/svilupp/textprompts#readme
22
+ Project-URL: Homepage, https://github.com/svilupp/textprompts
23
+ Description-Content-Type: text/markdown
24
+
25
+ # textprompts
26
+
27
+ > **So simple, it's not even worth vibing about coding, yet it just makes so much sense.**
28
+
29
+ Are you tired of vendors trying to sell you fancy UIs for prompt management that just make your system more confusing and harder to debug? Isn't it nice to just have your prompts **next to your code**?
30
+
31
+ But then you worry: *Did my formatter change my prompt? Are those spaces at the beginning actually part of the prompt or just indentation?*
32
+
33
+ **textprompts** solves this elegantly: treat your prompts as **text files** and keep your linters and formatters away from them.
34
+
35
+ ## Why textprompts?
36
+
37
+ - ✅ **Prompts live next to your code** - no external systems to manage
38
+ - ✅ **Git is your version control** - diff, branch, and experiment with ease
39
+ - ✅ **No formatter headaches** - your prompts stay exactly as you wrote them
40
+ - ✅ **Minimal markup** - just TOML front-matter when you need metadata (or no metadata if you prefer!)
41
+ - ✅ **Zero dependencies** - well, almost (just Pydantic)
42
+ - ✅ **Safe formatting** - catch missing variables before they cause problems
43
+ - ✅ **Works with everything** - OpenAI, Anthropic, local models, function calls
44
+
45
+ ## Installation
46
+
47
+ ```bash
48
+ uv add textprompts # or pip install textprompts
49
+ ```
50
+
51
+ ## Quick Start
52
+
53
+ **Super simple by default** - TextPrompts just loads text files with optional metadata:
54
+
55
+ 1. **Create a prompt file** (`greeting.txt`):
56
+ ```
57
+ ---
58
+ title = "Customer Greeting"
59
+ version = "1.0.0"
60
+ description = "Friendly greeting for customer support"
61
+ ---
62
+
63
+ Hello {customer_name}!
64
+
65
+ Welcome to {company_name}. We're here to help you with {issue_type}.
66
+
67
+ Best regards,
68
+ {agent_name}
69
+ ```
70
+
71
+ 2. **Load and use it** (no configuration needed):
72
+ ```python
73
+ import textprompts
74
+
75
+ # Just load it - works with or without metadata
76
+ prompt = textprompts.load_prompt("greeting.txt")
77
+
78
+ # Use it safely - all placeholders must be provided
79
+ message = prompt.body.format(
80
+ customer_name="Alice",
81
+ company_name="ACME Corp",
82
+ issue_type="billing question",
83
+ agent_name="Sarah"
84
+ )
85
+
86
+ print(message)
87
+
88
+ # Or use partial formatting when needed
89
+ partial = prompt.body.format(
90
+ customer_name="Alice",
91
+ company_name="ACME Corp",
92
+ skip_validation=True
93
+ )
94
+ # Result: "Hello Alice!\n\nWelcome to ACME Corp. We're here to help you with {issue_type}.\n\nBest regards,\n{agent_name}"
95
+ ```
96
+
97
+ **Even simpler** - no metadata required:
98
+ ```python
99
+ # simple_prompt.txt contains just: "Analyze this data: {data}"
100
+ prompt = textprompts.load_prompt("simple_prompt.txt") # Just works!
101
+ result = prompt.body.format(data="sales figures")
102
+ ```
103
+
104
+ ## Core Features
105
+
106
+ ### Safe String Formatting
107
+
108
+ Never ship a prompt with missing variables again:
109
+
110
+ ```python
111
+ from textprompts import SafeString
112
+
113
+ template = SafeString("Hello {name}, your order {order_id} is {status}")
114
+
115
+ # ✅ Strict formatting - all placeholders must be provided
116
+ result = template.format(name="Alice", order_id="12345", status="shipped")
117
+
118
+ # ❌ This catches the error by default
119
+ try:
120
+ result = template.format(name="Alice") # Missing order_id and status
121
+ except ValueError as e:
122
+ print(f"Error: {e}") # Missing format variables: ['order_id', 'status']
123
+
124
+ # ✅ Partial formatting - replace only what you have
125
+ partial = template.format(name="Alice", skip_validation=True)
126
+ print(partial) # "Hello Alice, your order {order_id} is {status}"
127
+ ```
128
+
129
+ ### Bulk Loading
130
+
131
+ Load entire directories of prompts:
132
+
133
+ ```python
134
+ from textprompts import load_prompts
135
+
136
+ # Load all prompts from a directory
137
+ prompts = load_prompts("prompts/", recursive=True)
138
+
139
+ # Create a lookup
140
+ prompt_dict = {p.meta.title: p for p in prompts if p.meta}
141
+ greeting = prompt_dict["Customer Greeting"]
142
+ ```
143
+
144
+ ### Simple & Flexible Metadata Handling
145
+
146
+ TextPrompts is designed to be **super simple** by default - just load text files with optional metadata when available. No configuration needed!
147
+
148
+ ```python
149
+ import textprompts
150
+
151
+ # Default behavior: load the prompt as plain text (see the metadata modes below)
152
+ prompt = textprompts.load_prompt("my_prompt.txt") # Just works!
153
+
154
+ # Three modes available for different use cases:
155
+ # 1. IGNORE (default): Treat as simple text file, use filename as title
156
+ textprompts.set_metadata("ignore") # Super simple file loading
157
+ prompt = textprompts.load_prompt("prompt.txt") # No metadata parsing
158
+ print(prompt.meta.title) # "prompt" (from filename)
159
+
160
+ # 2. ALLOW: Load metadata if present, don't worry if it's incomplete
161
+ textprompts.set_metadata("allow") # Flexible metadata loading
162
+ prompt = textprompts.load_prompt("prompt.txt") # Loads any metadata found
163
+
164
+ # 3. STRICT: Require complete metadata for production use
165
+ textprompts.set_metadata("strict") # Prevent errors in production
166
+ prompt = textprompts.load_prompt("prompt.txt") # Must have title, description, version
167
+
168
+ # Override per prompt when needed
169
+ prompt = textprompts.load_prompt("prompt.txt", meta="strict")
170
+ ```
171
+
172
+ **Why this design?**
173
+ - **Default = Simple**: No configuration needed, just load files
174
+ - **Flexible**: Add metadata when you want structure
175
+ - **Production-Safe**: Use strict mode to catch missing metadata before deployment
176
+
177
+ ## Real-World Examples
178
+
179
+ ### OpenAI Integration
180
+
181
+ ```python
182
+ import openai
183
+ from textprompts import load_prompt
184
+
185
+ system_prompt = load_prompt("prompts/customer_support_system.txt")
186
+ user_prompt = load_prompt("prompts/user_query_template.txt")
187
+
188
+ response = openai.chat.completions.create(
189
+ model="gpt-4.1-mini",
190
+ messages=[
191
+ {
192
+ "role": "system",
193
+ "content": system_prompt.body.format(
194
+ company_name="ACME Corp",
195
+ support_level="premium"
196
+ )
197
+ },
198
+ {
199
+ "role": "user",
200
+ "content": user_prompt.body.format(
201
+ query="How do I return an item?",
202
+ customer_tier="premium"
203
+ )
204
+ }
205
+ ]
206
+ )
207
+ ```
208
+
209
+ ### Function Calling (Tool Definitions)
210
+
211
+ Yes, you can version control your function schemas too:
212
+
213
+ ```python
214
+ # tools/search_products.txt
215
+ ---
216
+ title = "Product Search Tool"
217
+ version = "2.1.0"
218
+ description = "Search our product catalog"
219
+ ---
220
+
221
+ {
222
+ "type": "function",
223
+ "function": {
224
+ "name": "search_products",
225
+ "description": "Search for products in our catalog",
226
+ "parameters": {
227
+ "type": "object",
228
+ "properties": {
229
+ "query": {
230
+ "type": "string",
231
+ "description": "Search query for products"
232
+ },
233
+ "category": {
234
+ "type": "string",
235
+ "enum": ["electronics", "clothing", "books"],
236
+ "description": "Product category to search within"
237
+ },
238
+ "max_results": {
239
+ "type": "integer",
240
+ "default": 10,
241
+ "description": "Maximum number of results to return"
242
+ }
243
+ },
244
+ "required": ["query"]
245
+ }
246
+ }
247
+ }
248
+ ```
249
+
250
+ ```python
251
+ import json
252
+ from textprompts import load_prompt
253
+
254
+ # Load and parse the tool definition
255
+ tool_prompt = load_prompt("tools/search_products.txt")
256
+ tool_schema = json.loads(tool_prompt.body)
257
+
258
+ # Use with OpenAI
259
+ response = openai.chat.completions.create(
260
+ model="gpt-4.1-mini",
261
+ messages=[{"role": "user", "content": "Find me some electronics"}],
262
+ tools=[tool_schema]
263
+ )
264
+ ```
265
+
266
+ ### Environment-Specific Prompts
267
+
268
+ ```python
269
+ import os
270
+ from textprompts import load_prompt
271
+
272
+ env = os.getenv("ENVIRONMENT", "development")
273
+ system_prompt = load_prompt(f"prompts/{env}/system.txt")
274
+
275
+ # prompts/development/system.txt - verbose logging
276
+ # prompts/production/system.txt - concise responses
277
+ ```
278
+
279
+ ### Prompt Versioning & Experimentation
280
+
281
+ ```python
282
+ from textprompts import load_prompt
283
+
284
+ # Easy A/B testing
285
+ prompt_version = "v2" # or "v1", "experimental", etc.
286
+ prompt = load_prompt(f"prompts/{prompt_version}/system.txt")
287
+
288
+ # Git handles the rest:
289
+ # git checkout experiment-branch
290
+ # git diff main -- prompts/
291
+ ```
292
+
293
+ ## File Format
294
+
295
+ TextPrompts uses TOML front-matter (optional) followed by your prompt content:
296
+
297
+ ```
298
+ ---
299
+ title = "My Prompt"
300
+ version = "1.0.0"
301
+ author = "Your Name"
302
+ description = "What this prompt does"
303
+ created = "2024-01-15"
304
+ tags = ["customer-support", "greeting"]
305
+ ---
306
+
307
+ Your prompt content goes here.
308
+
309
+ Use {variables} for templating.
310
+ ```
311
+
312
+ ### Metadata Modes
313
+
314
+ Choose the right level of strictness for your use case:
315
+
316
+ 1. **IGNORE** (default) - Simple text file loading, filename becomes title
317
+ 2. **ALLOW** - Load metadata if present, don't worry about completeness
318
+ 3. **STRICT** - Require complete metadata (title, description, version) for production safety
319
+
320
+ ```python
321
+ # Set globally
322
+ textprompts.set_metadata("ignore") # Default: simple file loading
323
+ textprompts.set_metadata("allow") # Flexible: load any metadata
324
+ textprompts.set_metadata("strict") # Production: require complete metadata
325
+
326
+ # Or override per prompt
327
+ prompt = textprompts.load_prompt("file.txt", meta="strict")
328
+ ```
329
+
330
+ ## API Reference
331
+
332
+ ### `load_prompt(path, *, meta=None)`
333
+
334
+ Load a single prompt file.
335
+
336
+ - `path`: Path to the prompt file
337
+ - `meta`: Metadata handling mode - `MetadataMode.STRICT`, `MetadataMode.ALLOW`, `MetadataMode.IGNORE`, or string equivalents. None uses global config.
338
+
339
+ Returns a `Prompt` object with:
340
+ - `prompt.meta`: Metadata from TOML front-matter (always present)
341
+ - `prompt.body`: The prompt content as a `SafeString`
342
+ - `prompt.path`: Path to the original file
343
+
344
+ ### `load_prompts(*paths, recursive=False, glob="*.txt", meta=None, max_files=1000)`
345
+
346
+ Load multiple prompts from files or directories.
347
+
348
+ - `*paths`: Files or directories to load
349
+ - `recursive`: Search directories recursively (default: False)
350
+ - `glob`: File pattern to match (default: "*.txt")
351
+ - `meta`: Metadata handling mode - `MetadataMode.STRICT`, `MetadataMode.ALLOW`, `MetadataMode.IGNORE`, or string equivalents. None uses global config.
352
+ - `max_files`: Maximum files to process (default: 1000)
353
+
354
+ ### `set_metadata(mode)` / `get_metadata()`
355
+
356
+ Set or get the global metadata handling mode.
357
+
358
+ - `mode`: `MetadataMode.STRICT`, `MetadataMode.ALLOW`, `MetadataMode.IGNORE`, or string equivalents
359
+
360
+ ```python
361
+ import textprompts
362
+
363
+ # Set global mode
364
+ textprompts.set_metadata(textprompts.MetadataMode.STRICT)
365
+ textprompts.set_metadata("allow") # String also works
366
+
367
+ # Get current mode
368
+ current_mode = textprompts.get_metadata()
369
+ ```
370
+
371
+ ### `save_prompt(path, content)`
372
+
373
+ Save a prompt to a file.
374
+
375
+ - `path`: Path to save the prompt file
376
+ - `content`: Either a string (creates template with required fields) or a `Prompt` object
377
+
378
+ ```python
379
+ from textprompts import save_prompt
380
+
381
+ # Save a simple prompt with metadata template
382
+ save_prompt("my_prompt.txt", "You are a helpful assistant.")
383
+
384
+ # Save a Prompt object with full metadata
385
+ save_prompt("my_prompt.txt", prompt_object)
386
+ ```
387
+
388
+ ### `SafeString`
389
+
390
+ A string subclass that validates `format()` calls:
391
+
392
+ ```python
393
+ from textprompts import SafeString
394
+
395
+ template = SafeString("Hello {name}, you are {role}")
396
+
397
+ # Strict formatting (default) - all placeholders required
398
+ result = template.format(name="Alice", role="admin") # ✅ Works
399
+ result = template.format(name="Alice") # ❌ Raises ValueError
400
+
401
+ # Partial formatting - replace only available placeholders
402
+ partial = template.format(name="Alice", skip_validation=True) # ✅ "Hello Alice, you are {role}"
403
+
404
+ # Access placeholder information
405
+ print(template.placeholders) # {'name', 'role'}
406
+ ```
407
+
408
+ ## Error Handling
409
+
410
+ TextPrompts provides specific exception types:
411
+
412
+ ```python
413
+ from textprompts import (
414
+ TextPromptsError, # Base exception
415
+ FileMissingError, # File not found
416
+ MissingMetadataError, # No TOML front-matter when required
417
+ InvalidMetadataError, # Invalid TOML syntax
418
+ MalformedHeaderError, # Malformed front-matter structure
419
+ MetadataMode, # Metadata handling mode enum
420
+ set_metadata, # Set global metadata mode
421
+ get_metadata # Get global metadata mode
422
+ )
423
+ ```
424
+
425
+ ## CLI Tool
426
+
427
+ TextPrompts includes a CLI for quick prompt inspection:
428
+
429
+ ```bash
430
+ # View a single prompt
431
+ textprompts show greeting.txt
432
+
433
+ # List all prompts in a directory
434
+ textprompts list prompts/ --recursive
435
+
436
+ # Validate prompts
437
+ textprompts validate prompts/
438
+ ```
439
+
440
+ ## Best Practices
441
+
442
+ 1. **Organize by purpose**: Group related prompts in folders
443
+ ```
444
+ prompts/
445
+ ├── customer-support/
446
+ ├── content-generation/
447
+ └── code-review/
448
+ ```
449
+
450
+ 2. **Use semantic versioning**: Version your prompts like code
451
+ ```
452
+ version = "1.2.0" # major.minor.patch
453
+ ```
454
+
455
+ 3. **Document your variables**: List expected variables in descriptions
456
+ ```
457
+ description = "Requires: customer_name, issue_type, agent_name"
458
+ ```
459
+
460
+ 4. **Test your prompts**: Write unit tests for critical prompts
461
+ ```python
462
+ def test_greeting_prompt():
463
+ prompt = load_prompt("greeting.txt")
464
+ result = prompt.body.format(customer_name="Test")
465
+ assert "Test" in result
466
+ ```
467
+
468
+ 5. **Use environment-specific prompts**: Different prompts for dev/prod
469
+ ```python
470
+ env = os.getenv("ENV", "development")
471
+ prompt = load_prompt(f"prompts/{env}/system.txt")
472
+ ```
473
+
474
+ ## Why Not Just Use String Templates?
475
+
476
+ You could, but then you lose:
477
+ - **Metadata tracking** (versions, authors, descriptions)
478
+ - **Safe formatting** (catch missing variables)
479
+ - **Organized storage** (searchable, documentable)
480
+ - **Version control benefits** (proper diffs, blame, history)
481
+ - **Tooling support** (CLI, validation, testing)
482
+
483
+ ## Contributing
484
+
485
+ We welcome contributions! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
486
+
487
+ ## License
488
+
489
+ MIT License - see [LICENSE](LICENSE) for details.
490
+
491
+ ---
492
+
493
+ **textprompts** - Because your prompts deserve better than being buried in code strings. 🚀
@@ -0,0 +1,15 @@
1
+ textprompts/__init__.py,sha256=60050c36585fc2a0efb2a00832304f65614a9d521e95b3f1424aee3095608360,676
2
+ textprompts/_parser.py,sha256=95f6bfd6ff904f6ce301d1b480197fd1b6167c3f8bc67756955304ef2578c644,5159
3
+ textprompts/cli.py,sha256=3e69ba206767319db597d2c742233786c5d2f6a473a008e47e5cd42b94639b85,810
4
+ textprompts/config.py,sha256=9f7fad3b30e5033b85ec9f54dd9afa5746157e8b788c91b7392f0dbf703611af,2846
5
+ textprompts/errors.py,sha256=7eda4a1bdf4ee8a50b420886d2016a52923baa05a5b5a65d6f582e3e500290d2,354
6
+ textprompts/loaders.py,sha256=d30719e53faa4c5870955e3fea5050e32e3b2381def69978256922fce6fe259b,3895
7
+ textprompts/models.py,sha256=d200b9f90936c19c8d1c8cd1c2deead61f8459cab776e2b7555311dad87372bd,1241
8
+ textprompts/placeholder_utils.py,sha256=7f362bbf8cf865a2812310f5b73abe7a6107b048381da7a972698751334e85b3,4461
9
+ textprompts/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
10
+ textprompts/safe_string.py,sha256=55d537f3ef8b57c20e6b8442fe972a990ab55ae69bdf49200aad03819946a0f1,4048
11
+ textprompts/savers.py,sha256=4afb10e4b1e2189eb39504349e7e6414c0d7b16e61cbb7f9332a36dad3ebbd50,2036
12
+ textprompts-0.0.1.dist-info/WHEEL,sha256=607c46fee47e440c91332c738096ff0f5e54ca3b0818ee85462dd5172a38e793,79
13
+ textprompts-0.0.1.dist-info/entry_points.txt,sha256=f8f14b032092a81e77431911104853b39293c983c9390aa11fe023e8bcd5c049,54
14
+ textprompts-0.0.1.dist-info/METADATA,sha256=7ce0ad8eb5a7464379486174fb4619d12e51775236f82e047bd596e2993e3d38,14667
15
+ textprompts-0.0.1.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: uv 0.7.19
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ textprompts = textprompts.cli:main
3
+