textprompts 0.0.2__tar.gz → 0.0.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: textprompts
- Version: 0.0.2
+ Version: 0.0.4
  Summary: Minimal text-based prompt-loader with TOML front-matter
  Keywords: prompts,toml,frontmatter,template
  Author: Jan Siml
@@ -24,9 +24,16 @@ Description-Content-Type: text/markdown

  # textprompts

+ [![PyPI version](https://img.shields.io/pypi/v/textprompts.svg)](https://pypi.org/project/textprompts/)
+ [![Python versions](https://img.shields.io/pypi/pyversions/textprompts.svg)](https://pypi.org/project/textprompts/)
+ [![CI status](https://github.com/svilupp/textprompts/workflows/CI/badge.svg)](https://github.com/svilupp/textprompts/actions)
+ [![Coverage](https://img.shields.io/codecov/c/github/svilupp/textprompts)](https://codecov.io/gh/svilupp/textprompts)
+ [![License](https://img.shields.io/pypi/l/textprompts.svg)](https://github.com/svilupp/textprompts/blob/main/LICENSE)
+
+
  > **So simple, it's not even worth vibing about coding yet it just makes so much sense.**

- Are you tired of vendors trying to sell you fancy UIs for prompt management that just make your system more confusing and harder to debug? Isn't it nice to just have your prompts **next to your code**?
+ Are you tired of vendors trying to sell you fancy UIs for prompt management that just make your system more confusing and harder to debug? Isn't it nice to just have your prompts **next to your code**?

  But then you worry: *Did my formatter change my prompt? Are those spaces at the beginning actually part of the prompt or just indentation?*

@@ -35,7 +42,7 @@ But then you worry: *Did my formatter change my prompt? Are those spaces at the
  ## Why textprompts?

  - ✅ **Prompts live next to your code** - no external systems to manage
- - ✅ **Git is your version control** - diff, branch, and experiment with ease
+ - ✅ **Git is your version control** - diff, branch, and experiment with ease
  - ✅ **No formatter headaches** - your prompts stay exactly as you wrote them
  - ✅ **Minimal markup** - just TOML front-matter when you need metadata (or no metadata if you prefer!)
  - ✅ **Zero dependencies** - well, almost (just Pydantic)
@@ -59,7 +66,6 @@ title = "Customer Greeting"
  version = "1.0.0"
  description = "Friendly greeting for customer support"
  ---
-
  Hello {customer_name}!

  Welcome to {company_name}. We're here to help you with {issue_type}.
@@ -74,11 +80,13 @@ import textprompts

  # Just load it - works with or without metadata
  prompt = textprompts.load_prompt("greeting.txt")
+ # Or simply
+ alt = textprompts.Prompt.from_path("greeting.txt")

  # Use it safely - all placeholders must be provided
  message = prompt.prompt.format(
      customer_name="Alice",
-     company_name="ACME Corp",
+     company_name="ACME Corp",
      issue_type="billing question",
      agent_name="Sarah"
  )
@@ -161,7 +169,7 @@ prompt = textprompts.load_prompt("prompt.txt") # No metadata parsing
  print(prompt.meta.title) # "prompt" (from filename)

  # 2. ALLOW: Load metadata if present, don't worry if it's incomplete
- textprompts.set_metadata("allow") # Flexible metadata loading
+ textprompts.set_metadata("allow") # Flexible metadata loading
  prompt = textprompts.load_prompt("prompt.txt") # Loads any metadata found

  # 3. STRICT: Require complete metadata for production use
@@ -174,7 +182,7 @@ prompt = textprompts.load_prompt("prompt.txt", meta="strict")

  **Why this design?**
  - **Default = Simple**: No configuration needed, just load files
- - **Flexible**: Add metadata when you want structure
+ - **Flexible**: Add metadata when you want structure
  - **Production-Safe**: Use strict mode to catch missing metadata before deployment

  ## Real-World Examples
@@ -199,7 +207,7 @@ response = openai.chat.completions.create(
              )
          },
          {
-             "role": "user",
+             "role": "user",
              "content": user_prompt.prompt.format(
                  query="How do I return an item?",
                  customer_tier="premium"
@@ -220,7 +228,6 @@ title = "Product Search Tool"
  version = "2.1.0"
  description = "Search our product catalog"
  ---
-
  {
    "type": "function",
    "function": {
@@ -234,7 +241,7 @@ description = "Search our product catalog"
            "description": "Search query for products"
          },
          "category": {
-           "type": "string",
+           "type": "string",
            "enum": ["electronics", "clothing", "books"],
            "description": "Product category to search within"
          },
@@ -306,7 +313,6 @@ description = "What this prompt does"
  created = "2024-01-15"
  tags = ["customer-support", "greeting"]
  ---
-
  Your prompt content goes here.

  Use {variables} for templating.
@@ -317,13 +323,17 @@ Use {variables} for templating.
  Choose the right level of strictness for your use case:

  1. **IGNORE** (default) - Simple text file loading, filename becomes title
- 2. **ALLOW** - Load metadata if present, don't worry about completeness
+ 2. **ALLOW** - Load metadata if present, don't worry about completeness
  3. **STRICT** - Require complete metadata (title, description, version) for production safety

+ You can also set the environment variable `TEXTPROMPTS_METADATA_MODE` to one of
+ `strict`, `allow`, or `ignore` before importing the library to configure the
+ default mode.
+
  ```python
  # Set globally
  textprompts.set_metadata("ignore") # Default: simple file loading
- textprompts.set_metadata("allow") # Flexible: load any metadata
+ textprompts.set_metadata("allow") # Flexible: load any metadata
  textprompts.set_metadata("strict") # Production: require complete metadata

  # Or override per prompt
@@ -401,7 +411,7 @@ template = PromptString("Hello {name}, you are {role}")
  result = template.format(name="Alice", role="admin") # ✅ Works
  result = template.format(name="Alice") # ❌ Raises ValueError

- # Partial formatting - replace only available placeholders
+ # Partial formatting - replace only available placeholders
  partial = template.format(name="Alice", skip_validation=True) # ✅ "Hello Alice, you are {role}"

  # Access placeholder information
@@ -493,4 +503,4 @@ MIT License - see [LICENSE](LICENSE) for details.

  ---

- **textprompts** - Because your prompts deserve better than being buried in code strings. 🚀
+ **textprompts** - Because your prompts deserve better than being buried in code strings. 🚀
@@ -1,8 +1,15 @@
  # textprompts

+ [![PyPI version](https://img.shields.io/pypi/v/textprompts.svg)](https://pypi.org/project/textprompts/)
+ [![Python versions](https://img.shields.io/pypi/pyversions/textprompts.svg)](https://pypi.org/project/textprompts/)
+ [![CI status](https://github.com/svilupp/textprompts/workflows/CI/badge.svg)](https://github.com/svilupp/textprompts/actions)
+ [![Coverage](https://img.shields.io/codecov/c/github/svilupp/textprompts)](https://codecov.io/gh/svilupp/textprompts)
+ [![License](https://img.shields.io/pypi/l/textprompts.svg)](https://github.com/svilupp/textprompts/blob/main/LICENSE)
+
+
  > **So simple, it's not even worth vibing about coding yet it just makes so much sense.**

- Are you tired of vendors trying to sell you fancy UIs for prompt management that just make your system more confusing and harder to debug? Isn't it nice to just have your prompts **next to your code**?
+ Are you tired of vendors trying to sell you fancy UIs for prompt management that just make your system more confusing and harder to debug? Isn't it nice to just have your prompts **next to your code**?

  But then you worry: *Did my formatter change my prompt? Are those spaces at the beginning actually part of the prompt or just indentation?*

@@ -11,7 +18,7 @@ But then you worry: *Did my formatter change my prompt? Are those spaces at the
  ## Why textprompts?

  - ✅ **Prompts live next to your code** - no external systems to manage
- - ✅ **Git is your version control** - diff, branch, and experiment with ease
+ - ✅ **Git is your version control** - diff, branch, and experiment with ease
  - ✅ **No formatter headaches** - your prompts stay exactly as you wrote them
  - ✅ **Minimal markup** - just TOML front-matter when you need metadata (or no metadata if you prefer!)
  - ✅ **Zero dependencies** - well, almost (just Pydantic)
@@ -35,7 +42,6 @@ title = "Customer Greeting"
  version = "1.0.0"
  description = "Friendly greeting for customer support"
  ---
-
  Hello {customer_name}!

  Welcome to {company_name}. We're here to help you with {issue_type}.
@@ -50,11 +56,13 @@ import textprompts

  # Just load it - works with or without metadata
  prompt = textprompts.load_prompt("greeting.txt")
+ # Or simply
+ alt = textprompts.Prompt.from_path("greeting.txt")

  # Use it safely - all placeholders must be provided
  message = prompt.prompt.format(
      customer_name="Alice",
-     company_name="ACME Corp",
+     company_name="ACME Corp",
      issue_type="billing question",
      agent_name="Sarah"
  )
@@ -137,7 +145,7 @@ prompt = textprompts.load_prompt("prompt.txt") # No metadata parsing
  print(prompt.meta.title) # "prompt" (from filename)

  # 2. ALLOW: Load metadata if present, don't worry if it's incomplete
- textprompts.set_metadata("allow") # Flexible metadata loading
+ textprompts.set_metadata("allow") # Flexible metadata loading
  prompt = textprompts.load_prompt("prompt.txt") # Loads any metadata found

  # 3. STRICT: Require complete metadata for production use
@@ -150,7 +158,7 @@ prompt = textprompts.load_prompt("prompt.txt", meta="strict")

  **Why this design?**
  - **Default = Simple**: No configuration needed, just load files
- - **Flexible**: Add metadata when you want structure
+ - **Flexible**: Add metadata when you want structure
  - **Production-Safe**: Use strict mode to catch missing metadata before deployment

  ## Real-World Examples
@@ -175,7 +183,7 @@ response = openai.chat.completions.create(
              )
          },
          {
-             "role": "user",
+             "role": "user",
              "content": user_prompt.prompt.format(
                  query="How do I return an item?",
                  customer_tier="premium"
@@ -196,7 +204,6 @@ title = "Product Search Tool"
  version = "2.1.0"
  description = "Search our product catalog"
  ---
-
  {
    "type": "function",
    "function": {
@@ -210,7 +217,7 @@ description = "Search our product catalog"
            "description": "Search query for products"
          },
          "category": {
-           "type": "string",
+           "type": "string",
            "enum": ["electronics", "clothing", "books"],
            "description": "Product category to search within"
          },
@@ -282,7 +289,6 @@ description = "What this prompt does"
  created = "2024-01-15"
  tags = ["customer-support", "greeting"]
  ---
-
  Your prompt content goes here.

  Use {variables} for templating.
@@ -293,13 +299,17 @@ Use {variables} for templating.
  Choose the right level of strictness for your use case:

  1. **IGNORE** (default) - Simple text file loading, filename becomes title
- 2. **ALLOW** - Load metadata if present, don't worry about completeness
+ 2. **ALLOW** - Load metadata if present, don't worry about completeness
  3. **STRICT** - Require complete metadata (title, description, version) for production safety

+ You can also set the environment variable `TEXTPROMPTS_METADATA_MODE` to one of
+ `strict`, `allow`, or `ignore` before importing the library to configure the
+ default mode.
+
  ```python
  # Set globally
  textprompts.set_metadata("ignore") # Default: simple file loading
- textprompts.set_metadata("allow") # Flexible: load any metadata
+ textprompts.set_metadata("allow") # Flexible: load any metadata
  textprompts.set_metadata("strict") # Production: require complete metadata

  # Or override per prompt
@@ -377,7 +387,7 @@ template = PromptString("Hello {name}, you are {role}")
  result = template.format(name="Alice", role="admin") # ✅ Works
  result = template.format(name="Alice") # ❌ Raises ValueError

- # Partial formatting - replace only available placeholders
+ # Partial formatting - replace only available placeholders
  partial = template.format(name="Alice", skip_validation=True) # ✅ "Hello Alice, you are {role}"

  # Access placeholder information
@@ -469,4 +479,4 @@ MIT License - see [LICENSE](LICENSE) for details.

  ---

- **textprompts** - Because your prompts deserve better than being buried in code strings. 🚀
+ **textprompts** - Because your prompts deserve better than being buried in code strings. 🚀
@@ -1,6 +1,6 @@
  [project]
  name = "textprompts"
- version = "0.0.2"
+ version = "0.0.4"
  description = "Minimal text-based prompt-loader with TOML front-matter"
  readme = "README.md"
  license = "MIT"
@@ -38,6 +38,9 @@ dev = [
      "ruff>=0.12.2",
      "pre-commit>=3.0.0",
  ]
+ test = [
+     "pydantic-ai>=0.4.5",
+ ]

  [build-system]
  requires = ["uv_build>=0.7.19,<0.8.0"]
@@ -57,6 +60,7 @@ ignore = ["E501"] # Ignore line length
  [tool.mypy]
  python_version = "3.11"
  strict = true
+ mypy_path = "src"

  [tool.pytest.ini_options]
  testpaths = ["tests"]
@@ -71,8 +71,12 @@ def parse_file(path: Path, *, metadata_mode: MetadataMode) -> Prompt:
              stacklevel=2,
          )
          ignore_meta = PromptMeta(title=path.stem)
-         return Prompt(
-             path=path, meta=ignore_meta, prompt=PromptString(textwrap.dedent(raw))
+         return Prompt.model_validate(
+             {
+                 "path": path,
+                 "meta": ignore_meta,
+                 "prompt": PromptString(textwrap.dedent(raw)),
+             }
          )

      # For STRICT and ALLOW modes, try to parse front matter
@@ -149,4 +153,10 @@ def parse_file(path: Path, *, metadata_mode: MetadataMode) -> Prompt:
      if meta.title is None:
          meta.title = path.stem

-     return Prompt(path=path, meta=meta, prompt=PromptString(textwrap.dedent(body)))
+     return Prompt.model_validate(
+         {
+             "path": path,
+             "meta": meta,
+             "prompt": PromptString(textwrap.dedent(body)),
+         }
+     )
@@ -2,6 +2,7 @@
  Global configuration for textprompts metadata handling.
  """

+ import os
  from enum import Enum
  from typing import Union

@@ -29,7 +30,13 @@ class MetadataMode(Enum):


  # Global configuration variable
- _METADATA_MODE: MetadataMode = MetadataMode.IGNORE
+ _env_mode = os.getenv("TEXTPROMPTS_METADATA_MODE")
+ try:
+     _METADATA_MODE: MetadataMode = (
+         MetadataMode(_env_mode.lower()) if _env_mode else MetadataMode.IGNORE
+     )
+ except ValueError:
+     _METADATA_MODE = MetadataMode.IGNORE
  _WARN_ON_IGNORED_META: bool = True

@@ -4,6 +4,7 @@ from typing import Any, Union

  from pydantic import BaseModel, Field, field_validator

+ from .config import MetadataMode
  from .prompt_string import PromptString


@@ -20,6 +21,15 @@ class Prompt(BaseModel):
      meta: Union[PromptMeta, None]
      prompt: PromptString

+     @classmethod
+     def from_path(
+         cls, path: Union[str, Path], *, meta: Union[MetadataMode, str, None] = None
+     ) -> "Prompt":
+         """Load a Prompt from ``path`` using ``load_prompt``."""
+         from .loaders import load_prompt
+
+         return load_prompt(path, meta=meta)
+
      @field_validator("prompt")
      @classmethod
      def prompt_not_empty(cls, v: str) -> PromptString:
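
The two user-facing additions in this release are the `TEXTPROMPTS_METADATA_MODE` environment variable and the `Prompt.from_path` classmethod. A minimal usage sketch of both, assuming a local `greeting.txt` with the placeholders from the README example (the file and its contents are illustrative, not part of the package):

```python
import os

# Assumed illustration: choose the default metadata mode before importing,
# as described in the 0.0.4 README and implemented in the config module.
os.environ["TEXTPROMPTS_METADATA_MODE"] = "allow"

import textprompts

# New in 0.0.4: load a prompt directly via the Prompt classmethod,
# which delegates to load_prompt internally.
prompt = textprompts.Prompt.from_path("greeting.txt")

# Format with all placeholders supplied, as in the README quick start.
message = prompt.prompt.format(
    customer_name="Alice",
    company_name="ACME Corp",
    issue_type="billing question",
    agent_name="Sarah",
)
print(message)
```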