ai-prompter 0.3.0__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ai_prompter/__init__.py CHANGED
@@ -122,7 +122,11 @@ class Prompter:
122
122
  if os.path.exists(prompt_path_default):
123
123
  prompt_dirs.append(prompt_path_default)
124
124
  env = Environment(loader=FileSystemLoader(prompt_dirs))
125
- self.template = env.get_template(f"{self.prompt_template}.jinja")
125
+ # Strip .jinja extension if present to avoid double extension
126
+ template_name = self.prompt_template
127
+ if template_name.endswith('.jinja'):
128
+ template_name = template_name[:-6] # Remove '.jinja'
129
+ self.template = env.get_template(f"{template_name}.jinja")
126
130
  self.prompt_folders = prompt_dirs
127
131
  else:
128
132
  self.template_text = template_text
@@ -178,11 +182,11 @@ class Prompter:
178
182
  if template_name in visited:
179
183
  raise ValueError(f"Circular include detected for {template_name}")
180
184
  visited.add(template_name)
181
- # Ensure we don't add .jinja if it's already in the name
182
- if template_name.endswith('.jinja'):
183
- template_file = os.path.join(base_dir, template_name)
184
- else:
185
- template_file = os.path.join(base_dir, f"{template_name}.jinja")
185
+ # Strip .jinja extension if present to avoid double extension
186
+ clean_name = template_name
187
+ if clean_name.endswith('.jinja'):
188
+ clean_name = clean_name[:-6] # Remove '.jinja'
189
+ template_file = os.path.join(base_dir, f"{clean_name}.jinja")
186
190
  if not os.path.exists(template_file):
187
191
  raise ValueError(f"Template file {template_file} not found")
188
192
  with open(template_file, 'r', encoding='utf-8') as f:
@@ -228,7 +232,11 @@ class Prompter:
228
232
  return 'text'
229
233
 
230
234
  for folder in self.prompt_folders:
231
- template_file = os.path.join(folder, f"{template_name}.jinja")
235
+ # Strip .jinja extension if present to avoid double extension
236
+ clean_name = template_name
237
+ if clean_name.endswith('.jinja'):
238
+ clean_name = clean_name[:-6] # Remove '.jinja'
239
+ template_file = os.path.join(folder, f"{clean_name}.jinja")
232
240
  if os.path.exists(template_file):
233
241
  return template_file
234
242
 
@@ -0,0 +1,884 @@
1
+ Metadata-Version: 2.4
2
+ Name: ai-prompter
3
+ Version: 0.3.1
4
+ Summary: A prompt management library using Jinja2 templates to build complex prompts easily.
5
+ Author-email: LUIS NOVO <lfnovo@gmail.com>
6
+ License: MIT
7
+ License-File: LICENSE
8
+ Requires-Python: >=3.10
9
+ Requires-Dist: jinja2>=3.1.6
10
+ Requires-Dist: pip>=25.0.1
11
+ Requires-Dist: pydantic>=2.0
12
+ Provides-Extra: langchain
13
+ Requires-Dist: langchain-core>=0.3; extra == 'langchain'
14
+ Description-Content-Type: text/markdown
15
+
16
+ # AI Prompter: Professional Prompt Management Made Simple
17
+
18
+ **Stop hardcoding prompts. Start building maintainable, reusable AI prompt templates.**
19
+
20
+ AI Prompter is a powerful Python library that transforms how you manage AI prompts. Using familiar Jinja2 templating, you can create dynamic, reusable prompts that scale with your applications - whether you're building chatbots, content generators, or complex AI workflows.
21
+
22
+ ## Why AI Prompter?
23
+
24
+ - **🎯 Template-Driven**: Write prompts once, reuse everywhere with dynamic variables
25
+ - **📁 Organized**: Keep prompts in separate files, organized and version-controlled
26
+ - **🔧 Flexible**: Works with any LLM provider - OpenAI, Anthropic, local models
27
+ - **⚡ LangChain Ready**: Seamless integration with LangChain workflows
28
+ - **🏗️ Structured Output**: Built-in support for JSON, Pydantic models, and custom parsers
29
+ - **🎨 Modular**: Include and compose templates for complex prompt engineering
30
+
31
+ ## Quick Start
32
+
33
+ ### Installation
34
+
35
+ ```bash
36
+ pip install ai-prompter
37
+
38
+ # For LangChain integration
39
+ pip install ai-prompter[langchain]
40
+ ```
41
+
42
+ ### 30-Second Example
43
+
44
+ ```python
45
+ from ai_prompter import Prompter
46
+
47
+ # Create a simple prompt template
48
+ prompter = Prompter(template_text="""
49
+ You are a {{ role }} expert. Help the user with their {{ task_type }} question.
50
+
51
+ User Question: {{ question }}
52
+
53
+ Please provide a {{ tone }} and detailed response.
54
+ """)
55
+
56
+ # Use it with different scenarios
57
+ response = prompter.render({
58
+ "role": "Python programming",
59
+ "task_type": "debugging",
60
+ "question": "Why is my list comprehension not working?",
61
+ "tone": "friendly"
62
+ })
63
+
64
+ print(response)
65
+ # Output: You are a Python programming expert. Help the user with their debugging question...
66
+ ```
67
+
68
+ ### File-Based Templates (Recommended)
69
+
70
+ Create a `prompts/` folder in your project and save templates as `.jinja` files:
71
+
72
+ ```jinja
73
+ <!-- prompts/code_review.jinja -->
74
+ You are an experienced {{ language }} developer conducting a code review.
75
+
76
+ Code to review:
77
+ ```{{ language }}
78
+ {{ code }}
79
+ ```
80
+
81
+ Focus on:
82
+ {% for focus_area in focus_areas %}
83
+ - {{ focus_area }}
84
+ {% endfor %}
85
+
86
+ Provide specific, actionable feedback with examples.
87
+ ```
88
+
89
+ ```python
90
+ from ai_prompter import Prompter
91
+
92
+ # Load the template by name (finds prompts/code_review.jinja automatically)
93
+ reviewer = Prompter(prompt_template="code_review")
94
+
95
+ prompt = reviewer.render({
96
+ "language": "python",
97
+ "code": "def calculate(x, y): return x + y",
98
+ "focus_areas": ["error handling", "documentation", "performance"]
99
+ })
100
+ ```
101
+
102
+ ## Features
103
+
104
+ - Define prompts as Jinja templates.
105
+ - Load default templates from `src/ai_prompter/prompts`.
106
+ - Override templates via `PROMPTS_PATH` environment variable.
107
+ - Automatic project root detection for prompt templates.
108
+ - Render prompts with arbitrary data or Pydantic models.
109
+ - Export to LangChain `ChatPromptTemplate`.
110
+ - Automatic output parser integration for structured outputs.
111
+
112
+ ## Installation & Setup
113
+
114
+ ### Basic Installation
115
+
116
+ ```bash
117
+ # Install from PyPI
118
+ pip install ai-prompter
119
+
120
+ # Or using uv (recommended for Python projects)
121
+ uv add ai-prompter
122
+ ```
123
+
124
+ ### With LangChain Integration
125
+
126
+ ```bash
127
+ pip install ai-prompter[langchain]
128
+ # or
129
+ uv add ai-prompter[langchain]
130
+ ```
131
+
132
+ ### Development Installation
133
+
134
+ ```bash
135
+ git clone https://github.com/lfnovo/ai-prompter
136
+ cd ai-prompter
137
+ uv sync # installs with all dev dependencies
138
+ ```
139
+
140
+ ## Configuration
141
+
142
+ Configure a custom template path by creating a `.env` file in the project root:
143
+
144
+ ```dotenv
145
+ PROMPTS_PATH=path/to/custom/templates
146
+ ```
147
+
148
+ ## Usage
149
+
150
+ ### Basic Usage
151
+
152
+ ```python
153
+ from ai_prompter import Prompter
154
+
155
+ # Initialize with a template name
156
+ prompter = Prompter('my_template')
157
+
158
+ # Render a prompt with variables
159
+ prompt = prompter.render({'variable': 'value'})
160
+ print(prompt)
161
+ ```
162
+
163
+ ### Custom Prompt Directory
164
+
165
+ You can specify a custom directory for your prompt templates using the `prompt_dir` parameter:
166
+
167
+ ```python
168
+ prompter = Prompter(template_text='Hello {{ name }}!', prompt_dir='/path/to/your/prompts')
169
+ ```
170
+
171
+ ### Using Environment Variable for Prompt Path
172
+
173
+ Set the `PROMPTS_PATH` environment variable to point to your custom prompts directory:
174
+
175
+ ```bash
176
+ export PROMPTS_PATH=/path/to/your/prompts
177
+ ```
178
+
179
+ You can specify multiple directories separated by `:` (colon):
180
+
181
+ ```bash
182
+ export PROMPTS_PATH=/path/to/templates1:/path/to/templates2
183
+ ```
184
+
185
+ ### Template Search Order
186
+
187
+ The `Prompter` class searches for templates in the following locations (in order of priority):
188
+
189
+ 1. **Custom directory** - If you provide `prompt_dir` parameter when initializing Prompter
190
+ 2. **Environment variable paths** - Directories specified in `PROMPTS_PATH` (colon-separated)
191
+ 3. **Current directory prompts** - `./prompts` subfolder in your current working directory
192
+ 4. **Project root prompts** - Automatically detects your Python project root (by looking for `pyproject.toml`, `setup.py`, `setup.cfg`, or `.git`) and checks for a `prompts` folder there
193
+ 5. **Home directory** - `~/ai-prompter` folder
194
+ 6. **Package defaults** - Built-in templates at `src/ai_prompter/prompts`
195
+
196
+ This allows you to organize your project with prompts at the root level, regardless of your package structure:
197
+ ```
198
+ my-project/
199
+ ├── prompts/ # <- Templates here will be found automatically
200
+ │ └── my_template.jinja
201
+ ├── src/
202
+ │ └── my_package/
203
+ │ └── main.py
204
+ └── pyproject.toml
205
+ ```
206
+
207
+ ### Using File-based Templates
208
+
209
+ You can store your templates in files and reference them by name. The library will search through all configured paths (see Template Search Order above) until a matching template is found.
210
+
211
+ **Template naming**: You can reference templates either with or without the `.jinja` extension:
212
+ - `prompt_template="greet"` → searches for `greet.jinja`
213
+ - `prompt_template="greet.jinja"` → also searches for `greet.jinja`
214
+
215
+ Both approaches work identically, so use whichever feels more natural for your workflow.
216
+
217
+ ```python
218
+ from ai_prompter import Prompter
219
+
220
+ # Will search for 'greet.jinja' in all configured paths
221
+ prompter = Prompter(prompt_template="greet")
222
+ result = prompter.render({"name": "World"})
223
+ print(result) # Output depends on the content of greet.jinja
224
+ ```
225
+
226
+ You can also specify multiple search paths via environment variable:
227
+
228
+ ```python
229
+ import os
230
+ from ai_prompter import Prompter
231
+
232
+ # Set multiple search paths
233
+ os.environ["PROMPTS_PATH"] = "/path/to/templates1:/path/to/templates2"
234
+
235
+ prompter = Prompter(prompt_template="greet")
236
+ result = prompter.render({"name": "World"})
237
+ print(result) # Uses greet.jinja from the first path where it's found
238
+ ```
239
+
240
+ ### Raw text template
241
+
242
+ ```python
243
+ from ai_prompter import Prompter
244
+
245
+ template = """Write an article about {{ topic }}."""
246
+ prompter = Prompter(template_text=template)
247
+ prompt = prompter.render({"topic": "AI"})
248
+ print(prompt) # Write an article about AI.
249
+ ```
250
+
251
+ ### Using Raw Text Templates
252
+
253
+ Alternatively, you can provide the template content directly as raw text using the `template_text` parameter or the `from_text` class method.
254
+
255
+ ```python
256
+ from ai_prompter import Prompter
257
+
258
+ # Using template_text parameter
259
+ prompter = Prompter(template_text="Hello, {{ name }}!")
260
+ result = prompter.render({"name": "World"})
261
+ print(result) # Output: Hello, World!
262
+
263
+ # Using from_text class method
264
+ prompter = Prompter.from_text("Hi, {{ person }}!", model="gpt-4")
265
+ result = prompter.render({"person": "Alice"})
266
+ print(result) # Output: Hi, Alice!
267
+ ```
268
+
269
+ ### LangChain Integration
270
+
271
+ You can convert your prompts to LangChain's `ChatPromptTemplate` format for use in LangChain workflows. This works for both text-based and file-based templates.
272
+
273
+ ```python
274
+ from ai_prompter import Prompter
275
+
276
+ # With text-based template
277
+ text_prompter = Prompter(template_text="Hello, {{ name }}!")
278
+ lc_text_prompt = text_prompter.to_langchain()
279
+
280
+ # With file-based template
281
+ file_prompter = Prompter(prompt_template="greet")
282
+ lc_file_prompt = file_prompter.to_langchain()
283
+ ```
284
+
285
+ **Note**: LangChain integration requires the `langchain-core` package. Install it with `pip install ai-prompter[langchain]`.
286
+
287
+ ### Using Output Parsers
288
+
289
+ The Prompter class supports LangChain output parsers to automatically inject formatting instructions into your prompts. When you provide a parser, it will call the parser's `get_format_instructions()` method and make the result available as `{{ format_instructions }}` in your template.
290
+
291
+ ```python
292
+ from ai_prompter import Prompter
293
+ from langchain.output_parsers import PydanticOutputParser
294
+ from pydantic import BaseModel, Field
295
+
296
+ # Define your output model
297
+ class Article(BaseModel):
298
+ title: str = Field(description="Article title")
299
+ summary: str = Field(description="Brief summary")
300
+ tags: list[str] = Field(description="Relevant tags")
301
+
302
+ # Create a parser
303
+ parser = PydanticOutputParser(pydantic_object=Article)
304
+
305
+ # Create a prompter with the parser
306
+ prompter = Prompter(
307
+ template_text="""Write an article about {{ topic }}.
308
+
309
+ {{ format_instructions }}""",
310
+ parser=parser
311
+ )
312
+
313
+ # Render the prompt - format instructions are automatically included
314
+ prompt = prompter.render({"topic": "AI Safety"})
315
+ print(prompt)
316
+ # Output will include the topic AND the parser's format instructions
317
+ ```
318
+
319
+ This works with file-based templates too:
320
+
321
+ ```jinja
322
+ # article_structured.jinja
323
+ Write an article about {{ topic }}.
324
+
325
+ Please format your response according to these instructions:
326
+ {{ format_instructions }}
327
+ ```
328
+
329
+ ```python
330
+ prompter = Prompter(
331
+ prompt_template="article_structured",
332
+ parser=parser
333
+ )
334
+ ```
335
+
336
+ The parser integration supports any LangChain output parser that implements `get_format_instructions()`, including:
337
+ - `PydanticOutputParser` - For structured Pydantic model outputs
338
+ - `OutputFixingParser` - For fixing malformed outputs
339
+ - `RetryOutputParser` - For retrying failed parsing attempts
340
+ - `StructuredOutputParser` - For dictionary-based structured outputs
341
+
342
+ ## Real-World Examples
343
+
344
+ ### Content Generation Pipeline
345
+
346
+ ```python
347
+ # prompts/blog_post.jinja
348
+ You are a professional content writer specializing in {{ niche }}.
349
+
350
+ Write a {{ post_type }} blog post about "{{ title }}" for {{ target_audience }}.
351
+
352
+ Requirements:
353
+ - Length: {{ word_count }} words
354
+ - Tone: {{ tone }}
355
+ - Include {{ num_sections }} main sections
356
+ {% if seo_keywords -%}
357
+ - SEO Keywords to include: {{ seo_keywords|join(', ') }}
358
+ {% endif %}
359
+ {% if call_to_action -%}
360
+ - End with this call-to-action: {{ call_to_action }}
361
+ {% endif %}
362
+
363
+ {{ format_instructions }}
364
+ ```
365
+
366
+ ```python
367
+ from ai_prompter import Prompter
368
+ from pydantic import BaseModel, Field
+ from langchain.output_parsers import PydanticOutputParser
369
+
370
+ class BlogPost(BaseModel):
371
+ title: str = Field(description="SEO-optimized title")
372
+ sections: list[dict] = Field(description="List of sections with headers and content")
373
+ meta_description: str = Field(description="SEO meta description")
374
+ tags: list[str] = Field(description="Relevant tags")
375
+
376
+ # Create content generator
377
+ blog_generator = Prompter(
378
+ prompt_template="blog_post",
379
+ parser=PydanticOutputParser(pydantic_object=BlogPost)
380
+ )
381
+
382
+ # Generate different types of content
383
+ tech_post = blog_generator.render({
384
+ "niche": "technology",
385
+ "title": "Getting Started with AI Prompt Engineering",
386
+ "target_audience": "software developers",
387
+ "post_type": "how-to guide",
388
+ "word_count": 1500,
389
+ "tone": "technical but accessible",
390
+ "num_sections": 5,
391
+ "seo_keywords": ["AI prompts", "prompt engineering", "LLM"],
392
+ "call_to_action": "Try AI Prompter in your next project!"
393
+ })
394
+ ```
395
+
396
+ ### Multi-Language Support
397
+
398
+ ```python
399
+ # prompts/customer_support.jinja
400
+ {% set greetings = {
401
+ 'en': 'Hello',
402
+ 'es': 'Hola',
403
+ 'fr': 'Bonjour',
404
+ 'de': 'Hallo'
405
+ } %}
406
+
407
+ {{ greetings[language] }}! I'm here to help you with {{ issue_type }}.
408
+
409
+ Customer Issue: {{ customer_message }}
410
+
411
+ {% if language != 'en' -%}
412
+ Please respond in {{ language }}.
413
+ {% endif %}
414
+
415
+ Provide a {{ tone }} response that:
416
+ 1. Acknowledges the customer's concern
417
+ 2. Offers a specific solution or next steps
418
+ 3. Includes relevant {{ company_name }} policies if applicable
419
+ ```
420
+
421
+ ```python
422
+ support_agent = Prompter(prompt_template="customer_support")
423
+
424
+ # Handle support tickets in different languages
425
+ spanish_response = support_agent.render({
426
+ "language": "es",
427
+ "issue_type": "billing inquiry",
428
+ "customer_message": "No puedo encontrar mi factura",
429
+ "tone": "empathetic and professional",
430
+ "company_name": "TechCorp"
431
+ })
432
+ ```
433
+
434
+ ### Dynamic Email Campaigns
435
+
436
+ ```python
437
+ # prompts/email_campaign.jinja
438
+ Subject: {% if user.is_premium %}Exclusive{% else %}Special{% endif %} {{ campaign_type }} - {{ subject_line }}
439
+
440
+ Hi {{ user.first_name|default('there') }},
441
+
442
+ {% if user.last_purchase_days_ago < 30 -%}
443
+ Thanks for your recent purchase of {{ user.last_product }}!
444
+ {% elif user.last_purchase_days_ago > 90 -%}
445
+ We miss you! It's been a while since your last order.
446
+ {% endif %}
447
+
448
+ {{ main_message }}
449
+
450
+ {% if user.is_premium -%}
451
+ As a premium member, you get:
452
+ {% for benefit in premium_benefits -%}
453
+ ✓ {{ benefit }}
454
+ {% endfor %}
455
+ {% else -%}
456
+ {% if upgrade_offer -%}
457
+ Upgrade to premium and save {{ upgrade_discount }}%!
458
+ {% endif %}
459
+ {% endif %}
460
+
461
+ {{ call_to_action }}
462
+
463
+ Best regards,
464
+ {{ sender_name }}
465
+ ```
466
+
467
+ ```python
468
+ email_generator = Prompter(prompt_template="email_campaign")
469
+
470
+ # Personalized emails based on user data
471
+ campaign_email = email_generator.render({
472
+ "user": {
473
+ "first_name": "Sarah",
474
+ "is_premium": False,
475
+ "last_purchase_days_ago": 45,
476
+ "last_product": "Python Course"
477
+ },
478
+ "campaign_type": "Sale",
479
+ "subject_line": "50% Off All Programming Courses",
480
+ "main_message": "Master new skills with our comprehensive programming courses.",
481
+ "upgrade_offer": True,
482
+ "upgrade_discount": 25,
483
+ "premium_benefits": ["Early access to new courses", "1-on-1 mentoring", "Certificate priority"],
484
+ "call_to_action": "Shop Now →",
485
+ "sender_name": "The Learning Team"
486
+ })
487
+ ```
488
+
489
+ ### API Documentation Generator
490
+
491
+ ```python
492
+ # prompts/api_docs.jinja
493
+ # {{ endpoint.method }} {{ endpoint.path }}
494
+
495
+ {{ endpoint.description }}
496
+
497
+ ## Request
498
+
499
+ {% if endpoint.parameters -%}
500
+ ### Parameters
501
+
502
+ | Name | Type | Required | Description |
503
+ |------|------|----------|-------------|
504
+ {% for param in endpoint.parameters -%}
505
+ | `{{ param.name }}` | {{ param.type }} | {{ "Yes" if param.required else "No" }} | {{ param.description }} |
506
+ {% endfor %}
507
+ {% endif %}
508
+
509
+ {% if endpoint.body_schema -%}
510
+ ### Request Body
511
+
512
+ ```json
513
+ {{ endpoint.body_schema|tojson(indent=2) }}
514
+ ```
515
+ {% endif %}
516
+
517
+ ## Response
518
+
519
+ ```json
520
+ {{ endpoint.response_example|tojson(indent=2) }}
521
+ ```
522
+
523
+ {% if endpoint.error_codes -%}
524
+ ## Error Codes
525
+
526
+ {% for error in endpoint.error_codes -%}
527
+ - **{{ error.code }}**: {{ error.description }}
528
+ {% endfor %}
529
+ {% endif %}
530
+
531
+ ## Example
532
+
533
+ ```bash
534
+ curl -X {{ endpoint.method }} "{{ base_url }}{{ endpoint.path }}" \
535
+ {% for header in endpoint.headers -%}
536
+ -H "{{ header.name }}: {{ header.value }}" \
537
+ {% endfor %}
538
+ {% if endpoint.body_example -%}
539
+ -d '{{ endpoint.body_example|tojson }}'
540
+ {% endif %}
541
+ ```
542
+ ```
543
+
544
+ ```python
545
+ docs_generator = Prompter(prompt_template="api_docs")
546
+
547
+ endpoint_doc = docs_generator.render({
548
+ "base_url": "https://api.example.com",
549
+ "endpoint": {
550
+ "method": "POST",
551
+ "path": "/users",
552
+ "description": "Create a new user account",
553
+ "parameters": [
554
+ {"name": "api_key", "type": "string", "required": True, "description": "Your API key"}
555
+ ],
556
+ "body_schema": {"name": "string", "email": "string", "role": "string"},
557
+ "body_example": {"name": "John Doe", "email": "john@example.com", "role": "user"},
558
+ "response_example": {"id": 123, "name": "John Doe", "created_at": "2024-01-01T00:00:00Z"},
559
+ "error_codes": [
560
+ {"code": 400, "description": "Invalid request data"},
561
+ {"code": 409, "description": "Email already exists"}
562
+ ],
563
+ "headers": [{"name": "Authorization", "value": "Bearer YOUR_API_KEY"}]
564
+ }
565
+ })
566
+ ```
567
+
568
+ ## Best Practices
569
+
570
+ ### 1. Organize Templates by Use Case
571
+
572
+ ```
573
+ prompts/
574
+ ├── content/
575
+ │ ├── blog_post.jinja
576
+ │ ├── social_media.jinja
577
+ │ └── email_newsletter.jinja
578
+ ├── analysis/
579
+ │ ├── code_review.jinja
580
+ │ ├── data_analysis.jinja
581
+ │ └── competitor_research.jinja
582
+ └── support/
583
+ ├── customer_support.jinja
584
+ └── technical_troubleshooting.jinja
585
+ ```
586
+
587
+ ### 2. Use Descriptive Variable Names
588
+
589
+ ```python
590
+ # Good ✅
591
+ prompter.render({
592
+ "user_expertise_level": "beginner",
593
+ "preferred_learning_style": "visual",
594
+ "target_completion_time": "2 weeks"
595
+ })
596
+
597
+ # Avoid ❌
598
+ prompter.render({
599
+ "level": "beginner",
600
+ "style": "visual",
601
+ "time": "2 weeks"
602
+ })
603
+ ```
604
+
605
+ ### 3. Include Validation and Defaults
606
+
607
+ ```jinja
608
+ <!-- prompts/content_generator.jinja -->
609
+ {% if not topic -%}
610
+ {{ raise_error("topic is required") }}
611
+ {% endif %}
612
+
613
+ Generate content about {{ topic }} for {{ audience|default("general audience") }}.
614
+
615
+ Word count: {{ word_count|default(500) }}
616
+ Tone: {{ tone|default("professional") }}
617
+ ```
618
+
619
+ ### 4. Leverage Jinja2 Features
620
+
621
+ ```jinja
622
+ <!-- Use filters for formatting -->
623
+ Today's date: {{ current_time|strftime("%B %d, %Y") }}
624
+ Uppercase title: {{ title|upper }}
625
+ Comma-separated tags: {{ tags|join(", ") }}
626
+
627
+ <!-- Use conditionals for dynamic content -->
628
+ {% if user.subscription_type == "premium" %}
629
+ You have access to premium features!
630
+ {% else %}
631
+ Upgrade to premium for advanced features.
632
+ {% endif %}
633
+
634
+ <!-- Use loops for repetitive content -->
635
+ {% for step in instructions %}
636
+ {{ loop.index }}. {{ step }}
637
+ {% endfor %}
638
+ ```
639
+
640
+ ### 5. Version Control Your Prompts
641
+
642
+ ```bash
643
+ # Track prompt changes with git
644
+ git add prompts/
645
+ git commit -m "feat: add support for multi-language customer service prompts"
646
+
647
+ # Use branches for prompt experiments
648
+ git checkout -b experiment/new-tone-testing
649
+ ```
650
+
651
+ ### 6. Test Templates with Sample Data
652
+
653
+ ```python
654
+ # Create test data for your templates
655
+ test_data = {
656
+ "user": {"name": "Test User", "level": "beginner"},
657
+ "product": {"name": "AI Course", "price": 99.99},
658
+ "current_time": "2024-01-15 10:30:00"
659
+ }
660
+
661
+ # Test all your templates
662
+ for template_name in ["welcome", "product_recommendation", "follow_up"]:
663
+ prompter = Prompter(prompt_template=template_name)
664
+ result = prompter.render(test_data)
665
+ print(f"Template: {template_name}")
666
+ print(f"Length: {len(result)} characters")
667
+ print("---")
668
+ ```
669
+
670
+ ## Advanced Features
671
+
672
+ ### Including Other Templates
673
+
674
+ You can include other template files within a template using Jinja2's `{% include %}` directive. This allows you to build modular templates.
675
+
676
+ ```jinja
677
+ # outer.jinja
678
+ This is the outer file
679
+
680
+ {% include 'inner.jinja' %}
681
+
682
+ This is the end of the outer file
683
+ ```
684
+
685
+ ```jinja
686
+ # inner.jinja
687
+ This is the inner file
688
+
689
+ {% if type == 'a' %}
690
+ You selected A
691
+ {% else %}
692
+ You didn't select A
693
+ {% endif %}
694
+ ```
695
+
696
+ ```python
697
+ from ai_prompter import Prompter
698
+
699
+ prompter = Prompter(prompt_template="outer")
700
+ prompt = prompter.render(dict(type="a"))
701
+ print(prompt)
702
+ # This is the outer file
703
+ #
704
+ # This is the inner file
705
+ #
706
+ # You selected A
707
+ #
708
+ #
709
+ # This is the end of the outer file
710
+ ```
711
+
712
+ ### Using Variables
713
+
714
+ Templates can use variables that you pass in through the `render()` method. You can use Jinja2 filters and conditionals to control the output based on your data.
715
+
716
+ ```python
717
+ from ai_prompter import Prompter
718
+
719
+ prompter = Prompter(template_text="Hello {{name|default('Guest')}}!")
720
+ prompt = prompter.render() # No data provided, uses default
721
+ print(prompt) # Hello Guest!
722
+ prompt = prompter.render({"name": "Alice"}) # Data provided
723
+ print(prompt) # Hello Alice!
724
+ ```
725
+
726
+ The library also automatically provides a `current_time` variable with the current timestamp in format "YYYY-MM-DD HH:MM:SS".
727
+
728
+ ```python
729
+ from ai_prompter import Prompter
730
+
731
+ prompter = Prompter(template_text="Current time: {{current_time}}")
732
+ prompt = prompter.render()
733
+ print(prompt) # Current time: 2025-04-19 23:28:00
734
+ ```
735
+
736
+ ### File-based template
737
+
738
+ Place a Jinja file (e.g., `article.jinja`) in the default prompts directory (`src/ai_prompter/prompts`) or your custom path:
739
+
740
+ ```jinja
741
+ Write an article about {{ topic }}.
742
+ ```
743
+
744
+ ```python
745
+ from ai_prompter import Prompter
746
+
747
+ prompter = Prompter(prompt_template="article")
748
+ prompt = prompter.render({"topic": "AI"})
749
+ print(prompt)
750
+ ```
751
+
752
+ ## Troubleshooting
753
+
754
+ ### Common Issues
755
+
756
+ **Template Not Found Error**
757
+ ```python
758
+ # Check where AI Prompter is looking for templates
759
+ prompter = Prompter(prompt_template="my_template")
760
+ print("Template locations searched:")
761
+ for folder in prompter.prompt_folders:
762
+ print(f" - {folder}")
763
+
764
+ # Verify template location
765
+ location = prompter.template_location("my_template")
766
+ print(f"Template location: {location}")
767
+ ```
768
+
769
+ **Jinja2 Syntax Errors**
770
+ ```python
771
+ # Test templates in isolation
772
+ from jinja2 import Template
773
+
774
+ template_content = "Hello {{ name }}!"
775
+ template = Template(template_content)
776
+ result = template.render(name="World") # Test basic rendering
777
+ ```
778
+
779
+ **Environment Variable Issues**
780
+ ```bash
781
+ # Check current PROMPTS_PATH
782
+ echo $PROMPTS_PATH
783
+
784
+ # Set for current session
785
+ export PROMPTS_PATH="/path/to/templates"
786
+
787
+ # Set permanently in ~/.bashrc or ~/.zshrc
788
+ echo 'export PROMPTS_PATH="/path/to/templates"' >> ~/.bashrc
789
+ ```
790
+
791
+ ### Performance Tips
792
+
793
+ - **Cache Prompter instances** for frequently used templates
794
+ - **Use file-based templates** for better performance with includes
795
+ - **Keep template files small** and modular
796
+ - **Minimize variable processing** in templates when possible
797
+
798
+ ```python
799
+ # Good: Reuse prompter instances
800
+ email_prompter = Prompter(prompt_template="email_template")
801
+ for user in users:
802
+ email = email_prompter.render({"user": user})
803
+ send_email(email)
804
+
805
+ # Avoid: Creating new instances repeatedly
806
+ for user in users: # Less efficient
807
+ prompter = Prompter(prompt_template="email_template")
808
+ email = prompter.render({"user": user})
809
+ ```
810
+
811
+ ## Interactive Examples
812
+
813
+ Explore AI Prompter features interactively:
814
+
815
+ ```bash
816
+ # Clone the repository
817
+ git clone https://github.com/lfnovo/ai-prompter
818
+ cd ai-prompter
819
+
820
+ # Install with dev dependencies
821
+ uv sync
822
+
823
+ # Launch Jupyter notebook
824
+ uv run jupyter lab notebooks/prompter_usage.ipynb
825
+ ```
826
+
827
+ ## Testing & Development
828
+
829
+ ```bash
830
+ # Run all tests
831
+ uv run pytest -v
832
+
833
+ # Run tests with coverage
834
+ uv run pytest --cov=ai_prompter
835
+
836
+ # Run specific test file
837
+ uv run pytest tests/test_prompter.py -v
838
+
839
+ # Format code
840
+ uv run black src/
841
+ uv run isort src/
842
+ ```
843
+
844
+ ## Community & Support
845
+
846
+ - **GitHub Issues**: [Report bugs or request features](https://github.com/lfnovo/ai-prompter/issues)
847
+ - **Discussions**: [Ask questions and share templates](https://github.com/lfnovo/ai-prompter/discussions)
848
+ - **Examples**: [Community template gallery](https://github.com/lfnovo/ai-prompter/wiki/Template-Gallery)
849
+
850
+ ## Contributing
851
+
852
+ We welcome contributions! Here's how to get started:
853
+
854
+ 1. **Fork the repository**
855
+ 2. **Create a feature branch**: `git checkout -b feature/amazing-feature`
856
+ 3. **Add tests** for new functionality
857
+ 4. **Ensure tests pass**: `uv run pytest`
858
+ 5. **Submit a Pull Request**
859
+
860
+ ### Contributing Templates
861
+
862
+ Share your templates with the community:
863
+
864
+ 1. Add your template to `examples/community-templates/`
865
+ 2. Include documentation and example usage
866
+ 3. Submit a PR with the `template-contribution` label
867
+
868
+ ## Changelog
869
+
870
+ See [CHANGELOG.md](CHANGELOG.md) for version history and updates.
871
+
872
+ ## License
873
+
874
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
875
+
876
+ ---
877
+
878
+ **Ready to transform your prompt management?**
879
+
880
+ ```bash
881
+ pip install ai-prompter
882
+ ```
883
+
884
+ Start building better AI applications with organized, maintainable prompts today!
@@ -0,0 +1,6 @@
1
+ ai_prompter/__init__.py,sha256=oVRyjDv-V7kjIOwfkR1Hw5vXdA4pB6bwqVeviYn9AnY,13319
2
+ ai_prompter/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
3
+ ai_prompter-0.3.1.dist-info/METADATA,sha256=DOIo5YnRMQ5NhNMBlsaxBifoVjPZa4zbBgJ3Hpx09fo,23801
4
+ ai_prompter-0.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
+ ai_prompter-0.3.1.dist-info/licenses/LICENSE,sha256=cS0_fa_8BoP0PvVG8D19pn_HDJrG96hd4PyEm9nkRo8,1066
6
+ ai_prompter-0.3.1.dist-info/RECORD,,
@@ -1,361 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: ai-prompter
3
- Version: 0.3.0
4
- Summary: A prompt management library using Jinja2 templates to build complex prompts easily.
5
- Author-email: LUIS NOVO <lfnovo@gmail.com>
6
- License: MIT
7
- License-File: LICENSE
8
- Requires-Python: >=3.10
9
- Requires-Dist: jinja2>=3.1.6
10
- Requires-Dist: pip>=25.0.1
11
- Requires-Dist: pydantic>=2.0
12
- Provides-Extra: langchain
13
- Requires-Dist: langchain-core>=0.3; extra == 'langchain'
14
- Description-Content-Type: text/markdown
15
-
16
- # AI Prompter
17
-
18
- A prompt management library using Jinja2 templates to build complex prompts easily. Supports raw text or file-based templates and integrates with LangChain.
19
-
20
- ## Features
21
-
22
- - Define prompts as Jinja templates.
23
- - Load default templates from `src/ai_prompter/prompts`.
24
- - Override templates via `PROMPTS_PATH` environment variable.
25
- - Automatic project root detection for prompt templates.
26
- - Render prompts with arbitrary data or Pydantic models.
27
- - Export to LangChain `ChatPromptTemplate`.
28
- - Automatic output parser integration for structured outputs.
29
-
30
- ## Installation
31
-
32
- 1. (Optional) Create and activate a virtual environment:
33
- ```bash
34
- python3 -m venv .venv
35
- source .venv/bin/activate
36
- ```
37
- 2. Install the package:
38
- ```bash
39
- pip install .
40
- ```
41
- ### Extras
42
-
43
- To enable LangChain integration:
44
-
45
- ```bash
46
- pip install .[langchain]
47
- # or
48
- uv add langchain_core
49
- ```
50
-
51
- ## Configuration
52
-
53
- Configure a custom template path by creating a `.env` file in the project root:
54
-
55
- ```dotenv
56
- PROMPTS_PATH=path/to/custom/templates
57
- ```
58
-
59
- ## Usage
60
-
61
- ### Basic Usage
62
-
63
- ```python
64
- from ai_prompter import Prompter
65
-
66
- # Initialize with a template name
67
- prompter = Prompter('my_template')
68
-
69
- # Render a prompt with variables
70
- prompt = prompter.render({'variable': 'value'})
71
- print(prompt)
72
- ```
73
-
74
- ### Custom Prompt Directory
75
-
76
- You can specify a custom directory for your prompt templates using the `prompt_dir` parameter:
77
-
78
- ```python
79
- prompter = Prompter(template_text='Hello {{ name }}!', prompt_dir='/path/to/your/prompts')
80
- ```
81
-
82
- ### Using Environment Variable for Prompt Path
83
-
84
- Set the `PROMPTS_PATH` environment variable to point to your custom prompts directory:
85
-
86
- ```bash
87
- export PROMPTS_PATH=/path/to/your/prompts
88
- ```
89
-
90
- You can specify multiple directories separated by `:` (colon):
91
-
92
- ```bash
93
- export PROMPTS_PATH=/path/to/templates1:/path/to/templates2
94
- ```
95
-
96
- ### Template Search Order
97
-
98
- The `Prompter` class searches for templates in the following locations (in order of priority):
99
-
100
- 1. **Custom directory** - If you provide `prompt_dir` parameter when initializing Prompter
101
- 2. **Environment variable paths** - Directories specified in `PROMPTS_PATH` (colon-separated)
102
- 3. **Current directory prompts** - `./prompts` subfolder in your current working directory
103
- 4. **Project root prompts** - Automatically detects your Python project root (by looking for `pyproject.toml`, `setup.py`, `setup.cfg`, or `.git`) and checks for a `prompts` folder there
104
- 5. **Home directory** - `~/ai-prompter` folder
105
- 6. **Package defaults** - Built-in templates at `src/ai_prompter/prompts`
106
-
107
- This allows you to organize your project with prompts at the root level, regardless of your package structure:
108
- ```
109
- my-project/
110
- ├── prompts/ # <- Templates here will be found automatically
111
- │ └── my_template.jinja
112
- ├── src/
113
- │ └── my_package/
114
- │ └── main.py
115
- └── pyproject.toml
116
- ```
117
-
118
- ### Using File-based Templates
119
-
120
- You can store your templates in files and reference them by name (without the `.jinja` extension). The library will search through all configured paths (see Template Search Order above) until a matching template is found.
121
-
122
- ```python
123
- from ai_prompter import Prompter
124
-
125
- # Will search for 'greet.jinja' in all configured paths
126
- prompter = Prompter(prompt_template="greet")
127
- result = prompter.render({"name": "World"})
128
- print(result) # Output depends on the content of greet.jinja
129
- ```
130
-
131
- You can also specify multiple search paths via environment variable:
132
-
133
- ```python
134
- import os
135
- from ai_prompter import Prompter
136
-
137
- # Set multiple search paths
138
- os.environ["PROMPTS_PATH"] = "/path/to/templates1:/path/to/templates2"
139
-
140
- prompter = Prompter(prompt_template="greet")
141
- result = prompter.render({"name": "World"})
142
- print(result) # Uses greet.jinja from the first path where it's found
143
- ```
144
-
145
- ### Raw text template
146
-
147
- ```python
148
- from ai_prompter import Prompter
149
-
150
- template = """Write an article about {{ topic }}."""
151
- prompter = Prompter(template_text=template)
152
- prompt = prompter.render({"topic": "AI"})
153
- print(prompt) # Write an article about AI.
154
- ```
155
-
156
- ### Using Raw Text Templates
157
-
158
- Alternatively, you can provide the template content directly as raw text using the `template_text` parameter or the `from_text` class method.
159
-
160
- ```python
161
- from ai_prompter import Prompter
162
-
163
- # Using template_text parameter
164
- prompter = Prompter(template_text="Hello, {{ name }}!")
165
- result = prompter.render({"name": "World"})
166
- print(result) # Output: Hello, World!
167
-
168
- # Using from_text class method
169
- prompter = Prompter.from_text("Hi, {{ person }}!", model="gpt-4")
170
- result = prompter.render({"person": "Alice"})
171
- print(result) # Output: Hi, Alice!
172
- ```
173
-
174
- ### LangChain Integration
175
-
176
- You can convert your prompts to LangChain's `ChatPromptTemplate` format for use in LangChain workflows. This works for both text-based and file-based templates.
177
-
178
- ```python
179
- from ai_prompter import Prompter
180
-
181
- # With text-based template
182
- text_prompter = Prompter(template_text="Hello, {{ name }}!")
183
- lc_text_prompt = text_prompter.to_langchain()
184
-
185
- # With file-based template
186
- file_prompter = Prompter(prompt_template="greet")
187
- lc_file_prompt = file_prompter.to_langchain()
188
- ```
189
-
190
- **Note**: LangChain integration requires the `langchain-core` package. Install it with `pip install .[langchain]`.
191
-
192
- ### Using Output Parsers
193
-
194
- The Prompter class supports LangChain output parsers to automatically inject formatting instructions into your prompts. When you provide a parser, it will call the parser's `get_format_instructions()` method and make the result available as `{{ format_instructions }}` in your template.
195
-
196
- ```python
197
- from ai_prompter import Prompter
198
- from langchain.output_parsers import PydanticOutputParser
199
- from pydantic import BaseModel, Field
200
-
201
- # Define your output model
202
- class Article(BaseModel):
203
- title: str = Field(description="Article title")
204
- summary: str = Field(description="Brief summary")
205
- tags: list[str] = Field(description="Relevant tags")
206
-
207
- # Create a parser
208
- parser = PydanticOutputParser(pydantic_object=Article)
209
-
210
- # Create a prompter with the parser
211
- prompter = Prompter(
212
- template_text="""Write an article about {{ topic }}.
213
-
214
- {{ format_instructions }}""",
215
- parser=parser
216
- )
217
-
218
- # Render the prompt - format instructions are automatically included
219
- prompt = prompter.render({"topic": "AI Safety"})
220
- print(prompt)
221
- # Output will include the topic AND the parser's format instructions
222
- ```
223
-
224
- This works with file-based templates too:
225
-
226
- ```jinja
227
- # article_structured.jinja
228
- Write an article about {{ topic }}.
229
-
230
- Please format your response according to these instructions:
231
- {{ format_instructions }}
232
- ```
233
-
234
- ```python
235
- prompter = Prompter(
236
- prompt_template="article_structured",
237
- parser=parser
238
- )
239
- ```
240
-
241
- The parser integration supports any LangChain output parser that implements `get_format_instructions()`, including:
242
- - `PydanticOutputParser` - For structured Pydantic model outputs
243
- - `OutputFixingParser` - For fixing malformed outputs
244
- - `RetryOutputParser` - For retrying failed parsing attempts
245
- - `StructuredOutputParser` - For dictionary-based structured outputs
246
-
247
- ### Including Other Templates
248
-
249
- You can include other template files within a template using Jinja2's `{% include %}` directive. This allows you to build modular templates.
250
-
251
- ```jinja
252
- # outer.jinja
253
- This is the outer file
254
-
255
- {% include 'inner.jinja' %}
256
-
257
- This is the end of the outer file
258
- ```
259
-
260
- ```jinja
261
- # inner.jinja
262
- This is the inner file
263
-
264
- {% if type == 'a' %}
265
- You selected A
266
- {% else %}
267
- You didn't select A
268
- {% endif %}
269
- ```
270
-
271
- ```python
272
- from ai_prompter import Prompter
273
-
274
- prompter = Prompter(prompt_template="outer")
275
- prompt = prompter.render(dict(type="a"))
276
- print(prompt)
277
- # This is the outer file
278
- #
279
- # This is the inner file
280
- #
281
- # You selected A
282
- #
283
- #
284
- # This is the end of the outer file
285
- ```
286
-
287
- ### Using Variables
288
-
289
- Templates can use variables that you pass in through the `render()` method. You can use Jinja2 filters and conditionals to control the output based on your data.
290
-
291
- ```python
292
- from ai_prompter import Prompter
293
-
294
- prompter = Prompter(prompt_text="Hello {{name|default('Guest')}}!")
295
- prompt = prompter.render() # No data provided, uses default
296
- print(prompt) # Hello Guest!
297
- prompt = prompter.render({"name": "Alice"}) # Data provided
298
- print(prompt) # Hello Alice!
299
- ```
300
-
301
- The library also automatically provides a `current_time` variable with the current timestamp in format "YYYY-MM-DD HH:MM:SS".
302
-
303
- ```python
304
- from ai_prompter import Prompter
305
-
306
- prompter = Prompter(template_text="Current time: {{current_time}}")
307
- prompt = prompter.render()
308
- print(prompt) # Current time: 2025-04-19 23:28:00
309
- ```
310
-
311
- ### File-based template
312
-
313
- Place a Jinja file (e.g., `article.jinja`) in the default prompts directory (`src/ai_prompter/prompts`) or your custom path:
314
-
315
- ```jinja
316
- Write an article about {{ topic }}.
317
- ```
318
-
319
- ```python
320
- from ai_prompter import Prompter
321
-
322
- prompter = Prompter(prompt_template="article")
323
- prompt = prompter.render({"topic": "AI"})
324
- print(prompt)
325
- ```
326
-
327
- ### Jupyter Notebook
328
-
329
- See `notebooks/prompter_usage.ipynb` for interactive examples.
330
-
331
- ## Project Structure
332
-
333
- ```
334
- ai-prompter/
335
- ├── src/ai_prompter
336
- │ ├── __init__.py
337
- │ └── prompts/
338
- │ └── *.jinja
339
- ├── notebooks/
340
- │ ├── prompter_usage.ipynb
341
- │ └── prompts/
342
- ├── pyproject.toml
343
- ├── README.md
344
- └── .env (optional)
345
- ```
346
-
347
- ## Testing
348
-
349
- Run tests with:
350
-
351
- ```bash
352
- uv run pytest -v
353
- ```
354
-
355
- ## Contributing
356
-
357
- Contributions welcome! Please open issues or PRs.
358
-
359
- ## License
360
-
361
- This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
@@ -1,6 +0,0 @@
1
- ai_prompter/__init__.py,sha256=5T8FDK9wJPWhl69c2wQrPC4pxNkbew7snoUwrrgBz5o,12854
2
- ai_prompter/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
3
- ai_prompter-0.3.0.dist-info/METADATA,sha256=XKGpNCyLpy7uJjtIQEaCmscVkNHij_tIimOoFDcBfVM,9942
4
- ai_prompter-0.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
- ai_prompter-0.3.0.dist-info/licenses/LICENSE,sha256=cS0_fa_8BoP0PvVG8D19pn_HDJrG96hd4PyEm9nkRo8,1066
6
- ai_prompter-0.3.0.dist-info/RECORD,,