tasks-prompts-chain 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tasks_prompts_chain/tasks_prompts_chain.py +19 -12
- {tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.1.dist-info}/METADATA +9 -4
- tasks_prompts_chain-0.1.1.dist-info/RECORD +7 -0
- tasks_prompts_chain-0.1.0.dist-info/RECORD +0 -7
- {tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.1.dist-info}/WHEEL +0 -0
- {tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.1.dist-info}/licenses/LICENSE +0 -0
tasks_prompts_chain/tasks_prompts_chain.py

@@ -31,6 +31,7 @@ Copyright 2025 Samir Ben Sghaier - Smirfolio
 from typing import List, Optional, Dict, Union, AsyncGenerator, TypedDict
 from enum import Enum
 from .client_llm_sdk import ClientLLMSDK
+import json

 class OutputFormat(Enum):
     JSON = "JSON"
@@ -46,10 +47,12 @@ class ModelOptions(TypedDict, total=False):
     max_tokens: Optional[int]

 class PromptTemplate:
-    def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None):
+    def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None, llm_id: Optional[str] = None, stop_placeholder: Optional[str] = None):
         self.prompt = prompt
         self.output_format = OutputFormat(output_format.upper())
         self.output_placeholder = output_placeholder
+        self.llm_id = llm_id
+        self.stop_placeholder = stop_placeholder

 class TasksPromptsChain:
     """A utility class for creating and executing prompt chains using OpenAI's API."""
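For illustration, the extended constructor can be called directly as below. This is a minimal sketch assuming only the signature shown in the hunk above; the "claude" client id, the prompt text, and the %%invalid_brief%% marker are hypothetical.

```python
# Minimal sketch, assuming only the PromptTemplate signature shown above.
# "claude" is a hypothetical client id; %%invalid_brief%% is an arbitrary stop marker.
template = PromptTemplate(
    prompt="Summarize this brief: {{project_brief}}. If it is unusable, answer exactly %%invalid_brief%%",
    output_format="MARKDOWN",
    output_placeholder="project_summary",
    llm_id="claude",                       # route this prompt to a specific registered LLM
    stop_placeholder="%%invalid_brief%%",  # marker that interrupts the chain when returned
)
```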
@@ -165,7 +168,8 @@ class TasksPromptsChain:
                    "prompt": str,
                    "output_format": str,
                    "output_placeholder": str,
-                    "llm_id": str # Optional: Specifies which LLM to use
+                    "llm_id": str, # Optional: Specifies which LLM to use
+                    "stop_placeholder": str # Optional: The stop string placeholder
                }
        Returns:
            AsyncGenerator[str, None]: Generator yielding response chunks
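A concrete prompt entry matching that documented shape could look like the following; the values are illustrative only and not taken from the package.

```python
# Illustrative prompt entry following the documented dict shape (values are hypothetical).
prompt_entry = {
    "prompt": "List three brand names based on: {{design_concept}}",
    "output_format": "JSON",
    "output_placeholder": "brand_names",
    "llm_id": "gpt",                         # optional: which registered LLM handles this prompt
    "stop_placeholder": "%%cannot_comply%%", # optional: marker that aborts the chain if returned
}
```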
@@ -178,24 +182,22 @@ class TasksPromptsChain:
                 # Convert dict to PromptTemplate if necessary and extract llm_id
                 llm_id = None
                 if isinstance(prompt_data, dict):
-                    # Extract llm_id from the prompt data if present
-                    llm_id = prompt_data.get("llm_id", self.default_client_id)
                     prompt_template = PromptTemplate(
                         prompt=prompt_data["prompt"],
                         output_format=prompt_data.get("output_format", "TEXT"),
-                        output_placeholder=prompt_data.get("output_placeholder")
+                        output_placeholder=prompt_data.get("output_placeholder"),
+                        llm_id=prompt_data.get("llm_id", self.default_client_id),
+                        stop_placeholder=prompt_data.get("stop_placeholder", None)
                     )
                 else:
                     prompt_template = prompt_data
-                    # Use default client if llm_id not specified
-                    llm_id = self.default_client_id

                 # Validate the requested LLM exists
-                if llm_id not in self.clients:
-                    raise ValueError(f"LLM with id '{llm_id}' not found. Available LLMs: {list(self.clients.keys())}")
+                if prompt_template.llm_id not in self.clients:
+                    raise ValueError(f"LLM with id '{prompt_template.llm_id}' not found. Available LLMs: {list(self.clients.keys())}")

                 # Get the client configuration
-                client_config = self.clients[llm_id]
+                client_config = self.clients[prompt_template.llm_id]
                 client = client_config["client"]
                 model = client_config["model"]
                 temperature = client_config["temperature"]
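The resolution order implemented above (per-prompt llm_id, falling back to the chain's default client, then validated against the registered clients) can be summarized with a small standalone sketch; resolve_llm_id is a hypothetical helper, not part of the library.

```python
# Hypothetical helper mirroring the llm_id resolution shown in the hunk above.
def resolve_llm_id(prompt_data: dict, default_client_id: str, clients: dict) -> str:
    # Per-prompt "llm_id" wins; otherwise fall back to the chain's default client.
    llm_id = prompt_data.get("llm_id", default_client_id)
    # Unknown ids fail fast, matching the ValueError raised by the library code.
    if llm_id not in clients:
        raise ValueError(f"LLM with id '{llm_id}' not found. Available LLMs: {list(clients.keys())}")
    return llm_id
```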
@@ -241,7 +243,13 @@ class TasksPromptsChain:
                     placeholder_values[prompt_template.output_placeholder] = response_content
                     self._results[prompt_template.output_placeholder] = response_content
                     if streamout:
-                        yield delta
+                        yield delta
+
+                    # Stop execution if the stop_placeholder is detected in the response content
+                    if prompt_template.stop_placeholder and (prompt_template.stop_placeholder in response_content):
+                        raise Exception({"type": "error", "content": "Invalid project description. Please provide a valid project description."})
+                        #yield json.dumps({"type": "error", "content": "Invalid project description. Please provide a valid project description."})
+                        #return

         except Exception as e:
             raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
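From the caller's side, a detected stop_placeholder surfaces as an exception while the stream is being consumed. The sketch below assumes an already configured TasksPromptsChain instance named chain and an execute_chain coroutine that accepts the prompt list and a streamout flag, as suggested by the surrounding code; that exact call shape is an assumption.

```python
# Sketch of caller-side handling, assuming a configured `chain` (TasksPromptsChain)
# and the execute_chain(prompts, streamout=...) shape implied by the code above.
async def run(chain, prompts):
    try:
        async for chunk in chain.execute_chain(prompts, streamout=True):
            print(chunk, end="", flush=True)
    except Exception as exc:
        # Covers both unknown llm_id errors and a detected stop_placeholder.
        print(f"\nChain stopped: {exc}")
```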
@@ -285,4 +293,3 @@ class TasksPromptsChain:
         if len(self._results) > 0:
             raise Exception("template_output must be called before execute_chain")
         self.set_output_template(template)
-
{tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tasks_prompts_chain
-Version: 0.1.0
+Version: 0.1.1
 Summary: A Python library for creating and executing chains of prompts using multiple LLM providers with streaming support and template formatting.
 Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
 Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -23,6 +23,7 @@ A Mini Python library for creating and executing chains of prompts using multipl
 - Template-based output formatting
 - System prompt support
 - Placeholder replacement between prompts
+- Optional stop placeholder string: if the LLM responds with this marker, the prompt chain is interrupted, acting as an LLM-side error handler
 - Multiple output formats (JSON, Markdown, CSV, Text)
 - Async/await support
 - Support for multiple LLM providers (OpenAI, Anthropic, Cerebras, etc.)
@@ -120,10 +121,11 @@ async def main():
     # Define your prompts - specify which LLM to use for each prompt
     prompts = [
         {
-            "prompt": "Create a design concept for a luxury chocolate bar",
+            "prompt": "Create a design concept for a luxury chocolate bar. If you are not inspired, respond with the string %%cant_be_inspired%% so the prompt chain is stopped",
             "output_format": "TEXT",
             "output_placeholder": "design_concept",
-            "llm_id": "gpt" # Use the GPT model for this prompt
+            "llm_id": "gpt", # Use the GPT model for this prompt
+            "stop_placeholder": "%%cant_be_inspired%%" # Stop marker that interrupts the prompt chain if the LLM returns it
         },
         {
             "prompt": "Based on this concept: {{design_concept}}, suggest a color palette",
@@ -290,7 +292,8 @@ Each prompt in the chain can be defined as a dictionary:
     "prompt": str, # The actual prompt text
     "output_format": str, # "JSON", "MARKDOWN", "CSV", or "TEXT"
     "output_placeholder": str, # Identifier for accessing this result
-    "llm_id": str
+    "llm_id": str, # Optional: ID of the LLM to use for this prompt
+    "stop_placeholder": str # Optional: stop marker; if the LLM returns this string, the chain is interrupted
 }
 ```

@@ -311,6 +314,7 @@ The library includes comprehensive error handling:
 - API error handling
 - Placeholder validation
 - LLM validation (checks if specified LLM ID exists)
+- stop_placeholder checks that validate the LLM output and stop the chain execution when the marker is detected

 Errors are raised with descriptive messages indicating the specific issue and prompt number where the error occurred.

@@ -318,6 +322,7 @@ Errors are raised with descriptive messages indicating the specific issue and pr

 1. Always set templates before executing the chain
 2. Use meaningful placeholder names
+3. Define a stop_placeholder in each prompt to catch LLM-flagged bad responses and stop subsequent requests
 3. Handle streaming responses appropriately
 4. Choose appropriate models for different types of tasks
 5. Use system prompts for consistent context
tasks_prompts_chain-0.1.1.dist-info/RECORD

@@ -0,0 +1,7 @@
+tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
+tasks_prompts_chain/client_llm_sdk.py,sha256=ifwsecvykP5RalXMqolAClPHdpmv-5hF4eT61dxo1f8,3708
+tasks_prompts_chain/tasks_prompts_chain.py,sha256=T8WaqAdgDBqHjNa48dPgny_mDnEN1OvMTFTX6YndGgo,13409
+tasks_prompts_chain-0.1.1.dist-info/METADATA,sha256=1zVlU2deVaP-Fv_Zj6ph6LcDJrJOD4qSffehnp1umRM,13331
+tasks_prompts_chain-0.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+tasks_prompts_chain-0.1.1.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
+tasks_prompts_chain-0.1.1.dist-info/RECORD,,
tasks_prompts_chain-0.1.0.dist-info/RECORD

@@ -1,7 +0,0 @@
-tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
-tasks_prompts_chain/client_llm_sdk.py,sha256=ifwsecvykP5RalXMqolAClPHdpmv-5hF4eT61dxo1f8,3708
-tasks_prompts_chain/tasks_prompts_chain.py,sha256=ZqZIzAWEZMgKvbkXUT8_VZisSydb-jLuWxuTrMT1-Fw,12643
-tasks_prompts_chain-0.1.0.dist-info/METADATA,sha256=_-5r7zd9EnnPut3CF45s1VFGVte0iZHYb9Epx219UfQ,12573
-tasks_prompts_chain-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-tasks_prompts_chain-0.1.0.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
-tasks_prompts_chain-0.1.0.dist-info/RECORD,,
{tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.1.dist-info}/WHEEL
RENAMED
File without changes

{tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.1.dist-info}/licenses/LICENSE
RENAMED
File without changes