tasks-prompts-chain 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tasks_prompts_chain/client_llm_sdk.py +37 -7
- tasks_prompts_chain/tasks_prompts_chain.py +21 -12
- {tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.2.dist-info}/METADATA +9 -4
- tasks_prompts_chain-0.1.2.dist-info/RECORD +7 -0
- tasks_prompts_chain-0.1.0.dist-info/RECORD +0 -7
- {tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.2.dist-info}/WHEEL +0 -0
- {tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.2.dist-info}/licenses/LICENSE +0 -0
tasks_prompts_chain/client_llm_sdk.py

@@ -1,3 +1,5 @@
+from typing import Optional
+
 class ClientLLMSDK:
     """
     A class to handle LLM SDKs for various providers.
@@ -18,6 +20,25 @@ class ClientLLMSDK:
         # Instantiate the LLM
         self.client = AsyncLLmAi(**client_kwargs)
 
+    def _extract_content(self, chunk) -> Optional[str]:
+        """Extract content from different response formats"""
+        if self.llm_class_name == "AsyncAnthropic":
+            # Handle different Anthropic event types
+            if hasattr(chunk, 'type'):
+                if chunk.type == 'content_block_delta':
+                    return chunk.delta.text
+                elif chunk.type == 'message_stop':
+                    return None
+            return None
+        else:
+            # Handle OpenAI and other formats
+            if hasattr(chunk, 'choices') and chunk.choices:
+                if hasattr(chunk.choices[0], 'delta'):
+                    return chunk.choices[0].delta.content
+                elif hasattr(chunk.choices[0], 'message'):
+                    return chunk.choices[0].message.content
+            return None
+
     async def generat_response(self, **kwargs):
         """
         Generate a response from the LLM.
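The new _extract_content helper gives the provider-specific streaming loops below a single way to pull text out of a chunk. A minimal standalone sketch of the same normalization idea, using hypothetical stand-in objects rather than real SDK chunk types:

from types import SimpleNamespace
from typing import Optional

def extract_content(chunk, provider: str) -> Optional[str]:
    """Return the text delta carried by a streaming chunk, or None."""
    if provider == "AsyncAnthropic":
        # Anthropic streams typed events; only content_block_delta carries text
        if getattr(chunk, "type", None) == "content_block_delta":
            return chunk.delta.text
        return None
    # OpenAI-style chunks expose choices[0].delta (streaming) or choices[0].message
    choices = getattr(chunk, "choices", None)
    if choices:
        if hasattr(choices[0], "delta"):
            return choices[0].delta.content
        if hasattr(choices[0], "message"):
            return choices[0].message.content
    return None

# Stand-in chunks shaped like the two providers' streaming events
anthropic_chunk = SimpleNamespace(type="content_block_delta", delta=SimpleNamespace(text="Hello"))
openai_chunk = SimpleNamespace(choices=[SimpleNamespace(delta=SimpleNamespace(content=" world"))])
print(extract_content(anthropic_chunk, "AsyncAnthropic"))  # Hello
print(extract_content(openai_chunk, "AsyncOpenAI"))        #  world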
@@ -42,8 +63,14 @@ class ClientLLMSDK:
             )
 
             async for chunk in response:
-                if chunk
-
+                if chunk is not None:
+                    if isinstance(chunk, str):
+                        delta = chunk
+                    else:
+                        # Handle different response formats
+                        delta = self._extract_content(chunk)
+                    if delta is not None:
+                        yield delta
 
 
         elif self.llm_class_name == "AsyncAnthropic": # Anthropic SDK
@@ -69,11 +96,14 @@ class ClientLLMSDK:
             )
 
             async for chunk in response:
-
-
-
-
-
+                if chunk is not None:
+                    if isinstance(chunk, str):
+                        delta = chunk
+                    else:
+                        # Handle different response formats
+                        delta = self._extract_content(chunk)
+                    if delta is not None:
+                        yield delta
 
         elif self.llm_class_name == "AsyncCerebras": # AsyncCerebras SDK
             response = await self.client.chat.completions.create(
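After this change both provider branches yield plain text deltas, so a caller can consume generat_response the same way regardless of the backing SDK. A hedged consumer sketch; the keyword arguments in the commented call are placeholders, not the library's documented parameter names:

import asyncio

async def collect(sdk, **llm_kwargs) -> str:
    """Accumulate the text deltas yielded by ClientLLMSDK.generat_response."""
    parts = []
    async for delta in sdk.generat_response(**llm_kwargs):
        parts.append(delta)  # each delta is already a plain string
    return "".join(parts)

# Hypothetical call, assuming `sdk` is a configured ClientLLMSDK instance:
# text = asyncio.run(collect(sdk, model="gpt-4o-mini", messages=[...]))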
tasks_prompts_chain/tasks_prompts_chain.py

@@ -31,6 +31,7 @@ Copyright 2025 Samir Ben Sghaier - Smirfolio
 from typing import List, Optional, Dict, Union, AsyncGenerator, TypedDict
 from enum import Enum
 from .client_llm_sdk import ClientLLMSDK
+import json
 
 class OutputFormat(Enum):
     JSON = "JSON"
@@ -46,10 +47,12 @@ class ModelOptions(TypedDict, total=False):
     max_tokens: Optional[int]
 
 class PromptTemplate:
-    def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None):
+    def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None, llm_id: Optional[str] = None, stop_placeholder: Optional[str] = None):
         self.prompt = prompt
         self.output_format = OutputFormat(output_format.upper())
         self.output_placeholder = output_placeholder
+        self.llm_id=llm_id
+        self.stop_placeholder = stop_placeholder
 
 class TasksPromptsChain:
     """A utility class for creating and executing prompt chains using OpenAI's API."""
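The two new optional fields let each template carry its own model routing (llm_id) and an error-sentinel string (stop_placeholder). A brief illustrative construction; the argument values here are invented for the example:

template = PromptTemplate(
    prompt="Summarize the report, or answer %%cannot_summarize%% if the input is unusable",
    output_format="TEXT",
    output_placeholder="summary",
    llm_id="gpt",                             # route this step to the client registered as "gpt"
    stop_placeholder="%%cannot_summarize%%",  # sentinel that interrupts the chain if it appears
)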
@@ -165,7 +168,8 @@ class TasksPromptsChain:
                 "prompt": str,
                 "output_format": str,
                 "output_placeholder": str,
-                "llm_id": str # Optional: Specifies which LLM to use
+                "llm_id": str, # Optional: Specifies which LLM to use
+                "stop_placeholder": str # Optional: The stop string placeholder
             }
         Returns:
             AsyncGenerator[str, None]: Generator yielding response chunks
@@ -178,24 +182,22 @@ class TasksPromptsChain:
                 # Convert dict to PromptTemplate if necessary and extract llm_id
                 llm_id = None
                 if isinstance(prompt_data, dict):
-                    # Extract llm_id from the prompt data if present
-                    llm_id = prompt_data.get("llm_id", self.default_client_id)
                     prompt_template = PromptTemplate(
                         prompt=prompt_data["prompt"],
                         output_format=prompt_data.get("output_format", "TEXT"),
-                        output_placeholder=prompt_data.get("output_placeholder")
+                        output_placeholder=prompt_data.get("output_placeholder"),
+                        llm_id= prompt_data.get("llm_id", self.default_client_id),
+                        stop_placeholder=prompt_data.get("stop_placeholder", None)
                     )
                 else:
                     prompt_template = prompt_data
-                    # Use default client if llm_id not specified
-                    llm_id = self.default_client_id
 
                 # Validate the requested LLM exists
-                if llm_id not in self.clients:
-                    raise ValueError(f"LLM with id '{llm_id}' not found. Available LLMs: {list(self.clients.keys())}")
+                if prompt_template.llm_id not in self.clients:
+                    raise ValueError(f"LLM with id '{prompt_template.llm_id}' not found. Available LLMs: {list(self.clients.keys())}")
 
                 # Get the client configuration
-                client_config = self.clients[llm_id]
+                client_config = self.clients[prompt_template.llm_id]
                 client = client_config["client"]
                 model = client_config["model"]
                 temperature = client_config["temperature"]
@@ -232,6 +234,8 @@ class TasksPromptsChain:
                 async for chunk in streamResponse:
                     if chunk is not None:
                         delta = chunk
+                        if delta:
+                            delta = chunk
                         response_content += delta
                         self._current_stream_buffer = response_content
                         self._format_current_stream()
@@ -241,7 +245,13 @@ class TasksPromptsChain:
                 placeholder_values[prompt_template.output_placeholder] = response_content
                 self._results[prompt_template.output_placeholder] = response_content
                 if streamout:
-                    yield delta
+                    yield delta
+
+                # Stop excution if the stop_placeholder is detected in the response content
+                if prompt_template.stop_placeholder and (prompt_template.stop_placeholder in response_content):
+                    raise Exception({"type": "error", "content": "Invalid project description. Please provide a valid project description."})
+                    #yield json.dumps({"type": "error", "content": "Invalid project description. Please provide a valid project description."})
+                    #return
 
         except Exception as e:
             raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
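Once a step's full response has been accumulated, the presence of the stop_placeholder string aborts the chain by raising, and the outer except re-wraps the error with the prompt index. A condensed standalone sketch of that control flow; everything other than the stop_placeholder idea itself is invented for illustration:

from typing import Optional

def check_stop(response_content: str, stop_placeholder: Optional[str], prompt_index: int) -> None:
    """Raise if the LLM's response contains the configured stop sentinel."""
    if stop_placeholder and stop_placeholder in response_content:
        raise Exception(
            f"Error in prompt chain execution at prompt {prompt_index}: "
            "stop placeholder detected in the LLM response"
        )

try:
    check_stop("Sorry, %%cant_be_inspired%%", "%%cant_be_inspired%%", 0)
except Exception as err:
    print(err)  # the caller decides whether to retry, skip, or abort the chain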
@@ -285,4 +295,3 @@ class TasksPromptsChain:
         if len(self._results) > 0:
             raise Exception("template_output must be called before execute_chain")
         self.set_output_template(template)
-
{tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.2.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tasks_prompts_chain
-Version: 0.1.0
+Version: 0.1.2
 Summary: A Python library for creating and executing chains of prompts using multiple LLM providers with streaming support and template formatting.
 Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
 Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -23,6 +23,7 @@ A Mini Python library for creating and executing chains of prompts using multipl
 - Template-based output formatting
 - System prompt support
 - Placeholder replacement between prompts
+- Stop placeholder string: if the LLM responds with this placeholder, the prompt chain is interrupted, acting as an LLM error handler
 - Multiple output formats (JSON, Markdown, CSV, Text)
 - Async/await support
 - Support for multiple LLM providers (OpenAI, Anthropic, Cerebras, etc.)
@@ -120,10 +121,11 @@ async def main():
     # Define your prompts - specify which LLM to use for each prompt
     prompts = [
         {
-            "prompt": "Create a design concept for a luxury chocolate bar",
+            "prompt": "Create a design concept for a luxury chocolate bar; if not inspired, respond with the string %%cant_be_inspired%% so the prompt chain is stopped",
             "output_format": "TEXT",
             "output_placeholder": "design_concept",
-            "llm_id": "gpt" # Use the GPT model for this prompt
+            "llm_id": "gpt", # Use the GPT model for this prompt
+            "stop_placeholder": "%%cant_be_inspired%%" # Stop string that interrupts the prompt chain
         },
         {
             "prompt": "Based on this concept: {{design_concept}}, suggest a color palette",
@@ -290,7 +292,8 @@ Each prompt in the chain can be defined as a dictionary:
     "prompt": str, # The actual prompt text
     "output_format": str, # "JSON", "MARKDOWN", "CSV", or "TEXT"
     "output_placeholder": str, # Identifier for accessing this result
-    "llm_id": str
+    "llm_id": str, # Optional: ID of the LLM to use for this prompt
+    "stop_placeholder": str # Optional: stop string; if the LLM returns it, the prompt chain is interrupted
 }
 ```
 
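Putting the format together, a prompt entry that uses both optional keys might look like the following; the values are invented for illustration:

prompt_entry = {
    "prompt": "Draft a tagline, or reply with %%no_tagline%% if the brief is unusable",
    "output_format": "TEXT",
    "output_placeholder": "tagline",
    "llm_id": "claude",                   # must match an LLM id registered with the chain
    "stop_placeholder": "%%no_tagline%%"  # chain stops if this string appears in the response
}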
@@ -311,6 +314,7 @@ The library includes comprehensive error handling:
 - API error handling
 - Placeholder validation
 - LLM validation (checks if specified LLM ID exists)
+- stop_placeholder validation (stops chain execution when the LLM output contains the stop string)
 
 Errors are raised with descriptive messages indicating the specific issue and prompt number where the error occurred.
 
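Since a detected stop_placeholder surfaces as a raised exception, callers typically wrap chain execution in try/except. A hedged sketch; it assumes a configured TasksPromptsChain instance named chain and the execute_chain(prompts) async generator documented above:

async def run_chain(chain, prompts) -> str:
    """Collect streamed chunks and surface chain interruptions."""
    output = []
    try:
        async for chunk in chain.execute_chain(prompts):
            output.append(chunk)
    except Exception as err:
        # Raised as "Error in prompt chain execution at prompt {i}: ...",
        # including when a stop_placeholder string is detected in a response.
        print(f"Chain stopped: {err}")
    return "".join(output)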
@@ -318,6 +322,7 @@ Errors are raised with descriptive messages indicating the specific issue and pr
 
 1. Always set templates before executing the chain
 2. Use meaningful placeholder names
+3. Define a stop_placeholder in each prompt to catch LLM-flagged bad responses and stop subsequent requests
 3. Handle streaming responses appropriately
 4. Choose appropriate models for different types of tasks
 5. Use system prompts for consistent context
tasks_prompts_chain-0.1.2.dist-info/RECORD

@@ -0,0 +1,7 @@
+tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
+tasks_prompts_chain/client_llm_sdk.py,sha256=T2x7kyEliZdVm-hl5Ci88s8kOvWcy3lxoOLko1bk8Sk,4830
+tasks_prompts_chain/tasks_prompts_chain.py,sha256=V5t68vSMzUnY6TmansQmHcrjAE3NtRPD0KvzsdnC_A4,13477
+tasks_prompts_chain-0.1.2.dist-info/METADATA,sha256=sXTgsZW_t0lbwVFEIFALkibzkowhizo7zYS0QcffsB4,13331
+tasks_prompts_chain-0.1.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+tasks_prompts_chain-0.1.2.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
+tasks_prompts_chain-0.1.2.dist-info/RECORD,,
tasks_prompts_chain-0.1.0.dist-info/RECORD

@@ -1,7 +0,0 @@
-tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
-tasks_prompts_chain/client_llm_sdk.py,sha256=ifwsecvykP5RalXMqolAClPHdpmv-5hF4eT61dxo1f8,3708
-tasks_prompts_chain/tasks_prompts_chain.py,sha256=ZqZIzAWEZMgKvbkXUT8_VZisSydb-jLuWxuTrMT1-Fw,12643
-tasks_prompts_chain-0.1.0.dist-info/METADATA,sha256=_-5r7zd9EnnPut3CF45s1VFGVte0iZHYb9Epx219UfQ,12573
-tasks_prompts_chain-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-tasks_prompts_chain-0.1.0.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
-tasks_prompts_chain-0.1.0.dist-info/RECORD,,
{tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.2.dist-info}/WHEEL
RENAMED, file without changes

{tasks_prompts_chain-0.1.0.dist-info → tasks_prompts_chain-0.1.2.dist-info}/licenses/LICENSE
RENAMED, file without changes