tasks-prompts-chain 0.0.4__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,91 @@
1
+ class ClientLLMSDK:
2
+ """
3
+ A class to handle LLM SDKs for various providers.
4
+ This class is designed to work with different LLM SDKs by
5
+ dynamically loading the appropriate class based on the provider.
6
+ """
7
+ def __init__(self, AsyncLLmAi, model_options):
8
+ """
9
+ Initialize with any LLM SDK class.
10
+
11
+ :param AsyncLLmAi: The async LLM SDK client class to use (e.g., openai.AsyncOpenAI, anthropic.AsyncAnthropic)
12
+ :param model_options: Client options; must include "api_key" and may include "base_url"
13
+ """
14
+ self.llm_class_name = AsyncLLmAi.__name__ # Store the class name used for provider dispatch
15
+ client_kwargs = {"api_key": model_options["api_key"]}
16
+ if "base_url" in model_options:
17
+ client_kwargs["base_url"] = model_options["base_url"]
18
+ # Instantiate the LLM
19
+ self.client = AsyncLLmAi(**client_kwargs)
20
+
21
+ async def generat_response(self, **kwargs):
22
+ """
23
+ Generate a response from the LLM.
24
+
25
+ :param kwargs: Generation options: "model", "messages", "temperature", "max_tokens", "stream"
26
+ (defaults: "gpt-4", [], 0.7, 512, True)
27
+ :return: An async generator yielding response text chunks
28
+ """
29
+ model = kwargs.get("model", "gpt-4")
30
+ messages = kwargs.get("messages", [])
31
+ temperature = kwargs.get("temperature", 0.7)
32
+ max_tokens = kwargs.get("max_tokens", 512)
33
+ stream = kwargs.get("stream", True)
34
+
35
+ if self.llm_class_name == "AsyncOpenAI": # OpenAI SDK
36
+ response = await self.client.chat.completions.create(
37
+ model=model,
38
+ messages=messages,
39
+ temperature=temperature,
40
+ max_tokens=max_tokens,
41
+ stream=stream
42
+ )
43
+
44
+ async for chunk in response:
45
+ if chunk.choices[0].delta.content is not None:
46
+ yield chunk.choices[0].delta.content
47
+
48
+
49
+ elif self.llm_class_name == "AsyncAnthropic": # Anthropic SDK
50
+ # Extract system message if present
51
+ system_message = ""
52
+ filtered_messages = []
53
+
54
+ for message in messages:
55
+ if message.get("role") == "system" and message.get("content") is not None:
56
+ system_message = message.get("content")
57
+ else:
58
+ filtered_messages.append(message)
59
+
60
+ # Update messages without system message
61
+ messages = filtered_messages
62
+ response = await self.client.messages.create(
63
+ system=system_message,
64
+ model=model,
65
+ messages=messages,
66
+ temperature=temperature,
67
+ max_tokens=max_tokens,
68
+ stream=stream
69
+ )
70
+
71
+ async for chunk in response:
72
+ # Based on the observed output format: RawContentBlockDeltaEvent with TextDelta
73
+ if chunk.type == "content_block_delta" and hasattr(chunk.delta, "text"):
74
+ yield chunk.delta.text
75
+ elif chunk.type == "content_block_stop":
76
+ pass
77
+
78
+ elif self.llm_class_name == "AsyncCerebras": # AsyncCerebras SDK
79
+ response = await self.client.chat.completions.create(
80
+ model=model,
81
+ messages=messages,
82
+ temperature=temperature,
83
+ max_tokens=max_tokens,
84
+ stream=stream
85
+ )
86
+
87
+ async for chunk in response:
88
+ if chunk.choices[0].delta.content is not None:
89
+ yield chunk.choices[0].delta.content
90
+ else:
91
+ raise NotImplementedError(f"Unsupported LLM: {self.llm_class_name}")
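
For orientation, the wrapper above can be driven on its own. The following is a minimal sketch, not part of the package: it assumes the `openai` SDK is installed and a real API key is substituted, and the model name and message are placeholders. The import path follows the wheel's RECORD (`tasks_prompts_chain/client_llm_sdk.py`).

```python
import asyncio

from openai import AsyncOpenAI
from tasks_prompts_chain.client_llm_sdk import ClientLLMSDK

async def demo():
    # Only "api_key" (and optionally "base_url") is forwarded to the SDK constructor.
    llm = ClientLLMSDK(AsyncOpenAI, {"api_key": "your-openai-api-key"})

    # generat_response (sic) is an async generator; it dispatches on the SDK
    # class name ("AsyncOpenAI" here) and yields text chunks as they stream.
    async for chunk in llm.generat_response(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        temperature=0.7,
        max_tokens=64,
        stream=True,
    ):
        print(chunk, end="", flush=True)

asyncio.run(demo())
```
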
@@ -29,8 +29,8 @@ Copyright 2025 Samir Ben Sghaier - Smirfolio
29
29
 
30
30
  """
31
31
  from typing import List, Optional, Dict, Union, AsyncGenerator, TypedDict
32
- from openai import AsyncOpenAI
33
32
  from enum import Enum
33
+ from .client_llm_sdk import ClientLLMSDK
34
34
 
35
35
  class OutputFormat(Enum):
36
36
  JSON = "JSON"
@@ -44,7 +44,6 @@ class ModelOptions(TypedDict, total=False):
44
44
  base_url: Optional[str]
45
45
  temperature: Optional[float]
46
46
  max_tokens: Optional[int]
47
- stream: Optional[bool]
48
47
 
49
48
  class PromptTemplate:
50
49
  def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None):
@@ -55,35 +54,62 @@ class PromptTemplate:
55
54
  class TasksPromptsChain:
56
55
  """A utility class for creating and executing prompt chains using OpenAI's API."""
57
56
 
58
- def __init__(self,
59
- model_options: ModelOptions,
57
+ def __init__(self,
58
+ llm_configs: List[Dict],
60
59
  system_prompt: Optional[str] = None,
61
60
  final_result_placeholder: Optional[str] = None,
62
61
  system_apply_to_all_prompts: bool = False):
63
62
  """
64
- Initialize the TasksPromptsChain with OpenAI configuration.
63
+ Initialize the TasksPromptsChain with multiple LLM configurations.
65
64
 
66
65
  Args:
67
- model_options (ModelOptions): Dictionary containing model configuration:
68
- - model (str): The model identifier to use (e.g., 'gpt-3.5-turbo')
69
- - api_key (str): The OpenAI API key
70
- - base_url (str, optional): API endpoint URL
71
- - temperature (float, optional): Temperature parameter (default: 0.7)
72
- - max_tokens (int, optional): Maximum tokens (default: 4120)
73
- - stream (bool, optional): Whether to stream responses (default: True)
66
+ llm_configs (List[Dict]): List of LLM configurations, each containing:
67
+ - llm_id (str): Unique identifier for this LLM configuration
68
+ - llm_class: The LLM class to use (e.g., openai.AsyncOpenAI, anthropic.AsyncAnthropic)
69
+ - model_options (Dict): Configuration for the LLM:
70
+ - model (str): The model identifier to use
71
+ - api_key (str): API key
72
+ - base_url (str, optional): Custom API endpoint URL
73
+ - temperature (float, optional): Temperature parameter (default: 0.7)
74
+ - max_tokens (int, optional): Maximum tokens (default: 4120)
74
75
  system_prompt (str, optional): System prompt to set context for the LLM
75
76
  final_result_placeholder (str, optional): The placeholder name for the final result
76
77
  system_apply_to_all_prompts (bool): Whether to apply system prompt to all prompts
77
78
  """
78
- self.model = model_options["model"]
79
- self.temperature = model_options.get("temperature", 0.7)
80
- self.max_tokens = model_options.get("max_tokens", 4120)
81
- self.stream = model_options.get("stream", True)
79
+ # Initialize clients dictionary
80
+ self.clients = {}
81
+ self.default_client_id = None
82
82
 
83
- client_kwargs = {"api_key": model_options["api_key"]}
84
- if "base_url" in model_options:
85
- client_kwargs["base_url"] = model_options["base_url"]
86
- self.client = AsyncOpenAI(**client_kwargs)
83
+ # Set up each LLM client
84
+ for config in llm_configs:
85
+ llm_id = config.get("llm_id")
86
+ if not llm_id:
87
+ raise ValueError("Each LLM configuration must have a 'llm_id'")
88
+
89
+ llm_class = config.get("llm_class")
90
+ if not llm_class:
91
+ raise ValueError(f"LLM configuration '{llm_id}' must specify 'llm_class'")
92
+
93
+ model_options = config.get("model_options", {})
94
+ if "api_key" not in model_options:
95
+ raise ValueError(f"LLM configuration '{llm_id}' must include 'api_key' in model_options")
96
+
97
+ # Set the first client as default if not already set
98
+ if self.default_client_id is None:
99
+ self.default_client_id = llm_id
100
+
101
+ # Store common settings in the client record for easy access
102
+ client_kwargs = {"api_key": model_options["api_key"]}
103
+ if "base_url" in model_options:
104
+ client_kwargs["base_url"] = model_options["base_url"]
105
+
106
+ self.clients[llm_id] = {
107
+ "llm_id": llm_id,
108
+ "model": model_options.get("model", "gpt-3.5-turbo"),
109
+ "temperature": model_options.get("temperature", 0.7),
110
+ "max_tokens": model_options.get("max_tokens", 4120),
111
+ "client": ClientLLMSDK(llm_class, client_kwargs)
112
+ }
87
113
  self.system_prompt = system_prompt
88
114
  self.system_apply_to_all_prompts = system_apply_to_all_prompts
89
115
  self.final_result_placeholder = final_result_placeholder or "final_result"
@@ -91,7 +117,15 @@ class TasksPromptsChain:
91
117
  self._output_template = None
92
118
  self._final_output_template = None
93
119
  self._current_stream_buffer = ""
94
-
120
+
121
+ def get_reflection(self):
122
+ """
123
+ Report which LLM clients are registered on this instance.
124
+
125
+ Returns:
126
+ dict: Mapping of each llm_id to its SDK class name
127
+ """
128
+ return {llm_id: config["client"].llm_class_name for llm_id, config in self.clients.items()}
95
129
  def set_output_template(self, template: str) -> None:
96
130
  """
97
131
  Set the output template to be used for streaming responses.
@@ -121,7 +155,7 @@ class TasksPromptsChain:
121
155
  self._final_output_template=output
122
156
  return output
123
157
 
124
- async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]) -> AsyncGenerator[str, None]:
158
+ async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]], streamout: bool = True) -> AsyncGenerator[str, None]:
125
159
  """
126
160
  Execute a chain of prompts sequentially, with placeholder replacement.
127
161
 
@@ -130,18 +164,22 @@ class TasksPromptsChain:
130
164
  {
131
165
  "prompt": str,
132
166
  "output_format": str,
133
- "output_placeholder": str
167
+ "output_placeholder": str,
168
+ "llm_id": str # Optional: Specifies which LLM to use
134
169
  }
135
170
  Returns:
136
- List[str]: List of responses for each prompt
171
+ AsyncGenerator[str, None]: Generator yielding response chunks
137
172
  """
138
173
  responses = []
139
174
  placeholder_values = {}
140
175
 
141
176
  try:
142
177
  for i, prompt_data in enumerate(prompts):
143
- # Convert dict to PromptTemplate if necessary
178
+ # Convert dict to PromptTemplate if necessary and extract llm_id
179
+ llm_id = None
144
180
  if isinstance(prompt_data, dict):
181
+ # Extract llm_id from the prompt data if present
182
+ llm_id = prompt_data.get("llm_id", self.default_client_id)
145
183
  prompt_template = PromptTemplate(
146
184
  prompt=prompt_data["prompt"],
147
185
  output_format=prompt_data.get("output_format", "TEXT"),
@@ -149,6 +187,19 @@ class TasksPromptsChain:
149
187
  )
150
188
  else:
151
189
  prompt_template = prompt_data
190
+ # Use default client if llm_id not specified
191
+ llm_id = self.default_client_id
192
+
193
+ # Validate the requested LLM exists
194
+ if llm_id not in self.clients:
195
+ raise ValueError(f"LLM with id '{llm_id}' not found. Available LLMs: {list(self.clients.keys())}")
196
+
197
+ # Get the client configuration
198
+ client_config = self.clients[llm_id]
199
+ client = client_config["client"]
200
+ model = client_config["model"]
201
+ temperature = client_config["temperature"]
202
+ max_tokens = client_config["max_tokens"]
152
203
 
153
204
  # Replace placeholders in the prompt
154
205
  current_prompt = prompt_template.prompt
@@ -163,22 +214,24 @@ class TasksPromptsChain:
163
214
  messages = []
164
215
  if self.system_prompt and (i == 0 or self.system_apply_to_all_prompts):
165
216
  messages.append({"role": "system", "content": self.system_prompt})
217
+
166
218
  messages.append({"role": "user", "content": current_prompt + format_instruction})
167
219
 
168
- stream = await self.client.chat.completions.create(
169
- model=self.model,
220
+ # Generate response using the selected LLM
221
+ streamResponse = client.generat_response(
222
+ model=model,
170
223
  messages=messages,
171
- temperature=self.temperature,
172
- max_tokens=self.max_tokens,
173
- stream=self.stream
224
+ temperature=temperature,
225
+ max_tokens=max_tokens,
226
+ stream=True
174
227
  )
175
228
 
176
229
  response_content = ""
177
230
  self._current_stream_buffer = ""
178
231
 
179
- async for chunk in stream:
180
- if chunk.choices[0].delta.content is not None:
181
- delta = chunk.choices[0].delta.content
232
+ async for chunk in streamResponse:
233
+ if chunk is not None:
234
+ delta = chunk
182
235
  response_content += delta
183
236
  self._current_stream_buffer = response_content
184
237
  self._format_current_stream()
@@ -187,7 +240,8 @@ class TasksPromptsChain:
187
240
  if prompt_template.output_placeholder:
188
241
  placeholder_values[prompt_template.output_placeholder] = response_content
189
242
  self._results[prompt_template.output_placeholder] = response_content
190
- yield response_content
243
+ if streamout:
244
+ yield delta
191
245
 
192
246
  except Exception as e:
193
247
  raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
@@ -195,6 +249,7 @@ class TasksPromptsChain:
195
249
  # Store the last response with the final result placeholder
196
250
  if responses:
197
251
  self._results[self.final_result_placeholder] = responses[-1]
252
+ yield "<tasks-sys>Done</tasks-sys>"
198
253
 
199
254
  def get_result(self, placeholder: str) -> Optional[str]:
200
255
  """
@@ -0,0 +1,407 @@
1
+ Metadata-Version: 2.4
2
+ Name: tasks_prompts_chain
3
+ Version: 0.1.0
4
+ Summary: A Python library for creating and executing chains of prompts using multiple LLM providers with streaming support and template formatting.
5
+ Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
6
+ Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
7
+ Author-email: Samir Ben Sghaier <ben.sghaier.samir@gmail.com>
8
+ License-Expression: Apache-2.0
9
+ License-File: LICENSE
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: Programming Language :: Python :: 3
12
+ Requires-Python: >=3.8
13
+ Description-Content-Type: text/markdown
14
+
15
+ # TasksPromptsChain
16
+
17
+ A Mini Python library for creating and executing chains of prompts using multiple LLM providers with streaming support and output template formatting.
18
+
19
+ ## Features
20
+
21
+ - Sequential prompt chain execution
22
+ - Streaming responses
23
+ - Template-based output formatting
24
+ - System prompt support
25
+ - Placeholder replacement between prompts
26
+ - Multiple output formats (JSON, Markdown, CSV, Text)
27
+ - Async/await support
28
+ - Support for multiple LLM providers (OpenAI, Anthropic, Cerebras, etc.)
29
+ - Multi-model support - use different models for different prompts in the chain
30
+
31
+ ## Dependencies
32
+
33
+ Please install typing-extensions and the SDK for your preferred LLM providers:
34
+
35
+ For OpenAI:
36
+ ```bash
37
+ pip install typing-extensions
38
+ pip install openai
39
+ ```
40
+
41
+ For Anthropic:
42
+ ```bash
43
+ pip install typing-extensions
44
+ pip install anthropic
45
+ ```
46
+
47
+ For Cerebras:
48
+ ```bash
49
+ pip install typing-extensions
50
+ pip install cerebras
51
+ ```
52
+
53
+ To install the library:
54
+ ```bash
55
+ pip install tasks_prompts_chain
56
+ ```
57
+
58
+ ## Installation from source code
59
+
60
+ ### For users installing from the GitHub source repo
61
+ ```bash
62
+ pip install -r requirements/requirements.txt
63
+ ```
64
+
65
+ ### For developers working from the GitHub source repo
66
+ ```bash
67
+ pip install -r requirements/requirements.txt
68
+ pip install -r requirements/requirements-dev.txt
69
+ ```
70
+
71
+ ## Quick Start
72
+
73
+ ```python
74
+ from tasks_prompts_chain import TasksPromptsChain
75
+ from openai import AsyncOpenAI
76
+ from anthropic import AsyncAnthropic
77
+ from cerebras import AsyncCerebras
78
+
79
+ async def main():
80
+ # Initialize the chain with multiple LLM configurations
81
+ llm_configs = [
82
+ {
83
+ "llm_id": "gpt", # Unique identifier for this LLM
84
+ "llm_class": AsyncOpenAI, # LLM SDK class
85
+ "model_options": {
86
+ "model": "gpt-4o",
87
+ "api_key": "your-openai-api-key",
88
+ "temperature": 0.1,
89
+ "max_tokens": 4120,
90
+ }
91
+ },
92
+ {
93
+ "llm_id": "claude", # Unique identifier for this LLM
94
+ "llm_class": AsyncAnthropic, # LLM SDK class
95
+ "model_options": {
96
+ "model": "claude-3-sonnet-20240229",
97
+ "api_key": "your-anthropic-api-key",
98
+ "temperature": 0.1,
99
+ "max_tokens": 8192,
100
+ }
101
+ },
102
+ {
103
+ "llm_id": "llama", # Unique identifier for this LLM
104
+ "llm_class": AsyncCerebras, # LLM SDK class
105
+ "model_options": {
106
+ "model": "llama-3.3-70b",
107
+ "api_key": "your-cerebras-api-key",
108
+ "base_url": "https://api.cerebras.ai/v1",
109
+ "temperature": 0.1,
110
+ "max_tokens": 4120,
111
+ }
112
+ }
113
+ ]
114
+
115
+ chain = TasksPromptsChain(
116
+ llm_configs,
117
+ final_result_placeholder="design_result"
118
+ )
119
+
120
+ # Define your prompts - specify which LLM to use for each prompt
121
+ prompts = [
122
+ {
123
+ "prompt": "Create a design concept for a luxury chocolate bar",
124
+ "output_format": "TEXT",
125
+ "output_placeholder": "design_concept",
126
+ "llm_id": "gpt" # Use the GPT model for this prompt
127
+ },
128
+ {
129
+ "prompt": "Based on this concept: {{design_concept}}, suggest a color palette",
130
+ "output_format": "JSON",
131
+ "output_placeholder": "color_palette",
132
+ "llm_id": "claude" # Use the Claude model for this prompt
133
+ },
134
+ {
135
+ "prompt": "Based on the design and colors: {{design_concept}} and {{color_palette}}, suggest packaging materials",
136
+ "output_format": "MARKDOWN",
137
+ "output_placeholder": "packaging",
138
+ "llm_id": "llama" # Use the Cerebras model for this prompt
139
+ }
140
+ ]
141
+
142
+ # Stream the responses
143
+ async for chunk in chain.execute_chain(prompts):
144
+ print(chunk, end="", flush=True)
145
+
146
+ # Get specific results
147
+ design = chain.get_result("design_concept")
148
+ colors = chain.get_result("color_palette")
149
+ packaging = chain.get_result("packaging")
150
+ ```
151
+
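
Version 0.1.0 also adds a small `get_reflection()` helper (see the code diff above) that reports which clients were registered. A brief sketch, assuming the `chain` built in the Quick Start; the printed mapping is illustrative:

```python
# Maps each llm_id from llm_configs to the SDK class name it wraps.
print(chain.get_reflection())
# e.g. {'gpt': 'AsyncOpenAI', 'claude': 'AsyncAnthropic', 'llama': 'AsyncCerebras'}
```
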
152
+ ## Advanced Usage
153
+
154
+ ### Using System Prompts
155
+
156
+ ```python
157
+ chain = TasksPromptsChain(
158
+ llm_configs=[
159
+ {
160
+ "llm_id": "default_model",
161
+ "llm_class": AsyncOpenAI,
162
+ "model_options": {
163
+ "model": "gpt-4o",
164
+ "api_key": "your-openai-api-key",
165
+ "temperature": 0.1,
166
+ "max_tokens": 4120,
167
+ }
168
+ }
169
+ ],
170
+ final_result_placeholder="result",
171
+ system_prompt="You are a professional design expert specialized in luxury products",
172
+ system_apply_to_all_prompts=True
173
+ )
174
+ ```
175
+
176
+ ### Using Cerebras Models
177
+
178
+ ```python
179
+ from cerebras import AsyncCerebras
180
+
181
+ llm_configs = [
182
+ {
183
+ "llm_id": "cerebras",
184
+ "llm_class": AsyncCerebras,
185
+ "model_options": {
186
+ "model": "llama-3.3-70b",
187
+ "api_key": "your-cerebras-api-key",
188
+ "base_url": "https://api.cerebras.ai/v1",
189
+ "temperature": 0.1,
190
+ "max_tokens": 4120,
191
+ }
192
+ }
193
+ ]
194
+
195
+ chain = TasksPromptsChain(
196
+ llm_configs,
197
+ final_result_placeholder="result",
198
+ )
199
+ ```
200
+
201
+ ### Custom API Endpoint
202
+
203
+ ```python
204
+ llm_configs = [
205
+ {
206
+ "llm_id": "custom_endpoint",
207
+ "llm_class": AsyncOpenAI,
208
+ "model_options": {
209
+ "model": "your-custom-model",
210
+ "api_key": "your-api-key",
211
+ "base_url": "https://your-custom-endpoint.com/v1",
212
+ "temperature": 0.1,
213
+ "max_tokens": 4120,
214
+ }
215
+ }
216
+ ]
217
+
218
+ chain = TasksPromptsChain(
219
+ llm_configs,
220
+ final_result_placeholder="result",
221
+ )
222
+ ```
223
+
224
+ ### Using Templates
225
+
226
+ Call this setter before executing the chain with `chain.execute_chain(prompts)`:
227
+
228
+ ```python
229
+ # Set output template before execution
230
+ chain.template_output("""
231
+ <result>
232
+ <design>
233
+ ### Design Concept:
234
+ {{design_concept}}
235
+ </design>
236
+
237
+ <colors>
238
+ ### Color Palette:
239
+ {{color_palette}}
240
+ </colors>
241
+ </result>
242
+ """)
243
+ ```
244
+ Then retrieve the final result rendered within the template:
245
+
246
+ ```python
247
+ # Print the final result rendered in the formatted template
248
+ print(chain.get_final_result_within_template())
249
+ ```
250
+
251
+
252
+ ## API Reference
253
+
254
+ ### TasksPromptsChain Class
255
+
256
+ #### Constructor Parameters
257
+
258
+ - `llm_configs` (List[Dict]): List of LLM configurations, each containing:
259
+ - `llm_id` (str): Unique identifier for this LLM configuration
260
+ - `llm_class`: The LLM class to use (e.g., `AsyncOpenAI`, `AsyncAnthropic`, `AsyncCerebras`)
261
+ - `model_options` (Dict): Configuration for the LLM:
262
+ - `model` (str): The model identifier
263
+ - `api_key` (str): Your API key for the LLM provider
264
+ - `temperature` (float): Temperature setting for response generation
265
+ - `max_tokens` (int): Maximum tokens in generated responses
266
+ - `base_url` (Optional[str]): Custom API endpoint URL
267
+ - `system_prompt` (Optional[str]): System prompt for context
268
+ - `final_result_placeholder` (str): Name for the final result placeholder
269
+ - `system_apply_to_all_prompts` (Optional[bool]): Apply system prompt to all prompts
270
+
271
+ #### Methods
272
+
273
+ - `execute_chain(prompts: List[Dict], streamout: bool = True) -> AsyncGenerator[str, None]`
274
+ - Executes the prompt chain and streams responses; pass `streamout=False` to run the chain without yielding per-chunk text (see the sketch after this list)
275
+
276
+ - `template_output(template: str) -> None`
277
+ - Sets the output template format
278
+
279
+ - `get_final_result_within_template() -> Optional[str]`
280
+ - Retrieves the final result rendered with the template set via `template_output()`
281
+
282
+ - `get_result(placeholder: str) -> Optional[str]`
283
+ - Retrieves a specific result by placeholder
284
+
285
+ ### Prompt Format
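
As referenced in the method list above, `streamout=False` drives every prompt and stores results without yielding per-chunk text (only the final `<tasks-sys>Done</tasks-sys>` marker is emitted). A hedged sketch, reusing `chain` and `prompts` from the Quick Start; the function name is illustrative:

```python
async def run_quietly(chain, prompts):
    # Drain the generator without printing intermediate chunks.
    async for _ in chain.execute_chain(prompts, streamout=False):
        pass
    # Results are still recorded under their placeholders.
    return chain.get_result("design_concept")
```
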
286
+
287
+ Each prompt in the chain can be defined as a dictionary:
288
+ ```python
289
+ {
290
+ "prompt": str, # The actual prompt text
291
+ "output_format": str, # "JSON", "MARKDOWN", "CSV", or "TEXT"
292
+ "output_placeholder": str, # Identifier for accessing this result
293
+ "llm_id": str # Optional: ID of the LLM to use for this prompt
294
+ }
295
+ ```
296
+
297
+ ## Supported LLM Providers
298
+
299
+ TasksPromptsChain currently supports the following LLM providers:
300
+
301
+ 1. **OpenAI** - via `AsyncOpenAI` from the `openai` package
302
+ 2. **Anthropic** - via `AsyncAnthropic` from the `anthropic` package
303
+ 3. **Cerebras** - via `AsyncCerebras` from the `cerebras` package
304
+
305
+ Each provider has different capabilities and models. The library adapts the API calls to work with each provider's specific requirements.
306
+
307
+ ## Error Handling
308
+
309
+ The library includes comprehensive error handling:
310
+ - Template validation
311
+ - API error handling
312
+ - Placeholder validation
313
+ - LLM validation (checks if specified LLM ID exists)
314
+
315
+ Errors are raised with descriptive messages indicating the specific issue and prompt number where the error occurred.
316
+
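
Because `execute_chain` re-raises failures as a plain `Exception` whose message names the failing prompt index (per the source above), a minimal handling sketch looks like this; `chain` and `prompts` are assumed from the Quick Start and the function name is illustrative:

```python
async def run_chain_safely(chain, prompts):
    try:
        async for chunk in chain.execute_chain(prompts):
            print(chunk, end="", flush=True)
    except Exception as exc:
        # Message format from the source:
        # "Error in prompt chain execution at prompt <i>: <cause>"
        print(f"\nChain failed: {exc}")
```
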
317
+ ## Best Practices
318
+
319
+ 1. Always set templates before executing the chain
320
+ 2. Use meaningful placeholder names
321
+ 3. Handle streaming responses appropriately
322
+ 4. Choose appropriate models for different types of tasks
323
+ 5. Use system prompts for consistent context
324
+ 6. Select the best provider for specific tasks:
325
+ - OpenAI is great for general purpose applications
326
+ - Anthropic (Claude) excels at longer contexts and complex reasoning
327
+ - Cerebras is excellent for high-performance AI tasks
328
+
329
+ ## How You Can Get Involved
330
+ ✅ Try out tasks_prompts_chain: Give our software a try in your own setup and let us know how it goes - your experience helps us improve!
331
+
332
+ ✅ Find a bug: Found something that doesn't work quite right? We'd appreciate your help in documenting it so we can fix it together.
333
+
334
+ ✅ Fixing Bugs: Even small code contributions make a big difference! Pick an issue that interests you and share your solution with us.
335
+
336
+ ✅ Share your thoughts: Have an idea that would make this project more useful? We're excited to hear your thoughts and explore new possibilities together!
337
+
338
+ Your contributions, big or small, truly matter to us. We're grateful for any help you can provide and look forward to welcoming you to our community!
339
+
340
+ ### Developer Contribution Workflow
341
+ 1. Fork the Repository: Create your own copy of the project by clicking the "Fork" button on our GitHub repository.
342
+ 2. Clone Your Fork:
343
+ ``` bash
344
+ git clone git@github.com:<your-username>/tasks_prompts_chain.git
345
+ cd tasks_prompts_chain/
346
+ ```
347
+ 3. Set Up Development Environment
348
+ ``` bash
349
+ # Create and activate a virtual environment
350
+ python3 -m venv .venv
351
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
352
+
353
+ # Install development dependencies
354
+ pip install -r requirements/requirements-dev.txt
355
+ ```
356
+ 4. Stay Updated
357
+ ```bash
358
+ # Add the upstream repository
359
+ git remote add upstream https://github.com/smirfolio/tasks_prompts_chain.git
360
+
361
+ # Fetch latest changes from upstream
362
+ git fetch upstream
363
+ git merge upstream/main
364
+ ```
365
+ #### Making Changes
366
+ 1. Create a Feature Branch
367
+ ```bash
368
+ git checkout -b feature/your-feature-name
369
+ # or
370
+ git checkout -b bugfix/issue-you-are-fixing
371
+ ```
372
+ 2. Implement Your Changes
373
+ - Write tests for your changes when applicable
374
+ - Ensure existing tests pass with pytest
375
+ - Follow our code style guidelines
376
+
377
+ 3. Commit Your Changes
378
+ ```bash
379
+ git add .
380
+ git commit -m "Your descriptive commit message"
381
+ ```
382
+ 4. Push to Your Fork
383
+ ```bash
384
+ git push origin feature/your-feature-name
385
+ ```
386
+ 5. Create a Pull Request
387
+ 6. Code Review Process
388
+ - Maintainers will review your PR
389
+ - Address any requested changes
390
+ - Once approved, your contribution will be merged!
391
+
392
+ ## Release Notes
393
+
394
+ ### 0.1.0 - Breaking Changes
395
+
396
+ - **Complete API redesign**: The constructor now requires a list of LLM configurations instead of a single LLM class
397
+ - **Multi-model support**: Use different models for different prompts in the chain
398
+ - **Constructor changes**: Replaced `AsyncLLmAi` and `model_options` with `llm_configs`
399
+ - **New provider support**: Added official support for Cerebras models
400
+ - **Removed dependencies**: No longer directly depends on OpenAI SDK
401
+ - **Prompt configuration**: Added `llm_id` field to prompt dictionaries to specify which LLM to use
402
+
403
+ Users upgrading from version 0.0.x will need to modify their code to use the new API structure.
404
+
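
A hedged before/after sketch of that migration, pieced together from the 0.0.4 and 0.1.0 sources in this diff; API keys and model names are placeholders:

```python
from tasks_prompts_chain import TasksPromptsChain
from openai import AsyncOpenAI

# 0.0.4 style (removed): a single model_options dict, OpenAI only.
# chain = TasksPromptsChain(
#     model_options={"model": "gpt-3.5-turbo", "api_key": "your-api-key"},
#     final_result_placeholder="design_result",
# )

# 0.1.0 style: a list of LLM configurations, each identified by llm_id.
chain = TasksPromptsChain(
    llm_configs=[
        {
            "llm_id": "gpt",
            "llm_class": AsyncOpenAI,
            "model_options": {
                "model": "gpt-4o",
                "api_key": "your-openai-api-key",
                "temperature": 0.7,
                "max_tokens": 4120,
            },
        }
    ],
    final_result_placeholder="design_result",
)

# Prompts may now pin a specific model via the optional "llm_id" field.
prompts = [
    {
        "prompt": "Create a design concept for a luxury chocolate bar",
        "output_format": "TEXT",
        "output_placeholder": "design_concept",
        "llm_id": "gpt",
    }
]
```
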
405
+ ## License
406
+
407
+ Apache 2.0 License
@@ -0,0 +1,7 @@
1
+ tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
2
+ tasks_prompts_chain/client_llm_sdk.py,sha256=ifwsecvykP5RalXMqolAClPHdpmv-5hF4eT61dxo1f8,3708
3
+ tasks_prompts_chain/tasks_prompts_chain.py,sha256=ZqZIzAWEZMgKvbkXUT8_VZisSydb-jLuWxuTrMT1-Fw,12643
4
+ tasks_prompts_chain-0.1.0.dist-info/METADATA,sha256=_-5r7zd9EnnPut3CF45s1VFGVte0iZHYb9Epx219UfQ,12573
5
+ tasks_prompts_chain-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
6
+ tasks_prompts_chain-0.1.0.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
7
+ tasks_prompts_chain-0.1.0.dist-info/RECORD,,
@@ -1,200 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: tasks_prompts_chain
3
- Version: 0.0.4
4
- Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
5
- Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
6
- Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
7
- Author-email: Samir Ben Sghaier <ben.sghaier.samir@gmail.com>
8
- License-Expression: Apache-2.0
9
- License-File: LICENSE
10
- Classifier: Operating System :: OS Independent
11
- Classifier: Programming Language :: Python :: 3
12
- Requires-Python: >=3.8
13
- Description-Content-Type: text/markdown
14
-
15
- # TasksPromptsChain
16
-
17
- A Mini Python library for creating and executing chains of prompts using OpenAI's API with streaming support and output template formatting.
18
-
19
- ## Features
20
-
21
- - Sequential prompt chain execution
22
- - Streaming responses
23
- - Template-based output formatting
24
- - System prompt support
25
- - Placeholder replacement between prompts
26
- - Multiple output formats (JSON, Markdown, CSV, Text)
27
- - Async/await support
28
-
29
- ## Dependencies
30
-
31
- Please install typing-extensions and openai python packages
32
- ```bash
33
- pip install typing-extensions
34
- pip install openai
35
- ```
36
- To Install the library:
37
- ```
38
- pip install tasks_prompts_chain
39
- ```
40
-
41
- ## Installation from source code
42
-
43
- ### For Users required from source gitHub repo
44
- ```bash
45
- pip install -r requirements/requirements.txt
46
- ```
47
-
48
- ### For Developers required from source gitHub repo
49
- ```bash
50
- pip install -r requirements/requirements.txt
51
- pip install -r requirements/requirements-dev.txt
52
- ```
53
-
54
- ## Quick Start
55
-
56
- ```python
57
- from tasks_prompts_chain import TasksPromptsChain
58
-
59
- async def main():
60
- # Initialize the chain
61
- chain = TasksPromptsChain(
62
- model="gpt-3.5-turbo",
63
- api_key="your-api-key",
64
- final_result_placeholder="design_result"
65
- )
66
-
67
- # Define your prompts
68
- prompts = [
69
- {
70
- "prompt": "Create a design concept for a luxury chocolate bar",
71
- "output_format": "TEXT",
72
- "output_placeholder": "design_concept"
73
- },
74
- {
75
- "prompt": "Based on this concept: {{design_concept}}, suggest a color palette",
76
- "output_format": "JSON",
77
- "output_placeholder": "color_palette"
78
- }
79
- ]
80
-
81
- # Stream the responses
82
- async for chunk in chain.execute_chain(prompts):
83
- print(chunk, end="", flush=True)
84
-
85
- # Get specific results
86
- design = chain.get_result("design_concept")
87
- colors = chain.get_result("color_palette")
88
- ```
89
-
90
- ## Advanced Usage
91
-
92
- ### Using System Prompts
93
-
94
- ```python
95
- chain = TasksPromptsChain(
96
- model="gpt-3.5-turbo",
97
- api_key="your-api-key",
98
- final_result_placeholder="result",
99
- system_prompt="You are a professional design expert specialized in luxury products",
100
- system_apply_to_all_prompts=True
101
- )
102
- ```
103
-
104
- ### Custom API Endpoint
105
-
106
- ```python
107
- chain = TasksPromptsChain(
108
- model="gpt-3.5-turbo",
109
- api_key="your-api-key",
110
- final_result_placeholder="result",
111
- base_url="https://your-custom-endpoint.com/v1"
112
- )
113
- ```
114
-
115
- ### Using Templates
116
-
117
- You must call this set method before the execution of the prompting query (chain.execute_chain(prompts))
118
-
119
- ```python
120
- # Set output template before execution
121
- chain.template_output("""
122
- <result>
123
- <design>
124
- ### Design Concept:
125
- {{design_concept}}
126
- </design>
127
-
128
- <colors>
129
- ### Color Palette:
130
- {{color_palette}}
131
- </colors>
132
- </result>
133
- """)
134
- ```
135
- then retrieves the final result within the template :
136
-
137
- ```python
138
- # print out the final result in the well formatted template
139
- print(chain.get_final_result_within_template())
140
- ```
141
-
142
-
143
- ## API Reference
144
-
145
- ### TasksPromptsChain Class
146
-
147
- #### Constructor Parameters
148
-
149
- - `model` (str): The model identifier (e.g., 'gpt-3.5-turbo')
150
- - `api_key` (str): Your OpenAI API key
151
- - `final_result_placeholder` (str): Name for the final result placeholder
152
- - `system_prompt` (Optional[str]): System prompt for context
153
- - `system_apply_to_all_prompts` (Optional[bool]): Apply system prompt to all prompts
154
- - `base_url` (Optional[str]): Custom API endpoint URL
155
-
156
- #### Methods
157
-
158
- - `execute_chain(prompts: List[Dict], temperature: float = 0.7) -> AsyncGenerator[str, None]`
159
- - Executes the prompt chain and streams responses
160
-
161
- - `template_output(template: str) -> None`
162
- - Sets the output template format
163
-
164
- - `get_final_result_within_template(self) -> Optional[str]`
165
- - Retrieves the final query result with the defined template in template_output();
166
-
167
- - `get_result(placeholder: str) -> Optional[str]`
168
- - Retrieves a specific result by placeholder
169
-
170
- ### Prompt Format
171
-
172
- Each prompt in the chain can be defined as a dictionary:
173
- ```python
174
- {
175
- "prompt": str, # The actual prompt text
176
- "output_format": str, # "JSON", "MARKDOWN", "CSV", or "TEXT"
177
- "output_placeholder": str # Identifier for accessing this result
178
- }
179
- ```
180
-
181
- ## Error Handling
182
-
183
- The library includes comprehensive error handling:
184
- - Template validation
185
- - API error handling
186
- - Placeholder validation
187
-
188
- Errors are raised with descriptive messages indicating the specific issue and prompt number where the error occurred.
189
-
190
- ## Best Practices
191
-
192
- 1. Always set templates before executing the chain
193
- 2. Use meaningful placeholder names
194
- 3. Handle streaming responses appropriately
195
- 4. Consider temperature settings based on your use case
196
- 5. Use system prompts for consistent context
197
-
198
- ## License
199
-
200
- MIT License
@@ -1,6 +0,0 @@
1
- tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
2
- tasks_prompts_chain/tasks_prompts_chain.py,sha256=NwF9PmIHXbEzEusx7B5NTph3zgrJ-SuN74eCc2dRTPI,9986
3
- tasks_prompts_chain-0.0.4.dist-info/METADATA,sha256=pfN6RkU9RnR7fJm0i4_PnBKBGknzjd6URaXm0184HwA,5334
4
- tasks_prompts_chain-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
- tasks_prompts_chain-0.0.4.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
6
- tasks_prompts_chain-0.0.4.dist-info/RECORD,,