tasks-prompts-chain 0.0.6__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,91 @@
1
+ class ClientLLMSDK:
2
+ """
3
+ A class to handle LLM SDKs for various providers.
4
+ This class is designed to work with different LLM SDKs by
5
+ dynamically loading the appropriate class based on the provider.
6
+ """
7
+ def __init__(self, AsyncLLmAi, model_options):
8
+ """
9
+ Initialize with any LLM SDK class.
10
+
11
+ :param AsyncLLmAi: The LLM client class to use (e.g., openai.AsyncOpenAI, anthropic.AsyncAnthropic)
12
+ :param model_options: Options for initializing the client; must include api_key and may include base_url
13
+ """
14
+ self.llm_class_name = AsyncLLmAi.__name__  # Store the class name for provider dispatch
15
+ client_kwargs = {"api_key": model_options["api_key"]}
16
+ if "base_url" in model_options:
17
+ client_kwargs["base_url"] = model_options["base_url"]
18
+ # Instantiate the LLM
19
+ self.client = AsyncLLmAi(**client_kwargs)
20
+
21
+ async def generat_response(self, **kwargs):
22
+ """
23
+ Generate a response from the LLM.
24
+
25
+ :param kwargs: Generation parameters sent to the provider, including
26
+ model, messages, temperature, max_tokens, and stream
27
+ :return: An async generator yielding chunks of the generated response
28
+ """
29
+ model = kwargs.get("model", "gpt-4")
30
+ messages= kwargs.get("messages", [])
31
+ temperature = kwargs.get("temperature", 0.7)
32
+ max_tokens = kwargs.get("max_tokens", 512)
33
+ stream = kwargs.get("stream", True)
34
+
35
+ if self.llm_class_name == "AsyncOpenAI": # OpenAI SDK
36
+ response = await self.client.chat.completions.create(
37
+ model=model,
38
+ messages=messages,
39
+ temperature=temperature,
40
+ max_tokens=max_tokens,
41
+ stream=stream
42
+ )
43
+
44
+ async for chunk in response:
45
+ if chunk.choices[0].delta.content is not None:
46
+ yield chunk.choices[0].delta.content
47
+
48
+
49
+ elif self.llm_class_name == "AsyncAnthropic": # Anthropic SDK
50
+ # Extract system message if present
51
+ system_message = ""
52
+ filtered_messages = []
53
+
54
+ for message in messages:
55
+ if message.get("role") == "system" and message.get("content") != None:
56
+ system_message = message.get("content")
57
+ else:
58
+ filtered_messages.append(message)
59
+
60
+ # Update messages without system message
61
+ messages = filtered_messages
62
+ response = await self.client.messages.create(
63
+ system=system_message,
64
+ model=model,
65
+ messages=messages,
66
+ temperature=temperature,
67
+ max_tokens=max_tokens,
68
+ stream=stream
69
+ )
70
+
71
+ async for chunk in response:
72
+ # Based on the observed output format: RawContentBlockDeltaEvent with TextDelta
73
+ if chunk.type == "content_block_delta" and hasattr(chunk.delta, "text"):
74
+ yield chunk.delta.text
75
+ elif chunk.type == "content_block_stop":
76
+ pass
77
+
78
+ elif self.llm_class_name == "AsyncCerebras": # AsyncCerebras SDK
79
+ response = await self.client.chat.completions.create(
80
+ model=model,
81
+ messages=messages,
82
+ temperature=temperature,
83
+ max_tokens=max_tokens,
84
+ stream=stream
85
+ )
86
+
87
+ async for chunk in response:
88
+ if chunk.choices[0].delta.content is not None:
89
+ yield chunk.choices[0].delta.content
90
+ else:
91
+ raise NotImplementedError(f"Unsupported LLM: {self.llm_class_name}")
@@ -29,8 +29,9 @@ Copyright 2025 Samir Ben Sghaier - Smirfolio
29
29
 
30
30
  """
31
31
  from typing import List, Optional, Dict, Union, AsyncGenerator, TypedDict
32
- from openai import AsyncOpenAI
33
32
  from enum import Enum
33
+ from .client_llm_sdk import ClientLLMSDK
34
+ import json
34
35
 
35
36
  class OutputFormat(Enum):
36
37
  JSON = "JSON"
@@ -44,46 +45,74 @@ class ModelOptions(TypedDict, total=False):
44
45
  base_url: Optional[str]
45
46
  temperature: Optional[float]
46
47
  max_tokens: Optional[int]
47
- stream: Optional[bool]
48
48
 
49
49
  class PromptTemplate:
50
- def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None):
50
+ def __init__(self, prompt: str, output_format: str = "TEXT", output_placeholder: Optional[str] = None, llm_id: Optional[str] = None, stop_placeholder: Optional[str] = None):
51
51
  self.prompt = prompt
52
- self.output_format = OutputFormat(output_format.upper()).value
52
+ self.output_format = OutputFormat(output_format.upper())
53
53
  self.output_placeholder = output_placeholder
54
+ self.llm_id=llm_id
55
+ self.stop_placeholder = stop_placeholder
54
56
 
55
57
  class TasksPromptsChain:
56
58
  """A utility class for creating and executing prompt chains using OpenAI's API."""
57
59
 
58
- def __init__(self,
59
- model_options: ModelOptions,
60
+ def __init__(self,
61
+ llm_configs: List[Dict],
60
62
  system_prompt: Optional[str] = None,
61
63
  final_result_placeholder: Optional[str] = None,
62
64
  system_apply_to_all_prompts: bool = False):
63
65
  """
64
- Initialize the TasksPromptsChain with OpenAI configuration.
66
+ Initialize the TasksPromptsChain with multiple LLM configurations.
65
67
 
66
68
  Args:
67
- model_options (ModelOptions): Dictionary containing model configuration:
68
- - model (str): The model identifier to use (e.g., 'gpt-3.5-turbo')
69
- - api_key (str): The OpenAI API key
70
- - base_url (str, optional): API endpoint URL
71
- - temperature (float, optional): Temperature parameter (default: 0.7)
72
- - max_tokens (int, optional): Maximum tokens (default: 4120)
73
- - stream (bool, optional): Whether to stream responses (default: True)
69
+ llm_configs (List[Dict]): List of LLM configurations, each containing:
70
+ - llm_id (str): Unique identifier for this LLM configuration
71
+ - llm_class: The LLM class to use (e.g., openai.AsyncOpenAI, anthropic.AsyncAnthropic)
72
+ - model_options (Dict): Configuration for the LLM:
73
+ - model (str): The model identifier to use
74
+ - api_key (str): API key
75
+ - base_url (str, optional): Custom API endpoint URL
76
+ - temperature (float, optional): Temperature parameter (default: 0.7)
77
+ - max_tokens (int, optional): Maximum tokens (default: 4120)
74
78
  system_prompt (str, optional): System prompt to set context for the LLM
75
79
  final_result_placeholder (str, optional): The placeholder name for the final result
76
80
  system_apply_to_all_prompts (bool): Whether to apply system prompt to all prompts
77
81
  """
78
- self.model = model_options["model"]
79
- self.temperature = model_options.get("temperature", 0.7)
80
- self.max_tokens = model_options.get("max_tokens", 4120)
81
- self.stream = model_options.get("stream", True)
82
+ # Initialize clients dictionary
83
+ self.clients = {}
84
+ self.default_client_id = None
82
85
 
83
- client_kwargs = {"api_key": model_options["api_key"]}
84
- if "base_url" in model_options:
85
- client_kwargs["base_url"] = model_options["base_url"]
86
- self.client = AsyncOpenAI(**client_kwargs)
86
+ # Set up each LLM client
87
+ for config in llm_configs:
88
+ llm_id = config.get("llm_id")
89
+ if not llm_id:
90
+ raise ValueError("Each LLM configuration must have a 'llm_id'")
91
+
92
+ llm_class = config.get("llm_class")
93
+ if not llm_class:
94
+ raise ValueError(f"LLM configuration '{llm_id}' must specify 'llm_class'")
95
+
96
+ model_options = config.get("model_options", {})
97
+ if "api_key" not in model_options:
98
+ raise ValueError(f"LLM configuration '{llm_id}' must include 'api_key' in model_options")
99
+
100
+ # Set the first client as default if not already set
101
+ if self.default_client_id is None:
102
+ self.default_client_id = llm_id
103
+
104
+ # Store common settings in the client record for easy access
105
+ client_kwargs = {"api_key": model_options["api_key"]}
106
+ if "base_url" in model_options:
107
+ client_kwargs["base_url"] = model_options["base_url"]
108
+
109
+ self.clients[llm_id] = {
110
+ "llm_id": llm_id,
111
+ "model": model_options.get("model", "gpt-3.5-turbo"),
112
+ "temperature": model_options.get("temperature", 0.7),
113
+ "max_tokens": model_options.get("max_tokens", 4120),
114
+ "client": ClientLLMSDK(llm_class, client_kwargs)
115
+ }
87
116
  self.system_prompt = system_prompt
88
117
  self.system_apply_to_all_prompts = system_apply_to_all_prompts
89
118
  self.final_result_placeholder = final_result_placeholder or "final_result"
@@ -91,7 +120,15 @@ class TasksPromptsChain:
91
120
  self._output_template = None
92
121
  self._final_output_template = None
93
122
  self._current_stream_buffer = ""
94
-
123
+
124
+ def get_reflection(self):
125
+ """
126
+ Report the LLM clients configured on this instance.
127
+
128
+ Returns:
129
+ dict: Mapping of each llm_id to its underlying SDK class name
130
+ """
131
+ return {llm_id: config["client"].llm_class_name for llm_id, config in self.clients.items()}
95
132
  def set_output_template(self, template: str) -> None:
96
133
  """
97
134
  Set the output template to be used for streaming responses.
@@ -121,7 +158,7 @@ class TasksPromptsChain:
121
158
  self._final_output_template=output
122
159
  return output
123
160
 
124
- async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]) -> AsyncGenerator[str, None]:
161
+ async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]], streamout = True) -> AsyncGenerator[str, None]:
125
162
  """
126
163
  Execute a chain of prompts sequentially, with placeholder replacement.
127
164
 
@@ -130,87 +167,89 @@ class TasksPromptsChain:
130
167
  {
131
168
  "prompt": str,
132
169
  "output_format": str,
133
- "output_placeholder": str
170
+ "output_placeholder": str,
171
+ "llm_id": str, # Optional: Specifies which LLM to use
172
+ "stop_placeholder": str # Optional: The stop string placeholder
134
173
  }
135
174
  Returns:
136
- List[str]: List of responses for each prompt
175
+ AsyncGenerator[str, None]: Generator yielding response chunks
137
176
  """
138
177
  responses = []
139
178
  placeholder_values = {}
140
179
 
141
180
  try:
142
181
  for i, prompt_data in enumerate(prompts):
143
- # Convert dict to PromptTemplate if necessary
182
+ # Convert dict to PromptTemplate if necessary and extract llm_id
183
+ llm_id = None
144
184
  if isinstance(prompt_data, dict):
145
185
  prompt_template = PromptTemplate(
146
186
  prompt=prompt_data["prompt"],
147
187
  output_format=prompt_data.get("output_format", "TEXT"),
148
- output_placeholder=prompt_data.get("output_placeholder")
188
+ output_placeholder=prompt_data.get("output_placeholder"),
189
+ llm_id= prompt_data.get("llm_id", self.default_client_id),
190
+ stop_placeholder=prompt_data.get("stop_placeholder", None)
149
191
  )
150
192
  else:
151
193
  prompt_template = prompt_data
194
+
195
+ # Validate the requested LLM exists
196
+ if prompt_template.llm_id not in self.clients:
197
+ raise ValueError(f"LLM with id '{prompt_template.llm_id}' not found. Available LLMs: {list(self.clients.keys())}")
198
+
199
+ # Get the client configuration
200
+ client_config = self.clients[prompt_template.llm_id]
201
+ client = client_config["client"]
202
+ model = client_config["model"]
203
+ temperature = client_config["temperature"]
204
+ max_tokens = client_config["max_tokens"]
152
205
 
153
206
  # Replace placeholders in the prompt
154
207
  current_prompt = prompt_template.prompt
155
208
  for placeholder, value in placeholder_values.items():
156
209
  current_prompt = current_prompt.replace(f"{{{{{placeholder}}}}}", value)
157
210
 
158
- wants_json_output = prompt_template.output_format == OutputFormat.JSON.value
159
211
  # Format system message based on output format
160
212
  format_instruction = ""
161
- if wants_json_output:
162
- format_instruction = f"\nPlease provide your response in {prompt_template.output_format}."
213
+ if prompt_template.output_format != OutputFormat.TEXT:
214
+ format_instruction = f"\nPlease provide your response in {prompt_template.output_format.value} format."
163
215
 
164
216
  messages = []
165
217
  if self.system_prompt and (i == 0 or self.system_apply_to_all_prompts):
166
218
  messages.append({"role": "system", "content": self.system_prompt})
167
-
219
+
168
220
  messages.append({"role": "user", "content": current_prompt + format_instruction})
169
221
 
170
- # Default completition.create parameters
171
- create_kwargs = {
172
- "model": self.model,
173
- "messages": messages,
174
- "temperature": self.temperature,
175
- "max_tokens": self.max_tokens,
176
- "stream": self.stream
177
- }
178
-
179
- # Check if used model is GPT
180
- is_gpt_model = self.model.lower().startswith("gpt")
181
- # GPT model text = {"format" : {"type": "json_object"}}
182
- if is_gpt_model and wants_json_output:
183
- create_kwargs["text"] = {
184
- "format": {
185
- "type": "json_object"
186
- }
187
- }
188
-
189
- # Non Gpt model response_format={"type": "json_object|text"}
190
- elif not is_gpt_model:
191
- create_kwargs["response_format"] = {
192
- "type": "json_object" if wants_json_output else "text"
193
- }
194
-
195
- stream = await self.client.chat.completions.create(**create_kwargs)
222
+ # Generate response using the selected LLM
223
+ streamResponse = client.generat_response(
224
+ model=model,
225
+ messages=messages,
226
+ temperature=temperature,
227
+ max_tokens=max_tokens,
228
+ stream=True
229
+ )
196
230
 
197
231
  response_content = ""
198
232
  self._current_stream_buffer = ""
199
233
 
200
- async for chunk in stream:
201
- if not chunk.choices or not chunk.choices[0].delta or not chunk.choices[0].delta.content:
202
- continue
203
-
204
- delta = chunk.choices[0].delta.content
205
- response_content += delta
206
- self._current_stream_buffer = response_content
207
- self._format_current_stream()
208
- responses.append(response_content)
209
- # Store response with placeholder if specified
210
- if prompt_template.output_placeholder:
211
- placeholder_values[prompt_template.output_placeholder] = response_content
212
- self._results[prompt_template.output_placeholder] = response_content
213
- yield delta
234
+ async for chunk in streamResponse:
235
+ if chunk is not None:
236
+ delta = chunk
237
+ response_content += delta
238
+ self._current_stream_buffer = response_content
239
+ self._format_current_stream()
240
+ responses.append(response_content)
241
+ # Store response with placeholder if specified
242
+ if prompt_template.output_placeholder:
243
+ placeholder_values[prompt_template.output_placeholder] = response_content
244
+ self._results[prompt_template.output_placeholder] = response_content
245
+ if streamout:
246
+ yield delta
247
+
248
+ # Stop excution if the stop_placeholder is detected in the response content
249
+ if prompt_template.stop_placeholder and (prompt_template.stop_placeholder in response_content):
250
+ raise Exception({"type": "error", "content": "Invalid project description. Please provide a valid project description."})
251
+ #yield json.dumps({"type": "error", "content": "Invalid project description. Please provide a valid project description."})
252
+ #return
214
253
 
215
254
  except Exception as e:
216
255
  raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
@@ -218,6 +257,7 @@ class TasksPromptsChain:
218
257
  # Store the last response with the final result placeholder
219
258
  if responses:
220
259
  self._results[self.final_result_placeholder] = responses[-1]
260
+ yield "<tasks-sys>Done</tasks-sys>"
221
261
 
222
262
  def get_result(self, placeholder: str) -> Optional[str]:
223
263
  """
@@ -253,4 +293,3 @@ class TasksPromptsChain:
253
293
  if len(self._results) > 0:
254
294
  raise Exception("template_output must be called before execute_chain")
255
295
  self.set_output_template(template)
256
-
@@ -0,0 +1,412 @@
1
+ Metadata-Version: 2.4
2
+ Name: tasks_prompts_chain
3
+ Version: 0.1.1
4
+ Summary: A Python library for creating and executing chains of prompts using multiple LLM providers with streaming support and template formatting.
5
+ Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
6
+ Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
7
+ Author-email: Samir Ben Sghaier <ben.sghaier.samir@gmail.com>
8
+ License-Expression: Apache-2.0
9
+ License-File: LICENSE
10
+ Classifier: Operating System :: OS Independent
11
+ Classifier: Programming Language :: Python :: 3
12
+ Requires-Python: >=3.8
13
+ Description-Content-Type: text/markdown
14
+
15
+ # TasksPromptsChain
16
+
17
+ A Mini Python library for creating and executing chains of prompts using multiple LLM providers with streaming support and output template formatting.
18
+
19
+ ## Features
20
+
21
+ - Sequential prompt chain execution
22
+ - Streaming responses
23
+ - Template-based output formatting
24
+ - System prompt support
25
+ - Placeholder replacement between prompts
26
+ - Stop placeholder string that can be defined per prompt; if the LLM responds with it, the prompt chain is interrupted, acting as an LLM error handler
27
+ - Multiple output formats (JSON, Markdown, CSV, Text)
28
+ - Async/await support
29
+ - Support for multiple LLM providers (OpenAI, Anthropic, Cerebras, etc.)
30
+ - Multi-model support - use different models for different prompts in the chain
31
+
32
+ ## Dependencies
33
+
34
+ Please install typing-extensions and the SDK for your preferred LLM providers:
35
+
36
+ For OpenAI:
37
+ ```bash
38
+ pip install typing-extensions
39
+ pip install openai
40
+ ```
41
+
42
+ For Anthropic:
43
+ ```bash
44
+ pip install typing-extensions
45
+ pip install anthropic
46
+ ```
47
+
48
+ For Cerebras:
49
+ ```bash
50
+ pip install typing-extensions
51
+ pip install cerebras
52
+ ```
53
+
54
+ To install the library:
55
+ ```
56
+ pip install tasks_prompts_chain
57
+ ```
58
+
59
+ ## Installation from source code
60
+
61
+ ### For users installing from the GitHub source repo
62
+ ```bash
63
+ pip install -r requirements/requirements.txt
64
+ ```
65
+
66
+ ### For developers installing from the GitHub source repo
67
+ ```bash
68
+ pip install -r requirements/requirements.txt
69
+ pip install -r requirements/requirements-dev.txt
70
+ ```
71
+
72
+ ## Quick Start
73
+
74
+ ```python
75
+ from tasks_prompts_chain import TasksPromptsChain
76
+ from openai import AsyncOpenAI
77
+ from anthropic import AsyncAnthropic
78
+ from cerebras import AsyncCerebras
79
+
80
+ async def main():
81
+ # Initialize the chain with multiple LLM configurations
82
+ llm_configs = [
83
+ {
84
+ "llm_id": "gpt", # Unique identifier for this LLM
85
+ "llm_class": AsyncOpenAI, # LLM SDK class
86
+ "model_options": {
87
+ "model": "gpt-4o",
88
+ "api_key": "your-openai-api-key",
89
+ "temperature": 0.1,
90
+ "max_tokens": 4120,
91
+ }
92
+ },
93
+ {
94
+ "llm_id": "claude", # Unique identifier for this LLM
95
+ "llm_class": AsyncAnthropic, # LLM SDK class
96
+ "model_options": {
97
+ "model": "claude-3-sonnet-20240229",
98
+ "api_key": "your-anthropic-api-key",
99
+ "temperature": 0.1,
100
+ "max_tokens": 8192,
101
+ }
102
+ },
103
+ {
104
+ "llm_id": "llama", # Unique identifier for this LLM
105
+ "llm_class": AsyncCerebras, # LLM SDK class
106
+ "model_options": {
107
+ "model": "llama-3.3-70b",
108
+ "api_key": "your-cerebras-api-key",
109
+ "base_url": "https://api.cerebras.ai/v1",
110
+ "temperature": 0.1,
111
+ "max_tokens": 4120,
112
+ }
113
+ }
114
+ ]
115
+
116
+ chain = TasksPromptsChain(
117
+ llm_configs,
118
+ final_result_placeholder="design_result"
119
+ )
120
+
121
+ # Define your prompts - specify which LLM to use for each prompt
122
+ prompts = [
123
+ {
124
+ "prompt": "Create a design concept for a luxury chocolate bar, if not inspired respond with this string %%cant_be_inspired%% so the chain prompt query will be stopped",
125
+ "output_format": "TEXT",
126
+ "output_placeholder": "design_concept",
127
+ "llm_id": "gpt", # Use the GPT model for this prompt
128
+ "stop_placholder": "%%cant_be_inspired%%" # the stop placeholder string that will interrupt the prompt chain query
129
+ },
130
+ {
131
+ "prompt": "Based on this concept: {{design_concept}}, suggest a color palette",
132
+ "output_format": "JSON",
133
+ "output_placeholder": "color_palette",
134
+ "llm_id": "claude" # Use the Claude model for this prompt
135
+ },
136
+ {
137
+ "prompt": "Based on the design and colors: {{design_concept}} and {{color_palette}}, suggest packaging materials",
138
+ "output_format": "MARKDOWN",
139
+ "output_placeholder": "packaging",
140
+ "llm_id": "llama" # Use the Cerebras model for this prompt
141
+ }
142
+ ]
143
+
144
+ # Stream the responses
145
+ async for chunk in chain.execute_chain(prompts):
146
+ print(chunk, end="", flush=True)
147
+
148
+ # Get specific results
149
+ design = chain.get_result("design_concept")
150
+ colors = chain.get_result("color_palette")
151
+ packaging = chain.get_result("packaging")
152
+ ```
153
+
154
+ ## Advanced Usage
155
+
156
+ ### Using System Prompts
157
+
158
+ ```python
159
+ chain = TasksPromptsChain(
160
+ llm_configs=[
161
+ {
162
+ "llm_id": "default_model",
163
+ "llm_class": AsyncOpenAI,
164
+ "model_options": {
165
+ "model": "gpt-4o",
166
+ "api_key": "your-openai-api-key",
167
+ "temperature": 0.1,
168
+ "max_tokens": 4120,
169
+ }
170
+ }
171
+ ],
172
+ final_result_placeholder="result",
173
+ system_prompt="You are a professional design expert specialized in luxury products",
174
+ system_apply_to_all_prompts=True
175
+ )
176
+ ```
177
+
178
+ ### Using Cerebras Models
179
+
180
+ ```python
181
+ from cerebras import AsyncCerebras
182
+
183
+ llm_configs = [
184
+ {
185
+ "llm_id": "cerebras",
186
+ "llm_class": AsyncCerebras,
187
+ "model_options": {
188
+ "model": "llama-3.3-70b",
189
+ "api_key": "your-cerebras-api-key",
190
+ "base_url": "https://api.cerebras.ai/v1",
191
+ "temperature": 0.1,
192
+ "max_tokens": 4120,
193
+ }
194
+ }
195
+ ]
196
+
197
+ chain = TasksPromptsChain(
198
+ llm_configs,
199
+ final_result_placeholder="result",
200
+ )
201
+ ```
202
+
203
+ ### Custom API Endpoint
204
+
205
+ ```python
206
+ llm_configs = [
207
+ {
208
+ "llm_id": "custom_endpoint",
209
+ "llm_class": AsyncOpenAI,
210
+ "model_options": {
211
+ "model": "your-custom-model",
212
+ "api_key": "your-api-key",
213
+ "base_url": "https://your-custom-endpoint.com/v1",
214
+ "temperature": 0.1,
215
+ "max_tokens": 4120,
216
+ }
217
+ }
218
+ ]
219
+
220
+ chain = TasksPromptsChain(
221
+ llm_configs,
222
+ final_result_placeholder="result",
223
+ )
224
+ ```
225
+
226
+ ### Using Templates
227
+
228
+ Call this setter before executing the prompt chain (chain.execute_chain(prompts)):
229
+
230
+ ```python
231
+ # Set output template before execution
232
+ chain.template_output("""
233
+ <result>
234
+ <design>
235
+ ### Design Concept:
236
+ {{design_concept}}
237
+ </design>
238
+
239
+ <colors>
240
+ ### Color Palette:
241
+ {{color_palette}}
242
+ </colors>
243
+ </result>
244
+ """)
245
+ ```
246
+ Then retrieve the final result rendered within the template:
247
+
248
+ ```python
249
+ # print out the final result in the formatted template
250
+ print(chain.get_final_result_within_template())
251
+ ```
252
+
253
+
254
+ ## API Reference
255
+
256
+ ### TasksPromptsChain Class
257
+
258
+ #### Constructor Parameters
259
+
260
+ - `llm_configs` (List[Dict]): List of LLM configurations, each containing:
261
+ - `llm_id` (str): Unique identifier for this LLM configuration
262
+ - `llm_class`: The LLM class to use (e.g., `AsyncOpenAI`, `AsyncAnthropic`, `AsyncCerebras`)
263
+ - `model_options` (Dict): Configuration for the LLM:
264
+ - `model` (str): The model identifier
265
+ - `api_key` (str): Your API key for the LLM provider
266
+ - `temperature` (float): Temperature setting for response generation
267
+ - `max_tokens` (int): Maximum tokens in generated responses
268
+ - `base_url` (Optional[str]): Custom API endpoint URL
269
+ - `system_prompt` (Optional[str]): System prompt for context
270
+ - `final_result_placeholder` (str): Name for the final result placeholder
271
+ - `system_apply_to_all_prompts` (Optional[bool]): Apply system prompt to all prompts
272
+
273
+ #### Methods
274
+
275
+ - `execute_chain(prompts: List[Dict], streamout: bool = True) -> AsyncGenerator[str, None]`
276
+ - Executes the prompt chain and streams responses
277
+
278
+ - `template_output(template: str) -> None`
279
+ - Sets the output template format
280
+
281
+ - `get_final_result_within_template() -> Optional[str]`
283
+ - Retrieves the final result rendered with the template defined via `template_output()`
283
+
284
+ - `get_result(placeholder: str) -> Optional[str]`
285
+ - Retrieves a specific result by placeholder
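
The `streamout` flag and the `get_reflection()` helper introduced in 0.1.1 are not shown in the Quick Start, so here is a minimal usage sketch. It assumes `chain` and `prompts` are configured exactly as in the Quick Start above; with `streamout=False` the chain still executes every prompt, but nothing is yielded for intermediate chunks, and results are read back afterwards via `get_result()`.

```python
import asyncio

async def run_silently():
    # Inspect which LLM clients are configured (llm_id -> SDK class name)
    print(chain.get_reflection())

    # Drain the generator without printing intermediate chunks
    async for _ in chain.execute_chain(prompts, streamout=False):
        pass

    # Read back stored results by placeholder
    print(chain.get_result("design_concept"))

asyncio.run(run_silently())
```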
286
+
287
+ ### Prompt Format
288
+
289
+ Each prompt in the chain can be defined as a dictionary:
290
+ ```python
291
+ {
292
+ "prompt": str, # The actual prompt text
293
+ "output_format": str, # "JSON", "MARKDOWN", "CSV", or "TEXT"
294
+ "output_placeholder": str, # Identifier for accessing this result
295
+ "llm_id": str, # Optional: ID of the LLM to use for this prompt
296
+ "stop_placholder": str # Optional: The stop string placeholder that may interrupt the chaining prompt query, defined in a prompt, and may be returned by the LLM
297
+ }
298
+ ```
299
+
300
+ ## Supported LLM Providers
301
+
302
+ TasksPromptsChain currently supports the following LLM providers:
303
+
304
+ 1. **OpenAI** - via `AsyncOpenAI` from the `openai` package
305
+ 2. **Anthropic** - via `AsyncAnthropic` from the `anthropic` package
306
+ 3. **Cerebras** - via `AsyncCerebras` from the `cerebras` package
307
+
308
+ Each provider has different capabilities and models. The library adapts the API calls to work with each provider's specific requirements.
309
+
310
+ ## Error Handling
311
+
312
+ The library includes comprehensive error handling:
313
+ - Template validation
314
+ - API error handling
315
+ - Placeholder validation
316
+ - LLM validation (checks if specified LLM ID exists)
317
+ - Stop placeholder detection (interrupts chain execution when the LLM returns the defined stop_placeholder)
318
+
319
+ Errors are raised with descriptive messages indicating the specific issue and prompt number where the error occurred.
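
As an illustration, a minimal sketch of handling these errors at the call site, assuming `chain` and `prompts` are configured as in the Quick Start above (with a `stop_placeholder` defined on a prompt):

```python
async def run_with_error_handling():
    try:
        async for chunk in chain.execute_chain(prompts):
            print(chunk, end="", flush=True)
    except Exception as e:
        # Raised when a prompt fails or the stop placeholder is detected;
        # the message includes the prompt index where execution stopped.
        print(f"\nChain interrupted: {e}")
```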
320
+
321
+ ## Best Practices
322
+
323
+ 1. Always set templates before executing the chain
324
+ 2. Use meaningful placeholder names
325
+ 3. Define a stop_placeholder in each prompt to catch LLM-reported failures and stop subsequent requests
326
+ 4. Handle streaming responses appropriately
327
+ 5. Choose appropriate models for different types of tasks
328
+ 6. Use system prompts for consistent context
329
+ 7. Select the best provider for specific tasks:
330
+ - OpenAI is great for general purpose applications
331
+ - Anthropic (Claude) excels at longer contexts and complex reasoning
332
+ - Cerebras is excellent for high-performance AI tasks
333
+
334
+ ## How You Can Get Involved
335
+ ✅ Try out tasks_prompts_chain: Give our software a try in your own setup and let us know how it goes - your experience helps us improve!
336
+
337
+ ✅ Find a bug: Found something that doesn't work quite right? We'd appreciate your help in documenting it so we can fix it together.
338
+
339
+ ✅ Fixing Bugs: Even small code contributions make a big difference! Pick an issue that interests you and share your solution with us.
340
+
341
+ ✅ Share your thoughts: Have an idea that would make this project more useful? We're excited to hear your thoughts and explore new possibilities together!
342
+
343
+ Your contributions, big or small, truly matter to us. We're grateful for any help you can provide and look forward to welcoming you to our community!
344
+
345
+ ### Developer Contribution Workflow
346
+ 1. Fork the Repository: Create your own copy of the project by clicking the "Fork" button on our GitHub repository.
347
+ 2. Clone Your Fork:
348
+ ``` bash
349
+ git clone git@github.com:<your-username>/tasks_prompts_chain.git
350
+ cd tasks_prompts_chain/
351
+ ```
352
+ 3. Set Up Development Environment
353
+ ``` bash
354
+ # Create and activate a virtual environment
355
+ python3 -m venv .venv
356
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
357
+
358
+ # Install development dependencies
359
+ pip install -r requirements/requirements-dev.txt
360
+ ```
361
+ 4. Stay Updated
362
+ ```bash
363
+ # Add the upstream repository
364
+ git remote add upstream https://github.com/original-owner/tasks_prompts_chain.git
365
+
366
+ # Fetch latest changes from upstream
367
+ git fetch upstream
368
+ git merge upstream/main
369
+ ```
370
+ #### Making Changes
371
+ 1. Create a Feature Branch
372
+ ```bash
373
+ git checkout -b feature/your-feature-name
374
+ # or
375
+ git checkout -b bugfix/issue-you-are-fixing
376
+ ```
377
+ 2. Implement Your Changes
378
+ - Write tests for your changes when applicable
379
+ - Ensure existing tests pass with pytest
380
+ - Follow our code style guidelines
381
+
382
+ 3. Commit Your Changes
383
+ ```bash
384
+ git add .
385
+ git commit -m "Your descriptive commit message"
386
+ ```
387
+ 4. Push to Your Fork
388
+ ```bash
389
+ git push origin feature/your-feature-name
390
+ ```
391
+ 5. Create a Pull Request
392
+ 6. Code Review Process
393
+ - Maintainers will review your PR
394
+ - Address any requested changes
395
+ - Once approved, your contribution will be merged!
396
+
397
+ ## Release Notes
398
+
399
+ ### 0.1.0 - Breaking Changes
400
+
401
+ - **Complete API redesign**: The constructor now requires a list of LLM configurations instead of a single LLM class
402
+ - **Multi-model support**: Use different models for different prompts in the chain
403
+ - **Constructor changes**: Replaced `AsyncLLmAi` and `model_options` with `llm_configs`
404
+ - **New provider support**: Added official support for Cerebras models
405
+ - **Removed dependencies**: No longer directly depends on OpenAI SDK
406
+ - **Prompt configuration**: Added `llm_id` field to prompt dictionaries to specify which LLM to use
407
+
408
+ Users upgrading from version 0.0.x will need to modify their code to use the new API structure.
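
For example, a constructor call written against 0.0.x translates roughly as follows (a sketch based on the Quick Start examples of both versions; substitute your own model and API key):

```python
from openai import AsyncOpenAI
from tasks_prompts_chain import TasksPromptsChain

# 0.0.x style (no longer supported):
# chain = TasksPromptsChain(
#     model_options={"model": "gpt-3.5-turbo", "api_key": "your-api-key",
#                    "temperature": 0.1, "max_tokens": 4120, "stream": True},
#     final_result_placeholder="design_result",
# )

# 0.1.x style: pass a list of LLM configurations instead
chain = TasksPromptsChain(
    llm_configs=[
        {
            "llm_id": "default_model",
            "llm_class": AsyncOpenAI,
            "model_options": {
                "model": "gpt-3.5-turbo",
                "api_key": "your-api-key",
                "temperature": 0.1,
                "max_tokens": 4120,
            },
        }
    ],
    final_result_placeholder="design_result",
)
```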
409
+
410
+ ## License
411
+
412
+ MIT License
@@ -0,0 +1,7 @@
1
+ tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
2
+ tasks_prompts_chain/client_llm_sdk.py,sha256=ifwsecvykP5RalXMqolAClPHdpmv-5hF4eT61dxo1f8,3708
3
+ tasks_prompts_chain/tasks_prompts_chain.py,sha256=T8WaqAdgDBqHjNa48dPgny_mDnEN1OvMTFTX6YndGgo,13409
4
+ tasks_prompts_chain-0.1.1.dist-info/METADATA,sha256=1zVlU2deVaP-Fv_Zj6ph6LcDJrJOD4qSffehnp1umRM,13331
5
+ tasks_prompts_chain-0.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
6
+ tasks_prompts_chain-0.1.1.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
7
+ tasks_prompts_chain-0.1.1.dist-info/RECORD,,
@@ -1,222 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: tasks_prompts_chain
3
- Version: 0.0.6
4
- Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
5
- Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
6
- Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
7
- Author-email: Samir Ben Sghaier <ben.sghaier.samir@gmail.com>
8
- License-Expression: Apache-2.0
9
- License-File: LICENSE
10
- Classifier: Operating System :: OS Independent
11
- Classifier: Programming Language :: Python :: 3
12
- Requires-Python: >=3.8
13
- Description-Content-Type: text/markdown
14
-
15
- # TasksPromptsChain
16
-
17
- A Mini Python library for creating and executing chains of prompts using OpenAI's API with streaming support and output template formatting.
18
-
19
- ## Features
20
-
21
- - Sequential prompt chain execution
22
- - Streaming responses
23
- - Template-based output formatting
24
- - System prompt support
25
- - Placeholder replacement between prompts
26
- - Multiple output formats (JSON, Markdown, CSV, Text)
27
- - Async/await support
28
-
29
- ## Dependancies
30
-
31
- Please install typing-extensions and openai python packages
32
- ```bash
33
- pip install typing-extensions
34
- pip install openai
35
- ```
36
- To Install the library:
37
- ```
38
- pip install tasks_prompts_chain
39
- ```
40
-
41
- ## Installation from source code
42
-
43
- ### For Users required from source gitHub repo
44
- ```bash
45
- pip install -r requirements/requirements.txt
46
- ```
47
-
48
- ### For Developers required from source gitHub repo
49
- ```bash
50
- pip install -r requirements/requirements.txt
51
- pip install -r requirements/requirements-dev.txt
52
- ```
53
-
54
- ## Quick Start
55
-
56
- ```python
57
- from tasks_prompts_chain import TasksPromptsChain
58
-
59
- async def main():
60
- # Initialize the chain
61
- chain = TasksPromptsChain(
62
- model_options={
63
- "model": "gpt-3.5-turbo",
64
- "api_key": "your-api-key",
65
- "temperature": 0.1,
66
- "max_tokens": 4120,
67
- "stream": True
68
- },
69
- final_result_placeholder="design_result"
70
- )
71
-
72
- # Define your prompts
73
- prompts = [
74
- {
75
- "prompt": "Create a design concept for a luxury chocolate bar",
76
- "output_format": "TEXT",
77
- "output_placeholder": "design_concept"
78
- },
79
- {
80
- "prompt": "Based on this concept: {{design_concept}}, suggest a color palette",
81
- "output_format": "JSON",
82
- "output_placeholder": "color_palette"
83
- }
84
- ]
85
-
86
- # Stream the responses
87
- async for chunk in chain.execute_chain(prompts):
88
- print(chunk, end="", flush=True)
89
-
90
- # Get specific results
91
- design = chain.get_result("design_concept")
92
- colors = chain.get_result("color_palette")
93
- ```
94
-
95
- ## Advanced Usage
96
-
97
- ### Using System Prompts
98
-
99
- ```python
100
- chain = TasksPromptsChain(
101
- model_options={
102
- "model": "gpt-3.5-turbo",
103
- "api_key": "your-api-key",
104
- "temperature": 0.1,
105
- "max_tokens": 4120,
106
- "stream": True
107
- },
108
- final_result_placeholder="result",
109
- system_prompt="You are a professional design expert specialized in luxury products",
110
- system_apply_to_all_prompts=True
111
- )
112
- ```
113
-
114
- ### Custom API Endpoint
115
-
116
- ```python
117
- chain = TasksPromptsChain(
118
- model_options={
119
- "model": "your-custom-model",
120
- "api_key": "your-api-key",
121
- "base_url": "https://your-custom-endpoint.com/v1",
122
- "temperature": 0.1,
123
- "max_tokens": 4120,
124
- "stream": True
125
- },
126
- final_result_placeholder="result",
127
- )
128
- ```
129
-
130
- ### Using Templates
131
-
132
- You must call this set method befor the excution of the prompting query (chain.execute_chain(prompts))
133
-
134
- ```python
135
- # Set output template before execution
136
- chain.template_output("""
137
- <result>
138
- <design>
139
- ### Design Concept:
140
- {{design_concept}}
141
- </design>
142
-
143
- <colors>
144
- ### Color Palette:
145
- {{color_palette}}
146
- </colors>
147
- </result>
148
- """)
149
- ```
150
- then retrieves the final result within the template :
151
-
152
- ```python
153
- # print out the final result in the well formated template
154
- print(chain.get_final_result_within_template())
155
- ```
156
-
157
-
158
- ## API Reference
159
-
160
- ### TasksPromptsChain Class
161
-
162
- #### Constructor Parameters
163
-
164
- - `model` (str): The model identifier (e.g., 'gpt-3.5-turbo')
165
- - `api_key` (str): Your OpenAI API key
166
- - `final_result_placeholder` (str): Name for the final result placeholder
167
- - `system_prompt` (Optional[str]): System prompt for context
168
- - `system_apply_to_all_prompts` (Optional[bool]): Apply system prompt to all prompts
169
- - `base_url` (Optional[str]): Custom API endpoint URL
170
-
171
- #### Methods
172
-
173
- - `execute_chain(prompts: List[Dict], temperature: float = 0.7) -> AsyncGenerator[str, None]`
174
- - Executes the prompt chain and streams responses
175
-
176
- - `template_output(template: str) -> None`
177
- - Sets the output template format
178
-
179
- - `get_final_result_within_template(self) -> Optional[str]`
180
- - Retrieves the final query result with the defined template in template_output();
181
-
182
- - `get_result(placeholder: str) -> Optional[str]`
183
- - Retrieves a specific result by placeholder
184
-
185
- ### Prompt Format
186
-
187
- Each prompt in the chain can be defined as a dictionary:
188
- ```python
189
- {
190
- "prompt": str, # The actual prompt text
191
- "output_format": str, # "JSON", "MARKDOWN", "CSV", or "TEXT"
192
- "output_placeholder": str # Identifier for accessing this result
193
- }
194
- ```
195
-
196
- ## Error Handling
197
-
198
- The library includes comprehensive error handling:
199
- - Template validation
200
- - API error handling
201
- - Placeholder validation
202
-
203
- Errors are raised with descriptive messages indicating the specific issue and prompt number where the error occurred.
204
-
205
- ## Best Practices
206
-
207
- 1. Always set templates before executing the chain
208
- 2. Use meaningful placeholder names
209
- 3. Handle streaming responses appropriately
210
- 4. Consider temperature settings based on your use case
211
- 5. Use system prompts for consistent context
212
-
213
- ## Compatible Models
214
-
215
- - Llama3.3
216
- - llama-4-scout
217
- - claude-3-7-sonnet-20250219 (Other claude version)
218
- - ChatGpt's
219
-
220
- ## License
221
-
222
- MIT License
@@ -1,6 +0,0 @@
1
- tasks_prompts_chain/__init__.py,sha256=HVhC_vMTYCyZW6vnoErHh-TkAnNRqJ2JJqClJQSfU8Y,148
2
- tasks_prompts_chain/tasks_prompts_chain.py,sha256=DUKe5PseHKA9ownVXQpawcdAKGcwHvHtx1U_st2hT_8,10924
3
- tasks_prompts_chain-0.0.6.dist-info/METADATA,sha256=RNClygTof_WT8IryvMw4SeoVgWZeMYSKY5SlfLUwk4I,5892
4
- tasks_prompts_chain-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
- tasks_prompts_chain-0.0.6.dist-info/licenses/LICENSE,sha256=WYmcYJG1QFgu1hfo7qrEkZ3Jhcz8NUWe6XUraZvlIFs,10172
6
- tasks_prompts_chain-0.0.6.dist-info/RECORD,,