tasks-prompts-chain 0.0.3__tar.gz → 0.0.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tasks_prompts_chain
3
- Version: 0.0.3
3
+ Version: 0.0.4
4
4
  Summary: A Python library for creating and executing chains of prompts using OpenAI's SDK with streaming support and template formatting.
5
5
  Project-URL: Homepage, https://github.com/smirfolio/tasks_prompts_chain
6
6
  Project-URL: Issues, https://github.com/smirfolio/tasks_prompts_chain/issues
@@ -89,25 +89,6 @@ async def main():
89
89
 
90
90
  ## Advanced Usage
91
91
 
92
- ### Using Templates
93
-
94
- ```python
95
- # Set output template before execution
96
- chain.template_output("""
97
- <result>
98
- <design>
99
- ### Design Concept:
100
- {{design_concept}}
101
- </design>
102
-
103
- <colors>
104
- ### Color Palette:
105
- {{color_palette}}
106
- </colors>
107
- </result>
108
- """)
109
- ```
110
-
111
92
  ### Using System Prompts
112
93
 
113
94
  ```python
@@ -131,6 +112,34 @@ chain = TasksPromptsChain(
131
112
  )
132
113
  ```
133
114
 
115
+ ### Using Templates
116
+
117
+ You must call this set method before the execution of the prompting query (chain.execute_chain(prompts))
118
+
119
+ ```python
120
+ # Set output template before execution
121
+ chain.template_output("""
122
+ <result>
123
+ <design>
124
+ ### Design Concept:
125
+ {{design_concept}}
126
+ </design>
127
+
128
+ <colors>
129
+ ### Color Palette:
130
+ {{color_palette}}
131
+ </colors>
132
+ </result>
133
+ """)
134
+ ```
135
+ Then retrieve the final result within the template:
136
+
137
+ ```python
138
+ # print out the final result in the well-formatted template
139
+ print(chain.get_final_result_within_template())
140
+ ```
141
+
142
+
134
143
  ## API Reference
135
144
 
136
145
  ### TasksPromptsChain Class
@@ -152,6 +161,9 @@ chain = TasksPromptsChain(
152
161
  - `template_output(template: str) -> None`
153
162
  - Sets the output template format
154
163
 
164
+ - `get_final_result_within_template(self) -> Optional[str]`
165
+ - Retrieves the final query result using the template defined via template_output()
166
+
155
167
  - `get_result(placeholder: str) -> Optional[str]`
156
168
  - Retrieves a specific result by placeholder
157
169
 
@@ -75,25 +75,6 @@ async def main():
75
75
 
76
76
  ## Advanced Usage
77
77
 
78
- ### Using Templates
79
-
80
- ```python
81
- # Set output template before execution
82
- chain.template_output("""
83
- <result>
84
- <design>
85
- ### Design Concept:
86
- {{design_concept}}
87
- </design>
88
-
89
- <colors>
90
- ### Color Palette:
91
- {{color_palette}}
92
- </colors>
93
- </result>
94
- """)
95
- ```
96
-
97
78
  ### Using System Prompts
98
79
 
99
80
  ```python
@@ -117,6 +98,34 @@ chain = TasksPromptsChain(
117
98
  )
118
99
  ```
119
100
 
101
+ ### Using Templates
102
+
103
+ You must call this set method before the execution of the prompting query (chain.execute_chain(prompts))
104
+
105
+ ```python
106
+ # Set output template before execution
107
+ chain.template_output("""
108
+ <result>
109
+ <design>
110
+ ### Design Concept:
111
+ {{design_concept}}
112
+ </design>
113
+
114
+ <colors>
115
+ ### Color Palette:
116
+ {{color_palette}}
117
+ </colors>
118
+ </result>
119
+ """)
120
+ ```
121
+ Then retrieve the final result within the template:
122
+
123
+ ```python
124
+ # print out the final result in the well-formatted template
125
+ print(chain.get_final_result_within_template())
126
+ ```
127
+
128
+
120
129
  ## API Reference
121
130
 
122
131
  ### TasksPromptsChain Class
@@ -138,6 +147,9 @@ chain = TasksPromptsChain(
138
147
  - `template_output(template: str) -> None`
139
148
  - Sets the output template format
140
149
 
150
+ - `get_final_result_within_template(self) -> Optional[str]`
151
+ - Retrieves the final query result using the template defined via template_output()
152
+
141
153
  - `get_result(placeholder: str) -> Optional[str]`
142
154
  - Retrieves a specific result by placeholder
143
155
 
@@ -3,7 +3,7 @@ requires = ["hatchling"]
3
3
  build-backend = "hatchling.build"
4
4
  [project]
5
5
  name = "tasks_prompts_chain"
6
- version = "0.0.3"
6
+ version = "0.0.4"
7
7
  authors = [
8
8
  { name="Samir Ben Sghaier", email="ben.sghaier.samir@gmail.com" },
9
9
  ]
@@ -121,7 +121,7 @@ class TasksPromptsChain:
121
121
  self._final_output_template=output
122
122
  return output
123
123
 
124
- async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]], temperature: float = 0.7) -> AsyncGenerator[str, None]:
124
+ async def execute_chain(self, prompts: List[Union[Dict, PromptTemplate]]) -> AsyncGenerator[str, None]:
125
125
  """
126
126
  Execute a chain of prompts sequentially, with placeholder replacement.
127
127
 
@@ -132,8 +132,6 @@ class TasksPromptsChain:
132
132
  "output_format": str,
133
133
  "output_placeholder": str
134
134
  }
135
- temperature (float): Temperature parameter for response generation (0.0 to 1.0)
136
-
137
135
  Returns:
138
136
  List[str]: List of responses for each prompt
139
137
  """
@@ -184,14 +182,12 @@ class TasksPromptsChain:
184
182
  response_content += delta
185
183
  self._current_stream_buffer = response_content
186
184
  self._format_current_stream()
187
- yield response_content
188
-
189
- responses.append(response_content)
190
-
191
- # Store response with placeholder if specified
192
- if prompt_template.output_placeholder:
193
- placeholder_values[prompt_template.output_placeholder] = response_content
194
- self._results[prompt_template.output_placeholder] = response_content
185
+ responses.append(response_content)
186
+ # Store response with placeholder if specified
187
+ if prompt_template.output_placeholder:
188
+ placeholder_values[prompt_template.output_placeholder] = response_content
189
+ self._results[prompt_template.output_placeholder] = response_content
190
+ yield response_content
195
191
 
196
192
  except Exception as e:
197
193
  raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
@@ -235,45 +231,3 @@ class TasksPromptsChain:
235
231
  raise Exception("template_output must be called before execute_chain")
236
232
  self.set_output_template(template)
237
233
 
238
- def execute_chain_with_system(self, prompts: List[str], system_prompt: str, temperature: float = 0.7) -> List[str]:
239
- """
240
- Execute a chain of prompts with a system prompt included.
241
-
242
- Args:
243
- prompts (List[str]): List of prompts to process in sequence
244
- system_prompt (str): System prompt to set context for all interactions
245
- temperature (float): Temperature parameter for response generation (0.0 to 1.0)
246
-
247
- Returns:
248
- List[str]: List of responses for each prompt
249
- """
250
- responses = []
251
- context = ""
252
-
253
- try:
254
- for i, prompt in enumerate(prompts):
255
- # Combine previous context with current prompt if not the first prompt
256
- full_prompt = f"{context}\n{prompt}" if context else prompt
257
-
258
- response = self.client.chat.completions.create(
259
- model=self.model,
260
- messages=[
261
- {"role": "system", "content": system_prompt},
262
- {"role": "user", "content": full_prompt}
263
- ],
264
- temperature=temperature,
265
- max_tokens=4120,
266
- stream=True
267
- )
268
-
269
- # Extract the response content
270
- response_content = response.choices[0].message.content
271
- responses.append(response_content)
272
-
273
- # Update context for next iteration
274
- context = response_content
275
-
276
- except Exception as e:
277
- raise Exception(f"Error in prompt chain execution at prompt {i}: {str(e)}")
278
-
279
- return responses