lionagi 0.5.2__py3-none-any.whl → 0.5.4__py3-none-any.whl
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- lionagi/__init__.py +4 -1
- lionagi/core/session/branch.py +3 -2
- lionagi/core/session/session.py +3 -3
- lionagi/libs/func/async_calls/alcall.py +7 -0
- lionagi/operations/brainstorm/brainstorm.py +319 -94
- lionagi/operations/brainstorm/prompt.py +8 -1
- lionagi/operations/plan/plan.py +280 -67
- lionagi/operations/plan/prompt.py +17 -16
- lionagi/protocols/operatives/instruct.py +0 -6
- lionagi/protocols/operatives/prompts.py +50 -203
- lionagi/version.py +1 -1
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/METADATA +2 -2
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/RECORD +15 -15
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/WHEEL +0 -0
- {lionagi-0.5.2.dist-info → lionagi-0.5.4.dist-info}/licenses/LICENSE +0 -0
lionagi/__init__.py
CHANGED
@@ -4,7 +4,8 @@ import logging
 
 from dotenv import load_dotenv
 
-from .core.session.types import Branch
+from .core.session.types import Branch, Session
+from .integrations.litellm_.imodel import LiteiModel
 from .protocols.operatives.step import Step
 from .service import iModel
 from .settings import Settings
@@ -20,6 +21,8 @@ __all__ = [
     "LiteiModel",
     "Branch",
     "Step",
+    "Session",
+    "LiteiModel",
 ]
 
 logger = logging.getLogger(__name__)
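The practical effect of this hunk is that Session and LiteiModel now resolve at the package root. A minimal sketch, assuming lionagi 0.5.4 is installed (note the duplicate "LiteiModel" entry in __all__ ships as shown; it is redundant but harmless):

    # Both names now import directly from the package root; previously Session
    # lived only under lionagi.core.session.types.
    from lionagi import Branch, LiteiModel, Session, Step, iModel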
lionagi/core/session/branch.py
CHANGED
@@ -9,6 +9,7 @@ from pydantic import model_validator
 
 from lionagi.core.generic.types import Component, LogManager, Pile, Progression
 from lionagi.core.typing import ID
+from lionagi.integrations.litellm_.imodel import LiteiModel
 from lionagi.protocols.operatives.instruct import Instruct, OperationInstruct
 from lionagi.service import iModel
 from lionagi.settings import Settings
@@ -24,8 +25,8 @@ class Branch(Component, BranchActionMixin, BranchOperationMixin):
     name: str | None = None
     msgs: MessageManager = None
     acts: ActionManager = None
-    imodel: iModel | None = None
-    parse_imodel: iModel | None = None
+    imodel: iModel | LiteiModel | None = None
+    parse_imodel: iModel | LiteiModel | None = None
 
     @model_validator(mode="before")
     def _validate_data(cls, data: dict) -> dict:
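In effect, a Branch now accepts either backend for its chat and parse models. A hedged sketch (the LiteiModel constructor arguments are illustrative, not confirmed by this diff):

    from lionagi import Branch, LiteiModel

    # The imodel field's annotation is now iModel | LiteiModel | None, so a
    # litellm-backed model can be attached directly; model name is illustrative.
    lite = LiteiModel(model="openai/gpt-4o-mini")
    branch = Branch(imodel=lite)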
lionagi/core/session/session.py
CHANGED
@@ -128,9 +128,9 @@ class Session(Component):
             branch: The branch to set as default or its identifier.
         """
         branch = self.branches[branch]
-        if
-
-
+        if not isinstance(branch, Branch):
+            raise ValueError("Input value for branch is not a valid branch.")
+        self.default_branch = branch
 
     def to_df(self, branches: ID.RefSeq = None) -> pd.DataFrame:
         out = self.concat_messages(branches=branches)
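The rewritten change_default_branch rejects lookups that do not resolve to a Branch. A usage sketch under that assumption (registering via the branches Pile is illustrative, assuming lionagi's Pile.include API):

    from lionagi import Branch, Session

    session = Session()
    branch = Branch()
    session.branches.include(branch)       # assumption: Pile.include() registers it
    session.change_default_branch(branch)  # OK: resolves to a Branch
    # A value resolving to anything else now raises
    # ValueError("Input value for branch is not a valid branch.")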
lionagi/libs/func/async_calls/alcall.py
CHANGED
@@ -148,6 +148,13 @@ async def alcall(
                     ucall(func, i, **kwargs), retry_timeout
                 )
                 return index, result
+
+            except InterruptedError:
+                return index, None
+
+            except asyncio.CancelledError:
+                return index, None
+
             except TimeoutError as e:
                 raise TimeoutError(
                     f"{error_msg or ''} Timeout {retry_timeout} seconds "
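The new handlers make a cancelled or interrupted item resolve to (index, None) instead of unwinding the whole batch. A standalone illustration of that pattern (not lionagi's code; simplified signature):

    import asyncio

    async def _task(func, item, index, timeout):
        # Mirror of the pattern added above: report None for this slot rather
        # than propagating cancellation, so gathered results keep their order.
        try:
            return index, await asyncio.wait_for(func(item), timeout)
        except (InterruptedError, asyncio.CancelledError):
            return index, None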
lionagi/operations/brainstorm/brainstorm.py
CHANGED
@@ -2,11 +2,13 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import logging
+from typing import Literal
 
 from lionagi.core.session.branch import Branch
 from lionagi.core.session.session import Session
 from lionagi.core.typing import ID, Any, BaseModel
-from lionagi.libs.func import alcall
+from lionagi.libs.func.types import alcall
 from lionagi.libs.parse import to_flat_list
 from lionagi.protocols.operatives.instruct import (
     INSTRUCT_FIELD_MODEL,
@@ -17,13 +19,41 @@ from lionagi.protocols.operatives.instruct import (
 from ..utils import prepare_instruct, prepare_session
 from .prompt import PROMPT
 
+# ---------------------------------------------------------------------
+# Data Models & Utilities
+# ---------------------------------------------------------------------
+
 
 class BrainstormOperation(BaseModel):
+    """
+    Container for the outcomes of a brainstorming session:
+      1. initial: the initial result of the 'brainstorm' prompt
+      2. brainstorm: the results of auto-run instructions (if auto_run = True)
+      3. explore: the results of exploring those instructions (if auto_explore = True)
+    """
+
     initial: Any
     brainstorm: list[Instruct] | None = None
     explore: list[InstructResponse] | None = None
 
 
+def chunked(iterable, n):
+    """
+    Yield successive n-sized chunks from an iterable.
+
+    Example:
+        >>> list(chunked([1,2,3,4,5], 2))
+        [[1,2],[3,4],[5]]
+    """
+    for i in range(0, len(iterable), n):
+        yield iterable[i : i + n]
+
+
+# ---------------------------------------------------------------------
+# Core Instruction Execution
+# ---------------------------------------------------------------------
+
+
 async def run_instruct(
     ins: Instruct,
     session: Session,
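For orientation, BrainstormOperation is a plain pydantic container whose three fields mirror the phases listed in its docstring. A small sketch (import path taken from the file list above):

    from lionagi.operations.brainstorm.brainstorm import BrainstormOperation

    op = BrainstormOperation(initial="raw model output")
    # brainstorm/explore stay None until auto_run / auto_explore populate them.
    assert op.brainstorm is None and op.explore is None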
@@ -32,53 +62,50 @@ async def run_instruct(
     verbose: bool = True,
     **kwargs: Any,
 ) -> Any:
-    """
-
-
-        ins: The instruction model to run.
-        session: The current session.
-        branch: The branch to operate on.
-        auto_run: Whether to automatically run nested instructions.
-        verbose: Whether to enable verbose output.
-        **kwargs: Additional keyword arguments.
-
-    Returns:
-        The result of the instruction execution.
+    """
+    Execute a single instruction within a brainstorming session.
+    Optionally auto-run any child instructions that result.
     """
 
-    async def
+    async def _run_child_instruction(child_ins: Instruct):
+        """
+        Helper for recursively running child instructions.
+        """
         if verbose:
-
-
-            if len(
-            else
+            snippet = (
+                child_ins.guidance[:100] + "..."
+                if len(child_ins.guidance) > 100
+                else child_ins.guidance
             )
-            print(f"\n-----Running instruction-----\n{
-
+            print(f"\n-----Running instruction-----\n{snippet}")
+        child_branch = session.split(branch)
         return await run_instruct(
-
+            child_ins, session, child_branch, False, verbose=verbose, **kwargs
         )
 
+    # Prepare config for the branch operation
     config = {**ins.model_dump(), **kwargs}
-
+    result = await branch.operate(**config)
     branch.msgs.logger.dump()
-    instructs = []
-
-    if hasattr(res, "instruct_models"):
-        instructs = res.instruct_models
 
-
-
-
-
-
-
+    # Extract any newly generated instructions
+    instructs = []
+    if hasattr(result, "instruct_models"):
+        instructs = result.instruct_models
+
+    # If we're allowed to auto-run child instructions, handle them
+    if auto_run and instructs:
+        child_results = await alcall(instructs, _run_child_instruction)
+        combined = []
+        for c in child_results:
+            if isinstance(c, list):
+                combined.extend(c)
             else:
-
-
-        return
+                combined.append(c)
+        combined.insert(0, result)
+        return combined
 
-    return
+    return result
 
 
 async def brainstorm(
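The combine step above (flatten child lists one level, then prepend the parent result) is easy to verify in isolation. A standalone sketch, not lionagi code:

    def combine_results(parent, child_results):
        combined = []
        for c in child_results:
            if isinstance(c, list):
                combined.extend(c)   # flatten one level
            else:
                combined.append(c)
        combined.insert(0, parent)   # parent result leads the list
        return combined

    assert combine_results("p", [["a", "b"], "c"]) == ["p", "a", "b", "c"]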
@@ -89,116 +116,314 @@ async def brainstorm(
     auto_run: bool = True,
     auto_explore: bool = False,
     explore_kwargs: dict[str, Any] | None = None,
+    explore_strategy: Literal[
+        "concurrent",
+        "sequential",
+        "sequential_concurrent_chunk",
+        "concurrent_sequential_chunk",
+    ] = "concurrent",
     branch_kwargs: dict[str, Any] | None = None,
     return_session: bool = False,
     verbose: bool = False,
+    branch_as_default: bool = True,
     **kwargs: Any,
 ) -> Any:
-    """Perform a brainstorming session.
-
-    Args:
-        instruct: Instruction model or dictionary.
-        num_instruct: Number of instructions to generate.
-        session: Existing session or None to create a new one.
-        branch: Existing branch or reference.
-        auto_run: If True, automatically run generated instructions.
-        branch_kwargs: Additional arguments for branch creation.
-        return_session: If True, return the session with results.
-        verbose: Whether to enable verbose output.
-        **kwargs: Additional keyword arguments.
-
-    Returns:
-        The results of the brainstorming session, optionally with the session.
     """
+    High-level function to perform a brainstorming session.
 
+    Steps:
+      1. Run the initial 'instruct' prompt to generate suggestions.
+      2. Optionally auto-run those suggestions (auto_run=True).
+      3. Optionally explore the resulting instructions (auto_explore=True)
+         using the chosen strategy (concurrent, sequential, etc.).
+    """
+
+    # -----------------------------------------------------------------
+    # Basic Validations and Setup
+    # -----------------------------------------------------------------
     if auto_explore and not auto_run:
         raise ValueError("auto_explore requires auto_run to be True.")
 
     if verbose:
-        print(
+        print("Starting brainstorming...")
 
+    # Make sure the correct field model is present
     field_models: list = kwargs.get("field_models", [])
     if INSTRUCT_FIELD_MODEL not in field_models:
         field_models.append(INSTRUCT_FIELD_MODEL)
-
     kwargs["field_models"] = field_models
+
+    # Prepare session, branch, and the instruction
     session, branch = prepare_session(session, branch, branch_kwargs)
-
-
-
+    prompt_str = PROMPT.format(num_instruct=num_instruct)
+    instruct = prepare_instruct(instruct, prompt_str)
+
+    # -----------------------------------------------------------------
+    # 1. Initial Brainstorm
+    # -----------------------------------------------------------------
     res1 = await branch.operate(**instruct, **kwargs)
     out = BrainstormOperation(initial=res1)
 
     if verbose:
         print("Initial brainstorming complete.")
 
-
-
-    async def run(ins_):
+    # Helper to run single instructions from the 'brainstorm'
+    async def run_brainstorm_instruction(ins_):
         if verbose:
-
+            snippet = (
                 ins_.guidance[:100] + "..."
                 if len(ins_.guidance) > 100
                 else ins_.guidance
             )
-            print(f"\n-----Running instruction-----\n{
-
+            print(f"\n-----Running instruction-----\n{snippet}")
+        new_branch = session.split(branch)
         return await run_instruct(
-            ins_, session,
+            ins_, session, new_branch, auto_run, verbose=verbose, **kwargs
         )
 
+    # -----------------------------------------------------------------
+    # 2. Auto-run child instructions if requested
+    # -----------------------------------------------------------------
     if not auto_run:
         if return_session:
             return out, session
         return out
 
+    # We run inside the context manager for branching
     async with session.branches:
         response_ = []
+
+        # If the initial result has instructions, run them
         if hasattr(res1, "instruct_models"):
             instructs: list[Instruct] = res1.instruct_models
-
-
+            brainstorm_results = await alcall(
+                instructs, run_brainstorm_instruction
+            )
+            brainstorm_results = to_flat_list(brainstorm_results, dropna=True)
 
-
-
-
+            # Filter out plain str/dict responses, keep model-based
+            filtered = [
+                r if not isinstance(r, (str, dict)) else None
+                for r in brainstorm_results
             ]
-
+            filtered = to_flat_list(filtered, unique=True, dropna=True)
+
             out.brainstorm = (
-
+                filtered if isinstance(filtered, list) else [filtered]
             )
-
+            # Insert the initial result at index 0 for reference
+            filtered.insert(0, res1)
+            response_ = filtered
 
+        # -----------------------------------------------------------------
+        # 3. Explore the results (if auto_explore = True)
+        # -----------------------------------------------------------------
         if response_ and auto_explore:
-
-
-            if verbose:
-                msg_ = (
-                    ins_.guidance[:100] + "..."
-                    if len(ins_.guidance) > 100
-                    else ins_.guidance
-                )
-                print(f"\n-----Exploring Idea-----\n{msg_}")
-            b_ = session.split(branch)
-            res = await b_.instruct(ins_, **(explore_kwargs or {}))
-            return InstructResponse(
-                instruct=ins_,
-                response=res,
-            )
-
-            response_ = to_flat_list(
+            # Gather all newly generated instructions
+            all_explore_instructs = to_flat_list(
                 [
-
-                    for
-                    if hasattr(
+                    r.instruct_models
+                    for r in response_
+                    if hasattr(r, "instruct_models")
                 ],
                 dropna=True,
                 unique=True,
             )
-            res_explore = await alcall(response_, explore)
-            out.explore = res_explore
 
+            # Decide how to explore based on the strategy
+            match explore_strategy:
+                # ---------------------------------------------------------
+                # Strategy A: CONCURRENT
+                # ---------------------------------------------------------
+                case "concurrent":
+
+                    async def explore_concurrently(ins_: Instruct):
+                        if verbose:
+                            snippet = (
+                                ins_.guidance[:100] + "..."
+                                if len(ins_.guidance) > 100
+                                else ins_.guidance
+                            )
+                            print(f"\n-----Exploring Idea-----\n{snippet}")
+                        new_branch = session.split(branch)
+                        resp = await new_branch.instruct(
+                            ins_, **(explore_kwargs or {})
+                        )
+                        return InstructResponse(instruct=ins_, response=resp)
+
+                    res_explore = await alcall(
+                        all_explore_instructs, explore_concurrently
+                    )
+                    out.explore = res_explore
+
+                    # Add messages for logging / auditing
+                    branch.msgs.add_message(
+                        instruction="\n".join(
+                            i.model_dump_json() for i in all_explore_instructs
+                        )
+                    )
+                    branch.msgs.add_message(
+                        assistant_response="\n".join(
+                            i.model_dump_json() for i in res_explore
+                        )
+                    )
+
+                # ---------------------------------------------------------
+                # Strategy B: SEQUENTIAL
+                # ---------------------------------------------------------
+                case "sequential":
+                    explore_results = []
+
+                    # Warn/log if a large number of instructions
+                    if len(all_explore_instructs) > 30:
+                        all_explore_instructs = all_explore_instructs[:30]
+                        logging.warning(
+                            "Maximum number of instructions for sequential exploration is 50. defaulting to 50."
+                        )
+                    if len(all_explore_instructs) > 10:
+                        logging.warning(
+                            "Large number of instructions for sequential exploration. This may take a while."
+                        )
+
+                    for i in all_explore_instructs:
+                        if verbose:
+                            snippet = (
+                                i.guidance[:100] + "..."
+                                if len(i.guidance) > 100
+                                else i.guidance
+                            )
+                            print(f"\n-----Exploring Idea-----\n{snippet}")
+                        seq_res = await branch.instruct(
+                            i, **(explore_kwargs or {})
+                        )
+                        explore_results.append(
+                            InstructResponse(instruct=i, response=seq_res)
+                        )
+
+                    out.explore = explore_results
+
+                # ---------------------------------------------------------
+                # Strategy C: SEQUENTIAL_CONCURRENT_CHUNK
+                # (chunks processed sequentially, each chunk in parallel)
+                # ---------------------------------------------------------
+                case "sequential_concurrent_chunk":
+                    chunk_size = (explore_kwargs or {}).get("chunk_size", 5)
+                    all_responses = []
+
+                    async def explore_concurrent_chunk(
+                        sub_instructs: list[Instruct], base_branch: Branch
+                    ):
+                        """
+                        Explore instructions in a single chunk concurrently.
+                        """
+                        if verbose:
+                            print(
+                                f"\n--- Exploring a chunk of size {len(sub_instructs)} ---\n"
+                            )
+
+                        async def _explore(ins_: Instruct):
+                            child_branch = session.split(base_branch)
+                            child_resp = await child_branch.instruct(
+                                ins_, **(explore_kwargs or {})
+                            )
+                            return InstructResponse(
+                                instruct=ins_, response=child_resp
+                            )
+
+                        # Run all instructions in the chunk concurrently
+                        res_chunk = await alcall(sub_instructs, _explore)
+
+                        # Log messages for debugging / auditing
+                        next_branch = session.split(base_branch)
+                        next_branch.msgs.add_message(
+                            instruction="\n".join(
+                                i.model_dump_json() for i in sub_instructs
+                            )
+                        )
+                        next_branch.msgs.add_message(
+                            assistant_response="\n".join(
+                                i.model_dump_json() for i in res_chunk
+                            )
+                        )
+                        return res_chunk, next_branch
+
+                    # Process each chunk sequentially
+                    for chunk in chunked(all_explore_instructs, chunk_size):
+                        chunk_result, branch = await explore_concurrent_chunk(
+                            chunk, branch
+                        )
+                        all_responses.extend(chunk_result)
+
+                    out.explore = all_responses
+
+                # ---------------------------------------------------------
+                # Strategy D: CONCURRENT_SEQUENTIAL_CHUNK
+                # (all chunks processed concurrently, each chunk sequentially)
+                # ---------------------------------------------------------
+                case "concurrent_sequential_chunk":
+                    chunk_size = (explore_kwargs or {}).get("chunk_size", 5)
+                    all_chunks = list(
+                        chunked(all_explore_instructs, chunk_size)
+                    )
+
+                    async def explore_chunk_sequentially(
+                        sub_instructs: list[Instruct],
+                    ):
+                        """
+                        Explore instructions in a single chunk, one at a time.
+                        """
+                        chunk_results = []
+                        local_branch = session.split(branch)
+
+                        for ins_ in sub_instructs:
+                            if verbose:
+                                snippet = (
+                                    ins_.guidance[:100] + "..."
+                                    if len(ins_.guidance) > 100
+                                    else ins_.guidance
+                                )
+                                print(
+                                    f"\n-----Exploring Idea (sequential in chunk)-----\n{snippet}"
+                                )
+
+                            seq_resp = await local_branch.instruct(
+                                ins_, **(explore_kwargs or {})
+                            )
+                            chunk_results.append(
+                                InstructResponse(
+                                    instruct=ins_, response=seq_resp
+                                )
+                            )
+
+                        return chunk_results
+
+                    # Run all chunks in parallel
+                    all_responses = await alcall(
+                        all_chunks,
+                        explore_chunk_sequentially,
+                        flatten=True,
+                        dropna=True,
+                    )
+                    out.explore = all_responses
+
+                    # Log final messages
+                    branch.msgs.add_message(
+                        instruction="\n".join(
+                            i.model_dump_json() for i in all_explore_instructs
+                        )
+                    )
+                    branch.msgs.add_message(
+                        assistant_response="\n".join(
+                            i.model_dump_json() for i in all_responses
+                        )
+                    )
+
+        if branch_as_default:
+            session.change_default_branch(branch)
+
+    # -----------------------------------------------------------------
+    # 4. Return Results
+    # -----------------------------------------------------------------
     if return_session:
         return out, session
-
     return out
lionagi/operations/brainstorm/prompt.py
CHANGED
@@ -1 +1,8 @@
-PROMPT = """Perform a brainstorm session.
+PROMPT = """Perform a brainstorm session. Generate {num_instruct} concise and distinct instructions (Instruct), each representing a potential next step. We will run them in parallel under the same context. Ensure each idea:
+
+1. Adheres to project guidelines and standards.
+2. Maintains a unique perspective or approach.
+3. Remains succinct yet sufficiently detailed.
+4. Flags any step that needs deeper expansion.
+
+Aim for clarity, practicality, and adherence to the project's core principles."""