lionagi-0.17.11-py3-none-any.whl → lionagi-0.18.1-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- lionagi/_errors.py +0 -5
- lionagi/fields.py +83 -0
- lionagi/libs/schema/minimal_yaml.py +98 -0
- lionagi/ln/__init__.py +3 -1
- lionagi/ln/concurrency/primitives.py +4 -4
- lionagi/ln/concurrency/task.py +1 -0
- lionagi/ln/types.py +32 -5
- lionagi/models/field_model.py +21 -4
- lionagi/models/hashable_model.py +2 -3
- lionagi/operations/ReAct/ReAct.py +475 -238
- lionagi/operations/ReAct/utils.py +3 -0
- lionagi/operations/act/act.py +206 -0
- lionagi/operations/builder.py +5 -7
- lionagi/operations/chat/chat.py +130 -114
- lionagi/operations/communicate/communicate.py +101 -42
- lionagi/operations/fields.py +380 -0
- lionagi/operations/flow.py +8 -10
- lionagi/operations/interpret/interpret.py +65 -20
- lionagi/operations/node.py +4 -4
- lionagi/operations/operate/operate.py +216 -108
- lionagi/{protocols/operatives → operations/operate}/operative.py +4 -5
- lionagi/{protocols/operatives → operations/operate}/step.py +34 -39
- lionagi/operations/parse/parse.py +170 -142
- lionagi/operations/select/select.py +79 -18
- lionagi/operations/select/utils.py +8 -2
- lionagi/operations/types.py +119 -23
- lionagi/protocols/action/manager.py +5 -6
- lionagi/protocols/contracts.py +2 -2
- lionagi/protocols/generic/__init__.py +22 -0
- lionagi/protocols/generic/element.py +36 -127
- lionagi/protocols/generic/log.py +3 -2
- lionagi/protocols/generic/pile.py +9 -10
- lionagi/protocols/generic/progression.py +23 -22
- lionagi/protocols/graph/edge.py +6 -5
- lionagi/protocols/ids.py +6 -49
- lionagi/protocols/messages/__init__.py +29 -0
- lionagi/protocols/messages/action_request.py +86 -184
- lionagi/protocols/messages/action_response.py +73 -131
- lionagi/protocols/messages/assistant_response.py +130 -159
- lionagi/protocols/messages/base.py +31 -22
- lionagi/protocols/messages/instruction.py +280 -625
- lionagi/protocols/messages/manager.py +112 -62
- lionagi/protocols/messages/message.py +87 -197
- lionagi/protocols/messages/system.py +52 -123
- lionagi/protocols/types.py +1 -13
- lionagi/service/connections/__init__.py +3 -0
- lionagi/service/connections/endpoint.py +0 -8
- lionagi/service/connections/providers/claude_code_cli.py +3 -2
- lionagi/service/connections/providers/oai_.py +29 -94
- lionagi/service/connections/providers/ollama_.py +3 -2
- lionagi/service/hooks/_types.py +1 -1
- lionagi/service/hooks/_utils.py +1 -1
- lionagi/service/hooks/hook_event.py +3 -8
- lionagi/service/hooks/hook_registry.py +5 -5
- lionagi/service/hooks/hooked_event.py +63 -3
- lionagi/service/imodel.py +24 -20
- lionagi/service/third_party/claude_code.py +3 -3
- lionagi/service/third_party/openai_models.py +435 -0
- lionagi/service/token_calculator.py +1 -94
- lionagi/session/branch.py +190 -400
- lionagi/session/session.py +8 -99
- lionagi/tools/file/reader.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/METADATA +6 -6
- lionagi-0.18.1.dist-info/RECORD +164 -0
- lionagi/fields/__init__.py +0 -47
- lionagi/fields/action.py +0 -188
- lionagi/fields/base.py +0 -153
- lionagi/fields/code.py +0 -239
- lionagi/fields/file.py +0 -234
- lionagi/fields/instruct.py +0 -135
- lionagi/fields/reason.py +0 -55
- lionagi/fields/research.py +0 -52
- lionagi/operations/_act/act.py +0 -86
- lionagi/operations/brainstorm/__init__.py +0 -2
- lionagi/operations/brainstorm/brainstorm.py +0 -498
- lionagi/operations/brainstorm/prompt.py +0 -11
- lionagi/operations/instruct/__init__.py +0 -2
- lionagi/operations/instruct/instruct.py +0 -28
- lionagi/operations/plan/__init__.py +0 -6
- lionagi/operations/plan/plan.py +0 -386
- lionagi/operations/plan/prompt.py +0 -25
- lionagi/operations/utils.py +0 -45
- lionagi/protocols/forms/__init__.py +0 -2
- lionagi/protocols/forms/base.py +0 -85
- lionagi/protocols/forms/flow.py +0 -79
- lionagi/protocols/forms/form.py +0 -86
- lionagi/protocols/forms/report.py +0 -48
- lionagi/protocols/mail/__init__.py +0 -2
- lionagi/protocols/mail/exchange.py +0 -220
- lionagi/protocols/mail/mail.py +0 -51
- lionagi/protocols/mail/mailbox.py +0 -103
- lionagi/protocols/mail/manager.py +0 -218
- lionagi/protocols/mail/package.py +0 -101
- lionagi/protocols/messages/templates/README.md +0 -28
- lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
- lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
- lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
- lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
- lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
- lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
- lionagi/protocols/operatives/__init__.py +0 -2
- lionagi/service/connections/providers/types.py +0 -28
- lionagi/service/third_party/openai_model_names.py +0 -198
- lionagi/service/types.py +0 -58
- lionagi-0.17.11.dist-info/RECORD +0 -199
- /lionagi/operations/{_act → act}/__init__.py +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/WHEEL +0 -0
- {lionagi-0.17.11.dist-info → lionagi-0.18.1.dist-info}/licenses/LICENSE +0 -0
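A practical consequence of the renames above: `operative.py` and `step.py` move from `lionagi.protocols.operatives` to `lionagi.operations.operate`, and `lionagi/protocols/operatives/__init__.py` is deleted, so the old import path goes away. A minimal migration sketch (the `Operative` and `Step` class names are assumptions inferred from the module names; verify against the 0.18.1 sources):

```python
# lionagi 0.17.11 (path removed in 0.18.1):
# from lionagi.protocols.operatives.operative import Operative
# from lionagi.protocols.operatives.step import Step

# lionagi 0.18.1 (new path, per the rename entries in this diff):
from lionagi.operations.operate.operative import Operative
from lionagi.operations.operate.step import Step
```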
--- a/lionagi/operations/brainstorm/brainstorm.py
+++ /dev/null
@@ -1,498 +0,0 @@
-# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
-# SPDX-License-Identifier: Apache-2.0
-
-import logging
-from typing import TYPE_CHECKING, Any, Literal
-
-from pydantic import BaseModel
-
-from lionagi.fields.instruct import (
-    LIST_INSTRUCT_FIELD_MODEL,
-    Instruct,
-    InstructResponse,
-)
-from lionagi.ln import alcall, to_list
-from lionagi.protocols.generic.element import ID
-
-if TYPE_CHECKING:
-    from lionagi.session.branch import Branch
-    from lionagi.session.session import Session
-
-
-from ..utils import prepare_instruct, prepare_session
-from .prompt import PROMPT
-
-# ---------------------------------------------------------------------
-# Data Models & Utilities
-# ---------------------------------------------------------------------
-
-
-class BrainstormOperation(BaseModel):
-    """
-    Container for the outcomes of a brainstorming session:
-      1. initial: the initial result of the 'brainstorm' prompt
-      2. brainstorm: the results of auto-run instructions (if auto_run = True)
-      3. explore: the results of exploring those instructions (if auto_explore = True)
-    """
-
-    initial: Any
-    brainstorm: list[Instruct] | None = None
-    explore: list[InstructResponse] | None = None
-
-
-def chunked(iterable, n):
-    """
-    Yield successive n-sized chunks from an iterable.
-
-    Example:
-        >>> list(chunked([1,2,3,4,5], 2))
-        [[1,2],[3,4],[5]]
-    """
-    for i in range(0, len(iterable), n):
-        yield iterable[i : i + n]
-
-
-# ---------------------------------------------------------------------
-# Core Instruction Execution
-# ---------------------------------------------------------------------
-
-
-async def run_instruct(
-    ins: Instruct,
-    session: "Session",
-    branch: "Branch",
-    auto_run: bool,
-    verbose: bool = True,
-    **kwargs: Any,
-) -> Any:
-    """
-    Execute a single instruction within a brainstorming session.
-    Optionally auto-run any child instructions that result.
-    """
-
-    async def _run_child_instruction(child_ins: Instruct):
-        """
-        Helper for recursively running child instructions.
-        """
-        if verbose:
-            snippet = (
-                child_ins.guidance[:100] + "..."
-                if len(child_ins.guidance or "") > 100
-                else child_ins.guidance
-            )
-            print(f"\n-----Running instruction-----\n{snippet}")
-        child_branch = session.split(branch)
-        return await run_instruct(
-            child_ins, session, child_branch, False, verbose=verbose, **kwargs
-        )
-
-    # Prepare config for the branch operation
-    config = {**ins.model_dump(), **kwargs}
-    result = await branch.operate(**config)
-    branch.dump_logs()
-
-    # Extract any newly generated instructions
-    instructs = []
-    if hasattr(result, "instruct_models"):
-        instructs = result.instruct_models
-
-    # If we're allowed to auto-run child instructions, handle them
-    if auto_run and instructs:
-        child_results = await alcall(instructs, _run_child_instruction)
-        combined = []
-        for c in child_results:
-            if isinstance(c, list):
-                combined.extend(c)
-            else:
-                combined.append(c)
-        combined.insert(0, result)
-        return combined
-
-    return result
-
-
-async def brainstorm(
-    instruct: Instruct | dict[str, Any],
-    num_instruct: int = 2,
-    session: "Session" = None,
-    branch: ID["Branch"].Ref | None = None,
-    auto_run: bool = True,
-    auto_explore: bool = False,
-    explore_kwargs: dict[str, Any] | None = None,
-    explore_strategy: Literal[
-        "concurrent",
-        "sequential",
-        "sequential_concurrent_chunk",
-        "concurrent_sequential_chunk",
-    ] = "concurrent",
-    branch_kwargs: dict[str, Any] | None = None,
-    return_session: bool = False,
-    verbose: bool = False,
-    branch_as_default: bool = True,
-    operative_model: type[BaseModel] | None = None,
-    **kwargs: Any,
-):
-    out = []
-    async for res in brainstormStream(
-        instruct=instruct,
-        num_instruct=num_instruct,
-        session=session,
-        branch=branch,
-        auto_run=auto_run,
-        auto_explore=auto_explore,
-        explore_kwargs=explore_kwargs,
-        explore_strategy=explore_strategy,
-        branch_kwargs=branch_kwargs,
-        return_session=return_session,
-        verbose=verbose,
-        branch_as_default=branch_as_default,
-        operative_model=operative_model,
-        **kwargs,
-    ):
-        out.append(res)
-
-    return out[-1]
-
-
-async def brainstormStream(
-    instruct: Instruct | dict[str, Any],
-    num_instruct: int = 2,
-    session: "Session" = None,
-    branch: ID["Branch"].Ref | None = None,
-    auto_run: bool = True,
-    auto_explore: bool = False,
-    explore_kwargs: dict[str, Any] | None = None,
-    explore_strategy: Literal[
-        "concurrent",
-        "sequential",
-        "sequential_concurrent_chunk",
-        "concurrent_sequential_chunk",
-    ] = "concurrent",
-    branch_kwargs: dict[str, Any] | None = None,
-    return_session: bool = False,
-    verbose: bool = False,
-    branch_as_default: bool = True,
-    operative_model: type[BaseModel] | None = None,
-    **kwargs: Any,
-) -> Any:
-    """
-    High-level function to perform a brainstorming session.
-
-    Steps:
-      1. Run the initial 'instruct' prompt to generate suggestions.
-      2. Optionally auto-run those suggestions (auto_run=True).
-      3. Optionally explore the resulting instructions (auto_explore=True)
-         using the chosen strategy (concurrent, sequential, etc.).
-    """
-    if operative_model:
-        logging.warning(
-            "The 'operative_model' parameter is deprecated and will be removed in a future version.use 'response_format' instead."
-        )
-        kwargs["response_format"] = kwargs.get(
-            "response_format", operative_model
-        )
-
-    # -----------------------------------------------------------------
-    # Basic Validations and Setup
-    # -----------------------------------------------------------------
-    if auto_explore and not auto_run:
-        raise ValueError("auto_explore requires auto_run to be True.")
-
-    if verbose:
-        print("Starting brainstorming...")
-
-    # Make sure the correct field model is present
-    field_models: list = kwargs.get("field_models", [])
-    if LIST_INSTRUCT_FIELD_MODEL not in field_models:
-        field_models.append(LIST_INSTRUCT_FIELD_MODEL)
-    kwargs["field_models"] = field_models
-
-    # Prepare session, branch, and the instruction
-    session, branch = prepare_session(session, branch, branch_kwargs)
-    prompt_str = PROMPT.format(num_instruct=num_instruct)
-    instruct = prepare_instruct(instruct, prompt_str)
-
-    # -----------------------------------------------------------------
-    # 1. Initial Brainstorm
-    # -----------------------------------------------------------------
-    res1 = await branch.operate(**instruct, **kwargs)
-    yield res1
-
-    out = BrainstormOperation(initial=res1)
-
-    if verbose:
-        print("Initial brainstorming complete.")
-
-    # Helper to run single instructions from the 'brainstorm'
-    async def run_brainstorm_instruction(ins_):
-        if verbose:
-            snippet = (
-                ins_.guidance[:100] + "..."
-                if len(ins_.guidance or "") > 100
-                else ins_.guidance
-            )
-            print(f"\n-----Running instruction-----\n{snippet}")
-        new_branch = session.split(branch)
-        return await run_instruct(
-            ins_, session, new_branch, auto_run, verbose=verbose, **kwargs
-        )
-
-    # -----------------------------------------------------------------
-    # 2. Auto-run child instructions if requested
-    # -----------------------------------------------------------------
-    if not auto_run:
-        if return_session:
-            yield out, session
-        yield out
-
-        return
-
-    # We run inside the context manager for branching
-    async with session.branches:
-        response_ = []
-
-        # If the initial result has instructions, run them
-        if hasattr(res1, "instruct_models"):
-            instructs: list[Instruct] = res1.instruct_models
-            brainstorm_results = await alcall(
-                instructs, run_brainstorm_instruction
-            )
-            brainstorm_results = to_list(
-                brainstorm_results, dropna=True, flatten=True
-            )
-
-            # Filter out plain str/dict responses, keep model-based
-            filtered = [
-                r if not isinstance(r, (str, dict)) else None
-                for r in brainstorm_results
-            ]
-            filtered = to_list(
-                filtered, unique=True, dropna=True, flatten=True
-            )
-
-            out.brainstorm = (
-                filtered if isinstance(filtered, list) else [filtered]
-            )
-            # Insert the initial result at index 0 for reference
-            filtered.insert(0, res1)
-            response_ = filtered
-            yield response_
-
-        # -----------------------------------------------------------------
-        # 3. Explore the results (if auto_explore = True)
-        # -----------------------------------------------------------------
-        if response_ and auto_explore:
-            # Gather all newly generated instructions
-            all_explore_instructs = to_list(
-                [
-                    r.instruct_models
-                    for r in response_
-                    if hasattr(r, "instruct_models")
-                ],
-                dropna=True,
-                unique=True,
-                flatten=True,
-            )
-
-            # Decide how to explore based on the strategy
-            match explore_strategy:
-                # ---------------------------------------------------------
-                # Strategy A: CONCURRENT
-                # ---------------------------------------------------------
-                case "concurrent":
-
-                    async def explore_concurrently(ins_: Instruct):
-                        if verbose:
-                            snippet = (
-                                ins_.guidance[:100] + "..."
-                                if len(ins_.guidance or "") > 100
-                                else ins_.guidance
-                            )
-                            print(f"\n-----Exploring Idea-----\n{snippet}")
-                        new_branch = session.split(branch)
-                        resp = await new_branch.operate(
-                            **ins_.to_dict(), **(explore_kwargs or {})
-                        )
-                        return InstructResponse(instruct=ins_, response=resp)
-
-                    res_explore = await alcall(
-                        all_explore_instructs, explore_concurrently
-                    )
-                    out.explore = res_explore
-
-                    # Add messages for logging / auditing
-                    branch.msgs.add_message(
-                        instruction="\n".join(
-                            i.model_dump_json() for i in all_explore_instructs
-                        )
-                    )
-                    branch.msgs.add_message(
-                        assistant_response="\n".join(
-                            i.model_dump_json() for i in res_explore
-                        )
-                    )
-                    yield res_explore
-
-                # ---------------------------------------------------------
-                # Strategy B: SEQUENTIAL
-                # ---------------------------------------------------------
-                case "sequential":
-                    explore_results = []
-
-                    # Warn/log if a large number of instructions
-                    if len(all_explore_instructs) > 30:
-                        all_explore_instructs = all_explore_instructs[:30]
-                        logging.warning(
-                            "Maximum number of instructions for sequential exploration is 50. defaulting to 50."
-                        )
-                    if len(all_explore_instructs) > 10:
-                        logging.warning(
-                            "Large number of instructions for sequential exploration. This may take a while."
-                        )
-
-                    for i in all_explore_instructs:
-                        if verbose:
-                            snippet = (
-                                i.guidance[:100] + "..."
-                                if len(i.guidance or "") > 100
-                                else i.guidance
-                            )
-                            print(f"\n-----Exploring Idea-----\n{snippet}")
-                        seq_res = await branch.operate(
-                            **i.to_dict(), **(explore_kwargs or {})
-                        )
-                        ins_res = InstructResponse(
-                            instruct=i, response=seq_res
-                        )
-                        explore_results.append(ins_res)
-                        yield ins_res
-
-                    out.explore = explore_results
-
-                # ---------------------------------------------------------
-                # Strategy C: SEQUENTIAL_CONCURRENT_CHUNK
-                # (chunks processed sequentially, each chunk in parallel)
-                # ---------------------------------------------------------
-                case "sequential_concurrent_chunk":
-                    chunk_size = (explore_kwargs or {}).get("chunk_size", 5)
-                    all_responses = []
-
-                    async def explore_concurrent_chunk(
-                        sub_instructs: list[Instruct], base_branch: "Branch"
-                    ):
-                        """
-                        Explore instructions in a single chunk concurrently.
-                        """
-                        if verbose:
-                            print(
-                                f"\n--- Exploring a chunk of size {len(sub_instructs)} ---\n"
-                            )
-
-                        async def _explore(ins_: Instruct):
-                            child_branch = session.split(base_branch)
-                            child_resp = await child_branch.operate(
-                                **ins_.to_dict(), **(explore_kwargs or {})
-                            )
-                            return InstructResponse(
-                                instruct=ins_, response=child_resp
-                            )
-
-                        # Run all instructions in the chunk concurrently
-                        res_chunk = await alcall(sub_instructs, _explore)
-
-                        # Log messages for debugging / auditing
-                        next_branch = session.split(base_branch)
-                        next_branch.msgs.add_message(
-                            instruction="\n".join(
-                                i.model_dump_json() for i in sub_instructs
-                            )
-                        )
-                        next_branch.msgs.add_message(
-                            assistant_response="\n".join(
-                                i.model_dump_json() for i in res_chunk
-                            )
-                        )
-                        return res_chunk, next_branch
-
-                    # Process each chunk sequentially
-                    for chunk in chunked(all_explore_instructs, chunk_size):
-                        chunk_result, branch = await explore_concurrent_chunk(
-                            chunk, branch
-                        )
-                        all_responses.extend(chunk_result)
-                        yield chunk_result
-
-                    out.explore = all_responses
-
-                # ---------------------------------------------------------
-                # Strategy D: CONCURRENT_SEQUENTIAL_CHUNK
-                # (all chunks processed concurrently, each chunk sequentially)
-                # ---------------------------------------------------------
-                case "concurrent_sequential_chunk":
-                    chunk_size = (explore_kwargs or {}).get("chunk_size", 5)
-                    all_chunks = list(
-                        chunked(all_explore_instructs, chunk_size)
-                    )
-
-                    async def explore_chunk_sequentially(
-                        sub_instructs: list[Instruct],
-                    ):
-                        """
-                        Explore instructions in a single chunk, one at a time.
-                        """
-                        chunk_results = []
-                        local_branch = session.split(branch)
-
-                        for ins_ in sub_instructs:
-                            if verbose:
-                                snippet = (
-                                    ins_.guidance[:100] + "..."
-                                    if len(ins_.guidance or "") > 100
-                                    else ins_.guidance
-                                )
-                                print(
-                                    f"\n-----Exploring Idea (sequential in chunk)-----\n{snippet}"
-                                )
-
-                            seq_resp = await local_branch.operate(
-                                **ins_.to_dict(), **(explore_kwargs or {})
-                            )
-                            chunk_results.append(
-                                InstructResponse(
-                                    instruct=ins_, response=seq_resp
-                                )
-                            )
-
-                        return chunk_results
-
-                    # Run all chunks in parallel
-                    all_responses = await alcall(
-                        all_chunks,
-                        explore_chunk_sequentially,
-                        output_flatten=True,
-                        output_dropna=True,
-                    )
-                    out.explore = all_responses
-
-                    # Log final messages
-                    branch.msgs.add_message(
-                        instruction="\n".join(
-                            i.model_dump_json() for i in all_explore_instructs
-                        )
-                    )
-                    branch.msgs.add_message(
-                        assistant_response="\n".join(
-                            i.model_dump_json() for i in all_responses
-                        )
-                    )
-                    yield all_responses
-
-    if branch_as_default:
-        session.change_default_branch(branch)
-
-    # -----------------------------------------------------------------
-    # 4. Return Results
-    # -----------------------------------------------------------------
-    if return_session:
-        yield out, session
-    yield out
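For context on what this deletion removes, here is a minimal sketch of how the 0.17.x `brainstorm` entry point was driven (illustrative only: the instruction text is invented, and session/branch setup is left to the `prepare_session` defaults shown above):

```python
import asyncio

from lionagi.fields.instruct import Instruct
# Path removed in 0.18.1; existed in 0.17.x:
from lionagi.operations.brainstorm.brainstorm import brainstorm


async def main():
    result = await brainstorm(
        # Seed instruction; the Instruct model also accepts guidance/context
        instruct=Instruct(instruction="Propose next steps for the ETL pipeline"),
        num_instruct=3,                 # ideas requested from the initial prompt
        auto_run=True,                  # execute each generated Instruct
        auto_explore=True,              # then explore the results
        explore_strategy="concurrent",  # one of the four strategies above
        verbose=True,
    )
    # result is a BrainstormOperation: .initial, .brainstorm, .explore
    for r in result.explore or []:
        print(r.response)


asyncio.run(main())
```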
--- a/lionagi/operations/brainstorm/prompt.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
-# SPDX-License-Identifier: Apache-2.0
-
-PROMPT = """Perform a brainstorm session. Generate {num_instruct} concise and distinct instructions (Instruct), each representing a potential next step. We will run them in parallel under the same context. Ensure each idea:
-
-1. Adheres to project guidelines and standards.
-2. Maintains a unique perspective or approach.
-3. Remains succinct yet sufficiently detailed.
-4. Flags any step that needs deeper expansion.
-
-Aim for clarity, practicality, and adherence to the project's core principles."""
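This template was consumed by `brainstormStream` via a plain `str.format` call; a quick illustration (constant abridged from the deleted file):

```python
# Abridged copy of the deleted PROMPT constant, for illustration only:
PROMPT = (
    "Perform a brainstorm session. Generate {num_instruct} concise and "
    "distinct instructions (Instruct), each representing a potential next step. ..."
)

prompt_str = PROMPT.format(num_instruct=2)
print(prompt_str)
# Perform a brainstorm session. Generate 2 concise and distinct instructions ...
```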
--- a/lionagi/operations/instruct/instruct.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
-# SPDX-License-Identifier: Apache-2.0
-
-from typing import TYPE_CHECKING, Any
-
-from lionagi.fields.instruct import Instruct
-
-if TYPE_CHECKING:
-    from lionagi.session.branch import Branch
-
-
-async def instruct(
-    branch: "Branch",
-    instruct: Instruct,
-    /,
-    **kwargs,
-) -> Any:
-    config = {
-        **(instruct.to_dict() if isinstance(instruct, Instruct) else instruct),
-        **kwargs,
-    }
-    if any(i in config and config[i] for i in Instruct.reserved_kwargs):
-        if "response_format" in config or "request_model" in config:
-            return await branch.operate(**config)
-        for i in Instruct.reserved_kwargs:
-            config.pop(i, None)
-
-    return await branch.communicate(**config)
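The helper above dispatched on its merged config: when any non-empty `Instruct.reserved_kwargs` were present together with a `response_format` or `request_model`, it routed to `branch.operate` (structured output); otherwise it stripped the reserved keys and fell back to `branch.communicate` (plain chat). A hedged usage sketch, assuming an already-constructed `Branch` and an async context:

```python
from lionagi.fields.instruct import Instruct
# Path removed in 0.18.1; existed in 0.17.x:
from lionagi.operations.instruct.instruct import instruct as instruct_op


async def summarize(branch):
    # No response_format/request_model supplied, so any reserved keys are
    # stripped and the helper falls back to branch.communicate(...)
    return await instruct_op(
        branch,
        Instruct(
            instruction="Summarize the quarterly report",
            guidance="Two sentences, plain language",
        ),
    )
```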