lionagi 0.0.312__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- lionagi/__init__.py +61 -3
- lionagi/core/__init__.py +0 -14
- lionagi/core/_setting/_setting.py +59 -0
- lionagi/core/action/__init__.py +14 -0
- lionagi/core/action/function_calling.py +136 -0
- lionagi/core/action/manual.py +1 -0
- lionagi/core/action/node.py +109 -0
- lionagi/core/action/tool.py +114 -0
- lionagi/core/action/tool_manager.py +356 -0
- lionagi/core/agent/__init__.py +0 -3
- lionagi/core/agent/base_agent.py +45 -36
- lionagi/core/agent/eval/evaluator.py +1 -0
- lionagi/core/agent/eval/vote.py +40 -0
- lionagi/core/agent/learn/learner.py +59 -0
- lionagi/core/agent/plan/unit_template.py +1 -0
- lionagi/core/collections/__init__.py +17 -0
- lionagi/core/collections/_logger.py +319 -0
- lionagi/core/collections/abc/__init__.py +53 -0
- lionagi/core/collections/abc/component.py +615 -0
- lionagi/core/collections/abc/concepts.py +297 -0
- lionagi/core/collections/abc/exceptions.py +150 -0
- lionagi/core/collections/abc/util.py +45 -0
- lionagi/core/collections/exchange.py +161 -0
- lionagi/core/collections/flow.py +426 -0
- lionagi/core/collections/model.py +419 -0
- lionagi/core/collections/pile.py +913 -0
- lionagi/core/collections/progression.py +236 -0
- lionagi/core/collections/util.py +64 -0
- lionagi/core/director/direct.py +314 -0
- lionagi/core/director/director.py +2 -0
- lionagi/core/engine/branch_engine.py +333 -0
- lionagi/core/engine/instruction_map_engine.py +204 -0
- lionagi/core/engine/sandbox_.py +14 -0
- lionagi/core/engine/script_engine.py +99 -0
- lionagi/core/executor/base_executor.py +90 -0
- lionagi/core/executor/graph_executor.py +330 -0
- lionagi/core/executor/neo4j_executor.py +384 -0
- lionagi/core/generic/__init__.py +7 -0
- lionagi/core/generic/edge.py +112 -0
- lionagi/core/generic/edge_condition.py +16 -0
- lionagi/core/generic/graph.py +236 -0
- lionagi/core/generic/hyperedge.py +1 -0
- lionagi/core/generic/node.py +220 -0
- lionagi/core/generic/tree.py +48 -0
- lionagi/core/generic/tree_node.py +79 -0
- lionagi/core/mail/__init__.py +7 -3
- lionagi/core/mail/mail.py +25 -0
- lionagi/core/mail/mail_manager.py +142 -58
- lionagi/core/mail/package.py +45 -0
- lionagi/core/mail/start_mail.py +36 -0
- lionagi/core/message/__init__.py +19 -0
- lionagi/core/message/action_request.py +133 -0
- lionagi/core/message/action_response.py +135 -0
- lionagi/core/message/assistant_response.py +95 -0
- lionagi/core/message/instruction.py +234 -0
- lionagi/core/message/message.py +101 -0
- lionagi/core/message/system.py +86 -0
- lionagi/core/message/util.py +283 -0
- lionagi/core/report/__init__.py +4 -0
- lionagi/core/report/base.py +217 -0
- lionagi/core/report/form.py +231 -0
- lionagi/core/report/report.py +166 -0
- lionagi/core/report/util.py +28 -0
- lionagi/core/rule/__init__.py +0 -0
- lionagi/core/rule/_default.py +16 -0
- lionagi/core/rule/action.py +99 -0
- lionagi/core/rule/base.py +238 -0
- lionagi/core/rule/boolean.py +56 -0
- lionagi/core/rule/choice.py +47 -0
- lionagi/core/rule/mapping.py +96 -0
- lionagi/core/rule/number.py +71 -0
- lionagi/core/rule/rulebook.py +109 -0
- lionagi/core/rule/string.py +52 -0
- lionagi/core/rule/util.py +35 -0
- lionagi/core/session/__init__.py +0 -3
- lionagi/core/session/branch.py +431 -0
- lionagi/core/session/directive_mixin.py +287 -0
- lionagi/core/session/session.py +230 -902
- lionagi/core/structure/__init__.py +1 -0
- lionagi/core/structure/chain.py +1 -0
- lionagi/core/structure/forest.py +1 -0
- lionagi/core/structure/graph.py +1 -0
- lionagi/core/structure/tree.py +1 -0
- lionagi/core/unit/__init__.py +5 -0
- lionagi/core/unit/parallel_unit.py +245 -0
- lionagi/core/unit/template/__init__.py +0 -0
- lionagi/core/unit/template/action.py +81 -0
- lionagi/core/unit/template/base.py +51 -0
- lionagi/core/unit/template/plan.py +84 -0
- lionagi/core/unit/template/predict.py +109 -0
- lionagi/core/unit/template/score.py +124 -0
- lionagi/core/unit/template/select.py +104 -0
- lionagi/core/unit/unit.py +362 -0
- lionagi/core/unit/unit_form.py +305 -0
- lionagi/core/unit/unit_mixin.py +1168 -0
- lionagi/core/unit/util.py +71 -0
- lionagi/core/validator/__init__.py +0 -0
- lionagi/core/validator/validator.py +364 -0
- lionagi/core/work/__init__.py +0 -0
- lionagi/core/work/work.py +76 -0
- lionagi/core/work/work_function.py +101 -0
- lionagi/core/work/work_queue.py +103 -0
- lionagi/core/work/worker.py +258 -0
- lionagi/core/work/worklog.py +120 -0
- lionagi/experimental/__init__.py +0 -0
- lionagi/experimental/compressor/__init__.py +0 -0
- lionagi/experimental/compressor/base.py +46 -0
- lionagi/experimental/compressor/llm_compressor.py +247 -0
- lionagi/experimental/compressor/llm_summarizer.py +61 -0
- lionagi/experimental/compressor/util.py +70 -0
- lionagi/experimental/directive/__init__.py +19 -0
- lionagi/experimental/directive/parser/__init__.py +0 -0
- lionagi/experimental/directive/parser/base_parser.py +282 -0
- lionagi/experimental/directive/template/__init__.py +0 -0
- lionagi/experimental/directive/template/base_template.py +79 -0
- lionagi/experimental/directive/template/schema.py +36 -0
- lionagi/experimental/directive/tokenizer.py +73 -0
- lionagi/experimental/evaluator/__init__.py +0 -0
- lionagi/experimental/evaluator/ast_evaluator.py +131 -0
- lionagi/experimental/evaluator/base_evaluator.py +218 -0
- lionagi/experimental/knowledge/__init__.py +0 -0
- lionagi/experimental/knowledge/base.py +10 -0
- lionagi/experimental/knowledge/graph.py +0 -0
- lionagi/experimental/memory/__init__.py +0 -0
- lionagi/experimental/strategies/__init__.py +0 -0
- lionagi/experimental/strategies/base.py +1 -0
- lionagi/integrations/bridge/autogen_/__init__.py +0 -0
- lionagi/integrations/bridge/autogen_/autogen_.py +124 -0
- lionagi/integrations/bridge/langchain_/documents.py +4 -0
- lionagi/integrations/bridge/llamaindex_/index.py +30 -0
- lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +6 -0
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +227 -0
- lionagi/integrations/bridge/llamaindex_/node_parser.py +6 -9
- lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +1 -0
- lionagi/integrations/bridge/transformers_/__init__.py +0 -0
- lionagi/integrations/bridge/transformers_/install_.py +36 -0
- lionagi/integrations/chunker/__init__.py +0 -0
- lionagi/integrations/chunker/chunk.py +312 -0
- lionagi/integrations/config/oai_configs.py +38 -7
- lionagi/integrations/config/ollama_configs.py +1 -1
- lionagi/integrations/config/openrouter_configs.py +14 -2
- lionagi/integrations/loader/__init__.py +0 -0
- lionagi/integrations/loader/load.py +253 -0
- lionagi/integrations/loader/load_util.py +195 -0
- lionagi/integrations/provider/_mapping.py +46 -0
- lionagi/integrations/provider/litellm.py +2 -1
- lionagi/integrations/provider/mlx_service.py +16 -9
- lionagi/integrations/provider/oai.py +91 -4
- lionagi/integrations/provider/ollama.py +7 -6
- lionagi/integrations/provider/openrouter.py +115 -8
- lionagi/integrations/provider/services.py +2 -2
- lionagi/integrations/provider/transformers.py +18 -22
- lionagi/integrations/storage/__init__.py +3 -0
- lionagi/integrations/storage/neo4j.py +665 -0
- lionagi/integrations/storage/storage_util.py +287 -0
- lionagi/integrations/storage/structure_excel.py +285 -0
- lionagi/integrations/storage/to_csv.py +63 -0
- lionagi/integrations/storage/to_excel.py +83 -0
- lionagi/libs/__init__.py +26 -1
- lionagi/libs/ln_api.py +78 -23
- lionagi/libs/ln_context.py +37 -0
- lionagi/libs/ln_convert.py +21 -9
- lionagi/libs/ln_func_call.py +69 -28
- lionagi/libs/ln_image.py +107 -0
- lionagi/libs/ln_knowledge_graph.py +405 -0
- lionagi/libs/ln_nested.py +26 -11
- lionagi/libs/ln_parse.py +110 -14
- lionagi/libs/ln_queue.py +117 -0
- lionagi/libs/ln_tokenize.py +164 -0
- lionagi/{core/prompt/field_validator.py → libs/ln_validate.py} +79 -14
- lionagi/libs/special_tokens.py +172 -0
- lionagi/libs/sys_util.py +107 -2
- lionagi/lions/__init__.py +0 -0
- lionagi/lions/coder/__init__.py +0 -0
- lionagi/lions/coder/add_feature.py +20 -0
- lionagi/lions/coder/base_prompts.py +22 -0
- lionagi/lions/coder/code_form.py +13 -0
- lionagi/lions/coder/coder.py +168 -0
- lionagi/lions/coder/util.py +96 -0
- lionagi/lions/researcher/__init__.py +0 -0
- lionagi/lions/researcher/data_source/__init__.py +0 -0
- lionagi/lions/researcher/data_source/finhub_.py +191 -0
- lionagi/lions/researcher/data_source/google_.py +199 -0
- lionagi/lions/researcher/data_source/wiki_.py +96 -0
- lionagi/lions/researcher/data_source/yfinance_.py +21 -0
- lionagi/tests/integrations/__init__.py +0 -0
- lionagi/tests/libs/__init__.py +0 -0
- lionagi/tests/libs/test_field_validators.py +353 -0
- lionagi/tests/{test_libs → libs}/test_func_call.py +23 -21
- lionagi/tests/{test_libs → libs}/test_nested.py +36 -21
- lionagi/tests/{test_libs → libs}/test_parse.py +1 -1
- lionagi/tests/libs/test_queue.py +67 -0
- lionagi/tests/test_core/collections/__init__.py +0 -0
- lionagi/tests/test_core/collections/test_component.py +206 -0
- lionagi/tests/test_core/collections/test_exchange.py +138 -0
- lionagi/tests/test_core/collections/test_flow.py +145 -0
- lionagi/tests/test_core/collections/test_pile.py +171 -0
- lionagi/tests/test_core/collections/test_progression.py +129 -0
- lionagi/tests/test_core/generic/__init__.py +0 -0
- lionagi/tests/test_core/generic/test_edge.py +67 -0
- lionagi/tests/test_core/generic/test_graph.py +96 -0
- lionagi/tests/test_core/generic/test_node.py +106 -0
- lionagi/tests/test_core/generic/test_tree_node.py +73 -0
- lionagi/tests/test_core/test_branch.py +115 -292
- lionagi/tests/test_core/test_form.py +46 -0
- lionagi/tests/test_core/test_report.py +105 -0
- lionagi/tests/test_core/test_validator.py +111 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/LICENSE +12 -11
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/METADATA +19 -118
- lionagi-0.2.1.dist-info/RECORD +240 -0
- lionagi/core/branch/__init__.py +0 -4
- lionagi/core/branch/base_branch.py +0 -654
- lionagi/core/branch/branch.py +0 -471
- lionagi/core/branch/branch_flow_mixin.py +0 -96
- lionagi/core/branch/executable_branch.py +0 -347
- lionagi/core/branch/util.py +0 -323
- lionagi/core/direct/__init__.py +0 -6
- lionagi/core/direct/predict.py +0 -161
- lionagi/core/direct/score.py +0 -278
- lionagi/core/direct/select.py +0 -169
- lionagi/core/direct/utils.py +0 -87
- lionagi/core/direct/vote.py +0 -64
- lionagi/core/flow/base/baseflow.py +0 -23
- lionagi/core/flow/monoflow/ReAct.py +0 -238
- lionagi/core/flow/monoflow/__init__.py +0 -9
- lionagi/core/flow/monoflow/chat.py +0 -95
- lionagi/core/flow/monoflow/chat_mixin.py +0 -263
- lionagi/core/flow/monoflow/followup.py +0 -214
- lionagi/core/flow/polyflow/__init__.py +0 -1
- lionagi/core/flow/polyflow/chat.py +0 -248
- lionagi/core/mail/schema.py +0 -56
- lionagi/core/messages/__init__.py +0 -3
- lionagi/core/messages/schema.py +0 -533
- lionagi/core/prompt/prompt_template.py +0 -316
- lionagi/core/schema/__init__.py +0 -22
- lionagi/core/schema/action_node.py +0 -29
- lionagi/core/schema/base_mixin.py +0 -296
- lionagi/core/schema/base_node.py +0 -199
- lionagi/core/schema/condition.py +0 -24
- lionagi/core/schema/data_logger.py +0 -354
- lionagi/core/schema/data_node.py +0 -93
- lionagi/core/schema/prompt_template.py +0 -67
- lionagi/core/schema/structure.py +0 -910
- lionagi/core/tool/__init__.py +0 -3
- lionagi/core/tool/tool_manager.py +0 -280
- lionagi/integrations/bridge/pydantic_/base_model.py +0 -7
- lionagi/tests/test_core/test_base_branch.py +0 -427
- lionagi/tests/test_core/test_chat_flow.py +0 -63
- lionagi/tests/test_core/test_mail_manager.py +0 -75
- lionagi/tests/test_core/test_prompts.py +0 -51
- lionagi/tests/test_core/test_session.py +0 -254
- lionagi/tests/test_core/test_session_base_util.py +0 -312
- lionagi/tests/test_core/test_tool_manager.py +0 -95
- lionagi-0.0.312.dist-info/RECORD +0 -111
- /lionagi/core/{branch/base → _setting}/__init__.py +0 -0
- /lionagi/core/{flow → agent/eval}/__init__.py +0 -0
- /lionagi/core/{flow/base → agent/learn}/__init__.py +0 -0
- /lionagi/core/{prompt → agent/plan}/__init__.py +0 -0
- /lionagi/core/{tool/manual.py → agent/plan/plan.py} +0 -0
- /lionagi/{tests/test_integrations → core/director}/__init__.py +0 -0
- /lionagi/{tests/test_libs → core/engine}/__init__.py +0 -0
- /lionagi/{tests/test_libs/test_async.py → core/executor/__init__.py} +0 -0
- /lionagi/tests/{test_libs → libs}/test_api.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_convert.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_sys_util.py +0 -0
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/WHEEL +0 -0
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/top_level.txt +0 -0
lionagi/core/flow/monoflow/followup.py
DELETED
@@ -1,214 +0,0 @@
-"""
-This module contains the MonoFollowup class for performing followup chats with an LLM.
-
-The MonoFollowup class allows for conducting a series of followup chats with an LLM, with the ability to
-process instructions, system messages, and invoke tools during the conversation. It extends the MonoChat class.
-"""
-
-from typing import Callable
-from lionagi.core.messages import Instruction
-from lionagi.core.schema import Tool
-from .chat import MonoChat
-
-
-class MonoFollowup(MonoChat):
-    """
-    A class for performing followup chats with an LLM, processing instructions and system messages,
-    and optionally invoking tools.
-
-    Attributes:
-        FOLLOWUP_PROMPT (str): The default prompt for followup chats.
-        OUTPUT_PROMPT (str): The default prompt for presenting the final output to the user.
-
-    Methods:
-        async followup(self, instruction, context=None, sender=None, system=None, tools=None,
-                max_followup=1, followup_prompt=None, output_prompt=None, **kwargs):
-            Performs a series of followup chats with an LLM, processing instructions and system messages,
-            and optionally invoking tools.
-
-        _get_prompt(prompt=None, default=None, num_followup=None, instruction=None) -> str:
-            Retrieves the appropriate prompt for the followup chat based on the provided parameters.
-
-        _create_followup_config(self, tools, tool_choice="auto", **kwargs) -> dict:
-            Creates the configuration for the followup chat based on the provided tools and parameters.
-
-        async _followup(self, instruction, context=None, sender=None, system=None, tools=None,
-                max_followup=1, auto=False, followup_prompt=None, output_prompt=None,
-                out=True, **kwargs) -> str:
-            Performs the actual followup chats with the LLM, processing instructions and system messages,
-            and optionally invoking tools.
-    """
-
-    FOLLOWUP_PROMPT = """
-        In the current task, you are allowed a maximum of another {num_followup} followup chats.
-        If further actions are needed, invoke tool usage.
-        If you are done, present the final result to the user without further tool usage.
-    """
-
-    OUTPUT_PROMPT = "Notice: Present the final output to the user. Original user instruction: {instruction}"
-
-    async def followup(
-        self,
-        instruction: Instruction | str | dict[str, dict | str],
-        context=None,
-        sender=None,
-        system=None,
-        tools=None,
-        max_followup: int = 1,
-        followup_prompt=None,
-        output_prompt=None,
-        **kwargs,
-    ):
-        """
-        Performs a series of followup chats with an LLM, processing instructions and system messages,
-        and optionally invoking tools.
-
-        Args:
-            instruction (Instruction | str | dict[str, dict | str]): The instruction for the followup chat.
-            context (Optional[Any]): Additional context for the followup chat.
-            sender (Optional[str]): The sender of the followup chat message.
-            system (Optional[Any]): System message to be processed during the followup chat.
-            tools (Optional[Any]): Specifies tools to be invoked during the followup chat.
-            max_followup (int): The maximum number of followup chats allowed (default: 1).
-            followup_prompt (Optional[str]): The prompt to use for followup chats.
-            output_prompt (Optional[str]): The prompt to use for presenting the final output to the user.
-            **kwargs: Additional keyword arguments for the followup chat.
-
-        Returns:
-            str: The result of the followup chat.
-        """
-        return await self._followup(
-            instruction=instruction,
-            context=context,
-            sender=sender,
-            system=system,
-            tools=tools,
-            max_followup=max_followup,
-            followup_prompt=followup_prompt,
-            output_prompt=output_prompt,
-            **kwargs,
-        )
-
-    @staticmethod
-    def _get_prompt(prompt=None, default=None, num_followup=None, instruction=None):
-        """
-        Retrieves the appropriate prompt for the followup chat based on the provided parameters.
-
-        Args:
-            prompt (Optional[str]): The prompt to use for the followup chat.
-            default (Optional[str]): The default prompt to use if no specific prompt is provided.
-            num_followup (Optional[int]): The number of remaining followup chats.
-            instruction (Optional[Any]): The original user instruction.
-
-        Returns:
-            str: The appropriate prompt for the followup chat.
-        """
-        if prompt is not None:
-            return prompt
-
-        try:
-            if num_followup is not None:
-                return default.format(num_followup=num_followup)
-            elif instruction is not None:
-                return default.format(instruction=instruction)
-        except (KeyError, ValueError):
-            pass
-
-        return default
-
-    def _create_followup_config(self, tools, tool_choice="auto", **kwargs):
-        """
-        Creates the configuration for the followup chat based on the provided tools and parameters.
-
-        Args:
-            tools (Optional[Any]): Specifies tools to be invoked during the followup chat.
-            tool_choice (str): The choice of tools to use (default: "auto").
-            **kwargs: Additional keyword arguments for the followup chat configuration.
-
-        Returns:
-            dict: The configuration for the followup chat.
-
-        Raises:
-            ValueError: If no tools are found and registered.
-        """
-        if tools and isinstance(tools, list) and isinstance(tools[0], (Callable, Tool)):
-            self.branch.tool_manager.register_tools(tools)
-
-        if not self.branch.tool_manager.has_tools:
-            raise ValueError("No tools found. You need to register tools.")
-
-        config = self.branch.tool_manager.parse_tool(tools=True, **kwargs)
-        config["tool_parsed"] = True
-        config["tool_choice"] = tool_choice
-        return config
-
-    async def _followup(
-        self,
-        instruction: Instruction | str | dict[str, dict | str],
-        context=None,
-        sender=None,
-        system=None,
-        tools=None,
-        max_followup: int = 1,
-        auto=False,
-        followup_prompt=None,
-        output_prompt=None,
-        out=True,
-        **kwargs,
-    ) -> None:
-        """
-        Performs the actual followup chats with the LLM, processing instructions and system messages,
-        and optionally invoking tools.
-
-        Args:
-            instruction (Instruction | str | dict[str, dict | str]): The instruction for the followup chat.
-            context (Optional[Any]): Additional context for the followup chat.
-            sender (Optional[str]): The sender of the followup chat message.
-            system (Optional[Any]): System message to be processed during the followup chat.
-            tools (Optional[Any]): Specifies tools to be invoked during the followup chat.
-            max_followup (int): The maximum number of followup chats allowed (default: 1).
-            auto (bool): Flag indicating whether to automatically determine if the chat is finished (default: False).
-            followup_prompt (Optional[str]): The prompt to use for followup chats.
-            output_prompt (Optional[str]): The prompt to use for presenting the final output to the user.
-            out (bool): Flag indicating whether to return the output of the followup chat (default: True).
-            **kwargs: Additional keyword arguments for the followup chat.
-
-        Returns:
-            Optional[str]: The result of the followup chat, if `out` is True.
-        """
-        config = self._create_followup_config(tools, **kwargs)
-
-        i = 0
-        _out = ""
-
-        while i < max_followup:
-            _prompt = self._get_prompt(
-                prompt=followup_prompt,
-                default=self.FOLLOWUP_PROMPT,
-                num_followup=max_followup - i,
-            )
-
-            if i == 0:
-                _prompt = {"NOTICE": _prompt, "TASK": instruction}
-                _out = await self.chat(
-                    _prompt, context=context, sender=sender, system=system, **config
-                )
-            else:
-                _out = await self.chat(_prompt, sender=sender, **config)
-
-            if auto and not self.branch._is_invoked():
-                return _out if out else None
-
-            i += 1
-
-        if auto:
-            if not self.branch._is_invoked():
-                return _out if out else None
-
-            _prompt = self._get_prompt(
-                prompt=output_prompt,
-                default=self.OUTPUT_PROMPT,
-                instruction=instruction,
-            )
-            _out = await self.chat(_prompt, sender=sender, **kwargs)
-        return _out if out else None
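The removed MonoFollowup flow above is essentially a bounded tool-use loop followed by an optional output round. For orientation, a minimal standalone sketch of that control flow is shown below; fake_chat and tool_was_invoked are illustrative stand-ins (not lionagi APIs), and only the two prompt templates are taken from the deleted file.

# Standalone sketch of the control flow removed with MonoFollowup._followup.
# fake_chat and tool_was_invoked are hypothetical stand-ins, not lionagi APIs;
# the two prompt templates are copied from the deleted file above.
import asyncio

FOLLOWUP_PROMPT = (
    "In the current task, you are allowed a maximum of another {num_followup} followup chats. "
    "If further actions are needed, invoke tool usage. "
    "If you are done, present the final result to the user without further tool usage."
)
OUTPUT_PROMPT = (
    "Notice: Present the final output to the user. Original user instruction: {instruction}"
)


async def fake_chat(prompt) -> str:
    # Stand-in for MonoChat.chat: pretend the model answered.
    await asyncio.sleep(0)
    return f"reply to: {prompt}"


def tool_was_invoked() -> bool:
    # Stand-in for branch._is_invoked(): did the last reply call a tool?
    return False


async def followup(instruction: str, max_followup: int = 2, auto: bool = True) -> str:
    out = ""
    for i in range(max_followup):
        notice = FOLLOWUP_PROMPT.format(num_followup=max_followup - i)
        # The first round carries the original task alongside the notice.
        prompt = {"NOTICE": notice, "TASK": instruction} if i == 0 else notice
        out = await fake_chat(prompt)
        if auto and not tool_was_invoked():
            return out  # the model stopped calling tools, so we are done
    if auto:
        # Final round: ask the model to present the output for the original instruction.
        out = await fake_chat(OUTPUT_PROMPT.format(instruction=instruction))
    return out


print(asyncio.run(followup("compute the quarterly totals")))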
lionagi/core/flow/polyflow/__init__.py
DELETED
@@ -1 +0,0 @@
-from .chat import PolyChat
lionagi/core/flow/polyflow/chat.py
DELETED
@@ -1,248 +0,0 @@
-"""
-This module contains the PolyChat class for performing parallel chat conversations with multiple branches.
-
-The PolyChat class allows for conducting parallel chat conversations with multiple branches, each processing
-instructions and context independently. It provides methods for parallel chat execution and manages the
-created branches within the session.
-"""
-
-from typing import Any
-
-from lionagi.libs import ln_convert as convert
-from lionagi.libs.ln_async import AsyncUtil
-
-from lionagi.core.messages.schema import Instruction
-from lionagi.core.branch.branch import Branch
-
-from lionagi.core.flow.base.baseflow import BasePolyFlow
-
-
-class PolyChat(BasePolyFlow):
-    """
-    A class for performing parallel chat conversations with multiple branches.
-
-    Methods:
-        __init__(self, session) -> None:
-            Initializes the PolyChat instance.
-
-        async parallel_chat(self, instruction, num_instances=1, context=None, sender=None, branch_system=None,
-                messages=None, tools=False, out=True, invoke=True, output_fields=None,
-                persist_path=None, branch_config=None, explode=False, **kwargs) -> Any:
-            Performs parallel chat conversations with multiple branches.
-
-        async _parallel_chat(self, instruction, num_instances=1, context=None, sender=None, messages=None,
-                tools=False, out=True, invoke=True, output_fields=None, persist_path=None,
-                branch_config={}, explode=False, include_mapping=True, default_key="response",
-                **kwargs) -> Any:
-            Internal method for performing parallel chat conversations with multiple branches.
-    """
-
-    def __init__(self, session) -> None:
-        """
-        Initializes the PolyChat instance.
-
-        Args:
-            session: The session object.
-        """
-        super().__init__(session)
-
-    async def parallel_chat(
-        self,
-        instruction: Instruction | str,
-        num_instances=1,
-        context=None,
-        sender=None,
-        branch_system=None,
-        messages=None,
-        tools=False,
-        out=True,
-        invoke: bool = True,
-        output_fields=None,
-        persist_path=None,
-        branch_config=None,
-        explode=False,
-        **kwargs,
-    ) -> Any:
-        """
-        Performs parallel chat conversations with multiple branches.
-
-        Args:
-            instruction (Instruction | str): The instruction for the chat conversation.
-            num_instances (int): The number of branch instances to create (default: 1).
-            context (Optional[Any]): Additional context for the chat conversation.
-            sender (Optional[str]): The sender of the chat message.
-            branch_system (Optional[Any]): The system configuration for the branches.
-            messages (Optional[Any]): Messages to initialize the branches with.
-            tools (bool): Flag indicating whether to use tools in the chat conversation (default: False).
-            out (bool): Flag indicating whether to return the output of the chat conversation (default: True).
-            invoke (bool): Flag indicating whether to invoke tools during the chat conversation (default: True).
-            output_fields (Optional[Any]): The output fields for the chat conversation.
-            persist_path (Optional[str]): The path to persist the branch data.
-            branch_config (Optional[dict]): Additional configuration for the branches.
-            explode (bool): Flag indicating whether to explode the instruction and context combinations (default: False).
-            **kwargs: Additional keyword arguments for the chat conversation.
-
-        Returns:
-            Any: The result of the parallel chat conversation.
-        """
-        if branch_config is None:
-            branch_config = {}
-        return await self._parallel_chat(
-            instruction,
-            num_instances=num_instances,
-            context=context,
-            sender=sender,
-            branch_system=branch_system,
-            messages=messages,
-            tools=tools,
-            out=out,
-            invoke=invoke,
-            output_fields=output_fields,
-            persist_path=persist_path,
-            branch_config=branch_config,
-            explode=explode,
-            **kwargs,
-        )
-
-    async def _parallel_chat(
-        self,
-        instruction: Instruction | str,
-        num_instances=1,
-        context=None,
-        sender=None,
-        messages=None,
-        tools=False,
-        out=True,
-        invoke: bool = True,
-        output_fields=None,
-        persist_path=None,
-        branch_config={},
-        explode=False,
-        include_mapping=True,
-        default_key="response",
-        **kwargs,
-    ) -> Any:
-        """
-        Internal method for performing parallel chat conversations with multiple branches.
-
-        Args:
-            instruction (Instruction | str): The instruction for the chat conversation.
-            num_instances (int): The number of branch instances to create (default: 1).
-            context (Optional[Any]): Additional context for the chat conversation.
-            sender (Optional[str]): The sender of the chat message.
-            messages (Optional[Any]): Messages to initialize the branches with.
-            tools (bool): Flag indicating whether to use tools in the chat conversation (default: False).
-            out (bool): Flag indicating whether to return the output of the chat conversation (default: True).
-            invoke (bool): Flag indicating whether to invoke tools during the chat conversation (default: True).
-            output_fields (Optional[Any]): The output fields for the chat conversation.
-            persist_path (Optional[str]): The path to persist the branch data.
-            branch_config (dict): Additional configuration for the branches (default: {}).
-            explode (bool): Flag indicating whether to explode the instruction and context combinations (default: False).
-            include_mapping (bool): Flag indicating whether to include mapping information in the output (default: True).
-            default_key (str): The default key for the output mapping (default: "response").
-            **kwargs: Additional keyword arguments for the chat conversation.
-
-        Returns:
-            Any: The result of the parallel chat conversation.
-        """
-
-        branches = {}
-
-        async def _inner(i, ins_, cxt_):
-
-            branch_ = Branch(
-                messages=messages,
-                service=self.session.default_branch.service,
-                llmconfig=self.session.default_branch.llmconfig,
-                persist_path=persist_path,
-                **branch_config,
-            )
-
-            branch_.branch_name = branch_.id_
-
-            if tools:
-                branch_.tool_manager = self.session.default_branch.tool_manager
-
-            res_ = await branch_.chat(
-                instruction=ins_ or instruction,
-                context=cxt_ or context,
-                sender=sender,
-                tools=tools,
-                invoke=invoke,
-                out=out,
-                output_fields=output_fields,
-                **kwargs,
-            )
-
-            branches[branch_.id_] = branch_
-            if include_mapping:
-                return {
-                    "instruction": ins_ or instruction,
-                    "context": cxt_ or context,
-                    "branch_id": branch_.id_,
-                    default_key: res_,
-                }
-
-            else:
-                return res_
-
-        async def _inner_2(i, ins_=None, cxt_=None):
-            """returns num_instances of branches performing for same task/context"""
-            tasks = [_inner(i, ins_, cxt_) for _ in range(num_instances)]
-            ress = await AsyncUtil.execute_tasks(*tasks)
-            return convert.to_list(ress)
-
-        async def _inner_3(i):
-            """different instructions but same context"""
-            tasks = [_inner_2(i, ins_=ins_) for ins_ in convert.to_list(instruction)]
-            ress = await AsyncUtil.execute_tasks(*tasks)
-            return convert.to_list(ress)
-
-        async def _inner_3_b(i):
-            """different context but same instruction"""
-            tasks = [_inner_2(i, cxt_=cxt_) for cxt_ in convert.to_list(context)]
-            ress = await AsyncUtil.execute_tasks(*tasks)
-            return convert.to_list(ress)
-
-        async def _inner_4(i):
-            """different instructions and different context"""
-
-            tasks = []
-            if explode:
-                tasks = [
-                    _inner_2(i, ins_=ins_, cxt_=cxt_)
-                    for ins_ in convert.to_list(instruction)
-                    for cxt_ in convert.to_list(context)
-                ]
-            else:
-                tasks = [
-                    _inner_2(i, ins_=ins_, cxt_=cxt_)
-                    for ins_, cxt_ in zip(
-                        convert.to_list(instruction), convert.to_list(context)
-                    )
-                ]
-
-            ress = await AsyncUtil.execute_tasks(*tasks)
-            return convert.to_list(ress)
-
-        if len(convert.to_list(instruction)) == 1:
-            if len(convert.to_list(context)) == 1:
-                out_ = await _inner_2(0)
-                self.session.branches.update(branches)
-                return out_
-
-            elif len(convert.to_list(context)) > 1:
-                out_ = await _inner_3_b(0)
-                self.session.branches.update(branches)
-                return out_
-
-        elif len(convert.to_list(instruction)) > 1:
-            if len(convert.to_list(context)) == 1:
-                out_ = await _inner_3(0)
-                self.session.branches.update(branches)
-                return out_
-
-            elif len(convert.to_list(context)) > 1:
-                out_ = await _inner_4(0)
-                self.session.branches.update(branches)
-                return out_
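The core of the removed PolyChat._parallel_chat is a fan-out: the same instruction is sent to num_instances independent branches and the results are gathered into a list. A minimal asyncio sketch of that pattern, assuming a stand-in fake_chat in place of Branch.chat:

# Minimal asyncio sketch of the fan-out at the heart of the removed
# PolyChat._parallel_chat: the same instruction goes to num_instances
# independent workers and the results are collected into a list.
# fake_chat stands in for Branch.chat and is illustrative only.
import asyncio


async def fake_chat(instruction: str, context=None) -> str:
    await asyncio.sleep(0.01)  # stand-in for an LLM call on a fresh branch
    return f"response to: {instruction}"


async def parallel_same_task(instruction: str, num_instances: int = 3) -> list[str]:
    tasks = [fake_chat(instruction) for _ in range(num_instances)]
    return list(await asyncio.gather(*tasks))


print(asyncio.run(parallel_same_task("summarize the report")))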
lionagi/core/mail/schema.py
DELETED
@@ -1,56 +0,0 @@
-from collections import deque
-from enum import Enum
-
-from lionagi.core.schema.base_node import BaseRelatableNode
-
-
-class MailCategory(str, Enum):
-    MESSAGES = "messages"
-    TOOL = "tool"
-    SERVICE = "service"
-    MODEL = "model"
-    NODE = "node"
-    NODE_LIST = "node_list"
-    NODE_ID = "node_id"
-    START = "start"
-    END = "end"
-    CONDITION = "condition"
-
-
-class BaseMail:
-
-    def __init__(self, sender_id, recipient_id, category, package):
-        self.sender_id = sender_id
-        self.recipient_id = recipient_id
-        try:
-            if isinstance(category, str):
-                category = MailCategory(category)
-            if isinstance(category, MailCategory):
-                self.category = category
-            else:
-                raise ValueError(
-                    f"Invalid request title. Valid titles are" f" {list(MailCategory)}"
-                )
-        except Exception as e:
-            raise ValueError(
-                f"Invalid request title. Valid titles are "
-                f"{list(MailCategory)}, Error: {e}"
-            ) from e
-        self.package = package
-
-
-class StartMail(BaseRelatableNode):
-
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self.pending_outs = deque()
-
-    def trigger(self, context, structure_id, executable_id):
-        start_mail_content = {"context": context, "structure_id": structure_id}
-        start_mail = BaseMail(
-            sender_id=self.id_,
-            recipient_id=executable_id,
-            category="start",
-            package=start_mail_content,
-        )
-        self.pending_outs.append(start_mail)
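The removed BaseMail constructor mostly performs category coercion: a raw string is converted to a MailCategory member, and anything else raises a ValueError listing the valid values. A standalone sketch of that validation, using a hypothetical coerce_category helper and a subset of the enum values from the deleted file:

# Standalone sketch of the category validation removed with BaseMail.
# coerce_category is a hypothetical helper; the enum values shown are a
# subset of those listed in the deleted file above.
from enum import Enum


class MailCategory(str, Enum):
    MESSAGES = "messages"
    TOOL = "tool"
    START = "start"
    END = "end"


def coerce_category(category) -> MailCategory:
    # Accept either a raw string or a MailCategory member, as BaseMail did.
    try:
        if isinstance(category, str):
            category = MailCategory(category)
        if isinstance(category, MailCategory):
            return category
        raise ValueError(f"Invalid category. Valid values are {list(MailCategory)}")
    except Exception as e:
        raise ValueError(
            f"Invalid category. Valid values are {list(MailCategory)}, Error: {e}"
        ) from e


print(coerce_category("start"))  # MailCategory.START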