lionagi 0.0.312__py3-none-any.whl → 0.2.1__py3-none-any.whl
- lionagi/__init__.py +61 -3
- lionagi/core/__init__.py +0 -14
- lionagi/core/_setting/_setting.py +59 -0
- lionagi/core/action/__init__.py +14 -0
- lionagi/core/action/function_calling.py +136 -0
- lionagi/core/action/manual.py +1 -0
- lionagi/core/action/node.py +109 -0
- lionagi/core/action/tool.py +114 -0
- lionagi/core/action/tool_manager.py +356 -0
- lionagi/core/agent/__init__.py +0 -3
- lionagi/core/agent/base_agent.py +45 -36
- lionagi/core/agent/eval/evaluator.py +1 -0
- lionagi/core/agent/eval/vote.py +40 -0
- lionagi/core/agent/learn/learner.py +59 -0
- lionagi/core/agent/plan/unit_template.py +1 -0
- lionagi/core/collections/__init__.py +17 -0
- lionagi/core/collections/_logger.py +319 -0
- lionagi/core/collections/abc/__init__.py +53 -0
- lionagi/core/collections/abc/component.py +615 -0
- lionagi/core/collections/abc/concepts.py +297 -0
- lionagi/core/collections/abc/exceptions.py +150 -0
- lionagi/core/collections/abc/util.py +45 -0
- lionagi/core/collections/exchange.py +161 -0
- lionagi/core/collections/flow.py +426 -0
- lionagi/core/collections/model.py +419 -0
- lionagi/core/collections/pile.py +913 -0
- lionagi/core/collections/progression.py +236 -0
- lionagi/core/collections/util.py +64 -0
- lionagi/core/director/direct.py +314 -0
- lionagi/core/director/director.py +2 -0
- lionagi/core/engine/branch_engine.py +333 -0
- lionagi/core/engine/instruction_map_engine.py +204 -0
- lionagi/core/engine/sandbox_.py +14 -0
- lionagi/core/engine/script_engine.py +99 -0
- lionagi/core/executor/base_executor.py +90 -0
- lionagi/core/executor/graph_executor.py +330 -0
- lionagi/core/executor/neo4j_executor.py +384 -0
- lionagi/core/generic/__init__.py +7 -0
- lionagi/core/generic/edge.py +112 -0
- lionagi/core/generic/edge_condition.py +16 -0
- lionagi/core/generic/graph.py +236 -0
- lionagi/core/generic/hyperedge.py +1 -0
- lionagi/core/generic/node.py +220 -0
- lionagi/core/generic/tree.py +48 -0
- lionagi/core/generic/tree_node.py +79 -0
- lionagi/core/mail/__init__.py +7 -3
- lionagi/core/mail/mail.py +25 -0
- lionagi/core/mail/mail_manager.py +142 -58
- lionagi/core/mail/package.py +45 -0
- lionagi/core/mail/start_mail.py +36 -0
- lionagi/core/message/__init__.py +19 -0
- lionagi/core/message/action_request.py +133 -0
- lionagi/core/message/action_response.py +135 -0
- lionagi/core/message/assistant_response.py +95 -0
- lionagi/core/message/instruction.py +234 -0
- lionagi/core/message/message.py +101 -0
- lionagi/core/message/system.py +86 -0
- lionagi/core/message/util.py +283 -0
- lionagi/core/report/__init__.py +4 -0
- lionagi/core/report/base.py +217 -0
- lionagi/core/report/form.py +231 -0
- lionagi/core/report/report.py +166 -0
- lionagi/core/report/util.py +28 -0
- lionagi/core/rule/__init__.py +0 -0
- lionagi/core/rule/_default.py +16 -0
- lionagi/core/rule/action.py +99 -0
- lionagi/core/rule/base.py +238 -0
- lionagi/core/rule/boolean.py +56 -0
- lionagi/core/rule/choice.py +47 -0
- lionagi/core/rule/mapping.py +96 -0
- lionagi/core/rule/number.py +71 -0
- lionagi/core/rule/rulebook.py +109 -0
- lionagi/core/rule/string.py +52 -0
- lionagi/core/rule/util.py +35 -0
- lionagi/core/session/__init__.py +0 -3
- lionagi/core/session/branch.py +431 -0
- lionagi/core/session/directive_mixin.py +287 -0
- lionagi/core/session/session.py +230 -902
- lionagi/core/structure/__init__.py +1 -0
- lionagi/core/structure/chain.py +1 -0
- lionagi/core/structure/forest.py +1 -0
- lionagi/core/structure/graph.py +1 -0
- lionagi/core/structure/tree.py +1 -0
- lionagi/core/unit/__init__.py +5 -0
- lionagi/core/unit/parallel_unit.py +245 -0
- lionagi/core/unit/template/__init__.py +0 -0
- lionagi/core/unit/template/action.py +81 -0
- lionagi/core/unit/template/base.py +51 -0
- lionagi/core/unit/template/plan.py +84 -0
- lionagi/core/unit/template/predict.py +109 -0
- lionagi/core/unit/template/score.py +124 -0
- lionagi/core/unit/template/select.py +104 -0
- lionagi/core/unit/unit.py +362 -0
- lionagi/core/unit/unit_form.py +305 -0
- lionagi/core/unit/unit_mixin.py +1168 -0
- lionagi/core/unit/util.py +71 -0
- lionagi/core/validator/__init__.py +0 -0
- lionagi/core/validator/validator.py +364 -0
- lionagi/core/work/__init__.py +0 -0
- lionagi/core/work/work.py +76 -0
- lionagi/core/work/work_function.py +101 -0
- lionagi/core/work/work_queue.py +103 -0
- lionagi/core/work/worker.py +258 -0
- lionagi/core/work/worklog.py +120 -0
- lionagi/experimental/__init__.py +0 -0
- lionagi/experimental/compressor/__init__.py +0 -0
- lionagi/experimental/compressor/base.py +46 -0
- lionagi/experimental/compressor/llm_compressor.py +247 -0
- lionagi/experimental/compressor/llm_summarizer.py +61 -0
- lionagi/experimental/compressor/util.py +70 -0
- lionagi/experimental/directive/__init__.py +19 -0
- lionagi/experimental/directive/parser/__init__.py +0 -0
- lionagi/experimental/directive/parser/base_parser.py +282 -0
- lionagi/experimental/directive/template/__init__.py +0 -0
- lionagi/experimental/directive/template/base_template.py +79 -0
- lionagi/experimental/directive/template/schema.py +36 -0
- lionagi/experimental/directive/tokenizer.py +73 -0
- lionagi/experimental/evaluator/__init__.py +0 -0
- lionagi/experimental/evaluator/ast_evaluator.py +131 -0
- lionagi/experimental/evaluator/base_evaluator.py +218 -0
- lionagi/experimental/knowledge/__init__.py +0 -0
- lionagi/experimental/knowledge/base.py +10 -0
- lionagi/experimental/knowledge/graph.py +0 -0
- lionagi/experimental/memory/__init__.py +0 -0
- lionagi/experimental/strategies/__init__.py +0 -0
- lionagi/experimental/strategies/base.py +1 -0
- lionagi/integrations/bridge/autogen_/__init__.py +0 -0
- lionagi/integrations/bridge/autogen_/autogen_.py +124 -0
- lionagi/integrations/bridge/langchain_/documents.py +4 -0
- lionagi/integrations/bridge/llamaindex_/index.py +30 -0
- lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +6 -0
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +227 -0
- lionagi/integrations/bridge/llamaindex_/node_parser.py +6 -9
- lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +1 -0
- lionagi/integrations/bridge/transformers_/__init__.py +0 -0
- lionagi/integrations/bridge/transformers_/install_.py +36 -0
- lionagi/integrations/chunker/__init__.py +0 -0
- lionagi/integrations/chunker/chunk.py +312 -0
- lionagi/integrations/config/oai_configs.py +38 -7
- lionagi/integrations/config/ollama_configs.py +1 -1
- lionagi/integrations/config/openrouter_configs.py +14 -2
- lionagi/integrations/loader/__init__.py +0 -0
- lionagi/integrations/loader/load.py +253 -0
- lionagi/integrations/loader/load_util.py +195 -0
- lionagi/integrations/provider/_mapping.py +46 -0
- lionagi/integrations/provider/litellm.py +2 -1
- lionagi/integrations/provider/mlx_service.py +16 -9
- lionagi/integrations/provider/oai.py +91 -4
- lionagi/integrations/provider/ollama.py +7 -6
- lionagi/integrations/provider/openrouter.py +115 -8
- lionagi/integrations/provider/services.py +2 -2
- lionagi/integrations/provider/transformers.py +18 -22
- lionagi/integrations/storage/__init__.py +3 -0
- lionagi/integrations/storage/neo4j.py +665 -0
- lionagi/integrations/storage/storage_util.py +287 -0
- lionagi/integrations/storage/structure_excel.py +285 -0
- lionagi/integrations/storage/to_csv.py +63 -0
- lionagi/integrations/storage/to_excel.py +83 -0
- lionagi/libs/__init__.py +26 -1
- lionagi/libs/ln_api.py +78 -23
- lionagi/libs/ln_context.py +37 -0
- lionagi/libs/ln_convert.py +21 -9
- lionagi/libs/ln_func_call.py +69 -28
- lionagi/libs/ln_image.py +107 -0
- lionagi/libs/ln_knowledge_graph.py +405 -0
- lionagi/libs/ln_nested.py +26 -11
- lionagi/libs/ln_parse.py +110 -14
- lionagi/libs/ln_queue.py +117 -0
- lionagi/libs/ln_tokenize.py +164 -0
- lionagi/{core/prompt/field_validator.py → libs/ln_validate.py} +79 -14
- lionagi/libs/special_tokens.py +172 -0
- lionagi/libs/sys_util.py +107 -2
- lionagi/lions/__init__.py +0 -0
- lionagi/lions/coder/__init__.py +0 -0
- lionagi/lions/coder/add_feature.py +20 -0
- lionagi/lions/coder/base_prompts.py +22 -0
- lionagi/lions/coder/code_form.py +13 -0
- lionagi/lions/coder/coder.py +168 -0
- lionagi/lions/coder/util.py +96 -0
- lionagi/lions/researcher/__init__.py +0 -0
- lionagi/lions/researcher/data_source/__init__.py +0 -0
- lionagi/lions/researcher/data_source/finhub_.py +191 -0
- lionagi/lions/researcher/data_source/google_.py +199 -0
- lionagi/lions/researcher/data_source/wiki_.py +96 -0
- lionagi/lions/researcher/data_source/yfinance_.py +21 -0
- lionagi/tests/integrations/__init__.py +0 -0
- lionagi/tests/libs/__init__.py +0 -0
- lionagi/tests/libs/test_field_validators.py +353 -0
- lionagi/tests/{test_libs → libs}/test_func_call.py +23 -21
- lionagi/tests/{test_libs → libs}/test_nested.py +36 -21
- lionagi/tests/{test_libs → libs}/test_parse.py +1 -1
- lionagi/tests/libs/test_queue.py +67 -0
- lionagi/tests/test_core/collections/__init__.py +0 -0
- lionagi/tests/test_core/collections/test_component.py +206 -0
- lionagi/tests/test_core/collections/test_exchange.py +138 -0
- lionagi/tests/test_core/collections/test_flow.py +145 -0
- lionagi/tests/test_core/collections/test_pile.py +171 -0
- lionagi/tests/test_core/collections/test_progression.py +129 -0
- lionagi/tests/test_core/generic/__init__.py +0 -0
- lionagi/tests/test_core/generic/test_edge.py +67 -0
- lionagi/tests/test_core/generic/test_graph.py +96 -0
- lionagi/tests/test_core/generic/test_node.py +106 -0
- lionagi/tests/test_core/generic/test_tree_node.py +73 -0
- lionagi/tests/test_core/test_branch.py +115 -292
- lionagi/tests/test_core/test_form.py +46 -0
- lionagi/tests/test_core/test_report.py +105 -0
- lionagi/tests/test_core/test_validator.py +111 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/LICENSE +12 -11
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/METADATA +19 -118
- lionagi-0.2.1.dist-info/RECORD +240 -0
- lionagi/core/branch/__init__.py +0 -4
- lionagi/core/branch/base_branch.py +0 -654
- lionagi/core/branch/branch.py +0 -471
- lionagi/core/branch/branch_flow_mixin.py +0 -96
- lionagi/core/branch/executable_branch.py +0 -347
- lionagi/core/branch/util.py +0 -323
- lionagi/core/direct/__init__.py +0 -6
- lionagi/core/direct/predict.py +0 -161
- lionagi/core/direct/score.py +0 -278
- lionagi/core/direct/select.py +0 -169
- lionagi/core/direct/utils.py +0 -87
- lionagi/core/direct/vote.py +0 -64
- lionagi/core/flow/base/baseflow.py +0 -23
- lionagi/core/flow/monoflow/ReAct.py +0 -238
- lionagi/core/flow/monoflow/__init__.py +0 -9
- lionagi/core/flow/monoflow/chat.py +0 -95
- lionagi/core/flow/monoflow/chat_mixin.py +0 -263
- lionagi/core/flow/monoflow/followup.py +0 -214
- lionagi/core/flow/polyflow/__init__.py +0 -1
- lionagi/core/flow/polyflow/chat.py +0 -248
- lionagi/core/mail/schema.py +0 -56
- lionagi/core/messages/__init__.py +0 -3
- lionagi/core/messages/schema.py +0 -533
- lionagi/core/prompt/prompt_template.py +0 -316
- lionagi/core/schema/__init__.py +0 -22
- lionagi/core/schema/action_node.py +0 -29
- lionagi/core/schema/base_mixin.py +0 -296
- lionagi/core/schema/base_node.py +0 -199
- lionagi/core/schema/condition.py +0 -24
- lionagi/core/schema/data_logger.py +0 -354
- lionagi/core/schema/data_node.py +0 -93
- lionagi/core/schema/prompt_template.py +0 -67
- lionagi/core/schema/structure.py +0 -910
- lionagi/core/tool/__init__.py +0 -3
- lionagi/core/tool/tool_manager.py +0 -280
- lionagi/integrations/bridge/pydantic_/base_model.py +0 -7
- lionagi/tests/test_core/test_base_branch.py +0 -427
- lionagi/tests/test_core/test_chat_flow.py +0 -63
- lionagi/tests/test_core/test_mail_manager.py +0 -75
- lionagi/tests/test_core/test_prompts.py +0 -51
- lionagi/tests/test_core/test_session.py +0 -254
- lionagi/tests/test_core/test_session_base_util.py +0 -312
- lionagi/tests/test_core/test_tool_manager.py +0 -95
- lionagi-0.0.312.dist-info/RECORD +0 -111
- /lionagi/core/{branch/base → _setting}/__init__.py +0 -0
- /lionagi/core/{flow → agent/eval}/__init__.py +0 -0
- /lionagi/core/{flow/base → agent/learn}/__init__.py +0 -0
- /lionagi/core/{prompt → agent/plan}/__init__.py +0 -0
- /lionagi/core/{tool/manual.py → agent/plan/plan.py} +0 -0
- /lionagi/{tests/test_integrations → core/director}/__init__.py +0 -0
- /lionagi/{tests/test_libs → core/engine}/__init__.py +0 -0
- /lionagi/{tests/test_libs/test_async.py → core/executor/__init__.py} +0 -0
- /lionagi/tests/{test_libs → libs}/test_api.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_convert.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_sys_util.py +0 -0
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/WHEEL +0 -0
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/top_level.txt +0 -0
lionagi/core/direct/vote.py
DELETED
@@ -1,64 +0,0 @@
-"""
-This module contains the vote function for generating and scoring multiple outputs and selecting the top-ranked ones.
-
-The vote function generates multiple outputs using a specified directive function (default: predict), scores each output
-using the score function, and returns the top-ranked output(s) based on the scores. It allows for customization of the
-number of generations, number of outputs to return, number of scorers, score range, and scorer instruction.
-"""
-
-from lionagi.libs import func_call
-import numpy as np
-from .predict import predict
-from .score import score
-
-
-async def vote(
-    sentence,
-    directive=predict,
-    num_generations=5,
-    num_output=1,
-    num_scorer=5,
-    score_range=(0, 100),
-    num_digit=2,
-    scorer_instruction=None,
-    **kwargs,
-):
-    """
-    Generates and scores multiple outputs and returns the top-ranked output(s).
-
-    Args:
-        sentence (str): The input sentence or context.
-        directive (function): The function used to generate outputs (default: predict).
-        num_generations (int): The number of outputs to generate (default: 5).
-        num_output (int): The number of top-ranked outputs to return (default: 1).
-        num_scorer (int): The number of scorers to use for scoring each output (default: 5).
-        score_range (tuple): The range of scores to assign (default: (0, 100)).
-        num_digit (int): The number of digits after the decimal point for scores (default: 2).
-        scorer_instruction (str): The instruction for the scorers (default: None).
-        **kwargs: Additional keyword arguments to pass to the directive function.
-
-    Returns:
-        The top-ranked output if num_output is 1, or a list of top-ranked outputs if num_output is greater than 1.
-    """
-
-    async def _inner(i):
-        out_ = await directive(sentence, **kwargs)
-        score_ = await score(
-            out_.answer,
-            context=sentence,
-            instruction=scorer_instruction,
-            score_range=score_range,
-            num_digit=num_digit,
-            num_instances=num_scorer,
-            return_template=False,
-        )
-
-        out_.__setattr__("score", score_)
-        return out_
-
-    _outs = await func_call.alcall(list(range(num_generations)), _inner)
-
-    top_index = np.argsort([i.score for i in _outs])[-num_output:]
-    final_output = list(np.array(_outs)[top_index])
-
-    return final_output[0] if len(final_output) == 1 else final_output
lionagi/core/flow/base/baseflow.py
DELETED
@@ -1,23 +0,0 @@
-from abc import ABC
-
-
-class BaseFlow(ABC):
-
-    @classmethod
-    def class_name(cls) -> str:
-        """
-        Returns the class name of the flow.
-        """
-        return cls.__name__
-
-
-class BaseMonoFlow(BaseFlow):
-
-    def __init__(self, branch) -> None:
-        self.branch = branch
-
-
-class BasePolyFlow(BaseFlow):
-
-    def __init__(self, session) -> None:
-        self.session = session
lionagi/core/flow/monoflow/ReAct.py
DELETED
@@ -1,238 +0,0 @@
-"""
-This module contains the MonoReAct class for performing reasoning and action tasks with an LLM.
-
-The MonoReAct class allows for conducting a series of reasoning and action steps with an LLM, with the ability to
-process instructions, system messages, and invoke tools during the conversation. It extends the MonoChat class.
-"""
-
-from typing import Callable
-from .chat import MonoChat
-from lionagi.core.schema import Tool
-from lionagi.core.messages import Instruction
-
-
-class MonoReAct(MonoChat):
-    """
-    A class for performing reasoning and action tasks with an LLM, processing instructions and system messages,
-    and optionally invoking tools.
-
-    Attributes:
-        REASON_PROMPT (str): The default prompt for reasoning steps.
-        ACTION_PROMPT (str): The default prompt for action steps.
-        OUTPUT_PROMPT (str): The default prompt for presenting the final output to the user.
-
-    Methods:
-        async ReAct(self, instruction, context=None, sender=None, system=None, tools=None,
-            num_rounds=1, reason_prompt=None, action_prompt=None, output_prompt=None, **kwargs):
-            Performs a series of reasoning and action steps with an LLM, processing instructions and system messages,
-            and optionally invoking tools.
-
-        _get_prompt(prompt=None, default=None, num_steps=None, instruction=None) -> str:
-            Retrieves the appropriate prompt for the reasoning or action step based on the provided parameters.
-
-        _create_followup_config(self, tools, tool_choice="auto", **kwargs) -> dict:
-            Creates the configuration for the followup steps based on the provided tools and parameters.
-
-        async _ReAct(self, instruction, context=None, sender=None, system=None, tools=None,
-            num_rounds=1, auto=False, reason_prompt=None, action_prompt=None,
-            output_prompt=None, out=True, **kwargs):
-            Performs the actual reasoning and action steps with the LLM, processing instructions and system messages,
-            and optionally invoking tools.
-    """
-
-    REASON_PROMPT = """
-        You have {num_steps} steps left in the current task. If available, integrate previous tool responses.
-        Perform reasoning and prepare an action plan according to available tools only. Apply divide and conquer technique.
-    """
-
-    ACTION_PROMPT = """
-        You have {num_steps} steps left in the current task. If further actions are needed, invoke tool usage.
-        If you are done, present the final result to the user without further tool usage.
-    """
-
-    OUTPUT_PROMPT = "Notice: Present the final output to the user. Original user instruction: {instruction}"
-
-    async def ReAct(
-        self,
-        instruction: Instruction | str | dict[str, dict | str],
-        context=None,
-        sender=None,
-        system=None,
-        tools=None,
-        num_rounds: int = 1,
-        reason_prompt=None,
-        action_prompt=None,
-        output_prompt=None,
-        **kwargs,
-    ):
-        """
-        Performs a series of reasoning and action steps with an LLM, processing instructions and system messages,
-        and optionally invoking tools.
-
-        Args:
-            instruction (Instruction | str | dict[str, dict | str]): The instruction for the task.
-            context (Optional[Any]): Additional context for the task.
-            sender (Optional[str]): The sender of the task message.
-            system (Optional[Any]): System message to be processed during the task.
-            tools (Optional[Any]): Specifies tools to be invoked during the task.
-            num_rounds (int): The number of reasoning and action rounds to perform (default: 1).
-            reason_prompt (Optional[str]): The prompt to use for reasoning steps.
-            action_prompt (Optional[str]): The prompt to use for action steps.
-            output_prompt (Optional[str]): The prompt to use for presenting the final output to the user.
-            **kwargs: Additional keyword arguments for the task.
-
-        Returns:
-            The result of the reasoning and action steps.
-        """
-        return await self._ReAct(
-            instruction,
-            context=context,
-            sender=sender,
-            system=system,
-            tools=tools,
-            num_rounds=num_rounds,
-            reason_prompt=reason_prompt,
-            action_prompt=action_prompt,
-            output_prompt=output_prompt,
-            **kwargs,
-        )
-
-    @staticmethod
-    def _get_prompt(prompt=None, default=None, num_steps=None, instruction=None):
-        """
-        Retrieves the appropriate prompt for the reasoning or action step based on the provided parameters.
-
-        Args:
-            prompt (Optional[str]): The prompt to use for the step.
-            default (Optional[str]): The default prompt to use if no specific prompt is provided.
-            num_steps (Optional[int]): The number of remaining steps in the task.
-            instruction (Optional[Any]): The original user instruction.
-
-        Returns:
-            str: The appropriate prompt for the reasoning or action step.
-        """
-
-        if prompt is not None:
-            return prompt
-
-        try:
-            if num_steps is not None:
-                return default.format(num_steps=num_steps)
-            elif instruction is not None:
-                return default.format(instruction=instruction)
-        except (KeyError, ValueError):
-            pass
-
-        return default
-
-    def _create_followup_config(self, tools, tool_choice="auto", **kwargs):
-        """
-        Creates the configuration for the followup steps based on the provided tools and parameters.
-
-        Args:
-            tools (Optional[Any]): Specifies tools to be invoked during the followup steps.
-            tool_choice (str): The choice of tools to use (default: "auto").
-            **kwargs: Additional keyword arguments for the followup configuration.
-
-        Returns:
-            dict: The configuration for the followup steps.
-
-        Raises:
-            ValueError: If no tools are found and registered.
-        """
-        if tools and isinstance(tools, list) and isinstance(tools[0], (Callable, Tool)):
-            self.branch.tool_manager.register_tools(tools)
-
-        if not self.branch.tool_manager.has_tools:
-            raise ValueError("No tools found. You need to register tools.")
-
-        config = self.branch.tool_manager.parse_tool(tools=True, **kwargs)
-        config["tool_parsed"] = True
-        config["tool_choice"] = tool_choice
-        return config
-
-    async def _ReAct(
-        self,
-        instruction: Instruction | str | dict[str, dict | str],
-        context=None,
-        sender=None,
-        system=None,
-        tools=None,
-        num_rounds: int = 1,
-        auto=False,
-        reason_prompt=None,
-        action_prompt=None,
-        output_prompt=None,
-        out=True,
-        **kwargs,
-    ):
-        """
-        Performs the actual reasoning and action steps with the LLM, processing instructions and system messages,
-        and optionally invoking tools.
-
-        Args:
-            instruction (Instruction | str | dict[str, dict | str]): The instruction for the task.
-            context (Optional[Any]): Additional context for the task.
-            sender (Optional[str]): The sender of the task message.
-            system (Optional[Any]): System message to be processed during the task.
-            tools (Optional[Any]): Specifies tools to be invoked during the task.
-            num_rounds (int): The number of reasoning and action rounds to perform (default: 1).
-            auto (bool): Flag indicating whether to automatically determine if the task is finished (default: False).
-            reason_prompt (Optional[str]): The prompt to use for reasoning steps.
-            action_prompt (Optional[str]): The prompt to use for action steps.
-            output_prompt (Optional[str]): The prompt to use for presenting the final output to the user.
-            out (bool): Flag indicating whether to return the output of the task (default: True).
-            **kwargs: Additional keyword arguments for the task.
-
-        Returns:
-            The result of the reasoning and action steps.
-        """
-        config = self._create_followup_config(tools, **kwargs)
-
-        i = 0
-        _out = ""
-
-        while i < num_rounds:
-            _prompt = self._get_prompt(
-                prompt=reason_prompt,
-                default=self.REASON_PROMPT,
-                num_steps=(num_rounds - i) * 2,
-            )
-            _instruct = {"NOTICE": _prompt}
-
-            # reason
-            if i == 0:
-                _instruct["TASK"] = instruction
-
-                await self.chat(
-                    _instruct, context=context, sender=sender, system=system, **kwargs
-                )
-
-            elif i > 0:
-                await self.chat(_instruct, sender=sender, **kwargs)
-
-            # action
-            _prompt = self._get_prompt(
-                prompt=action_prompt,
-                default=self.ACTION_PROMPT,
-                num_steps=(num_rounds - i) * 2 - 1,
-            )
-
-            _out = await self.chat(_prompt, sender=sender, **config)
-
-            if auto and not self.branch._is_invoked():
-                return _out if out else None
-
-            i += 1
-
-        if auto:
-            if not self.branch._is_invoked():
-                return _out if out else None
-
-            _prompt = self._get_prompt(
-                prompt=output_prompt,
-                default=self.OUTPUT_PROMPT,
-                instruction=instruction,
-            )
-            _out = await self.chat(_prompt, sender=sender, **kwargs)
-        return _out if out else None
lionagi/core/flow/monoflow/chat.py
DELETED
@@ -1,95 +0,0 @@
-"""
-This module contains the MonoChat class for performing chat conversations with an LLM.
-
-The MonoChat class allows for processing instructions and system messages, and optionally invoking tools
-during the chat conversation. It extends the BaseMonoFlow and MonoChatMixin classes.
-"""
-
-from typing import Any
-
-from lionagi.core.flow.base.baseflow import BaseMonoFlow
-from lionagi.core.flow.monoflow.chat_mixin import MonoChatMixin
-
-
-class MonoChat(BaseMonoFlow, MonoChatMixin):
-    """
-    A class for performing a chat conversation with an LLM, processing instructions and system messages,
-    and optionally invoking tools.
-
-    Attributes:
-        branch: The Branch instance to perform chat operations.
-
-    Methods:
-        __init__(self, branch) -> None:
-            Initializes the MonoChat instance.
-
-        async chat(self, instruction=None, context=None, sender=None, system=None, tools=False,
-            out=True, invoke=True, output_fields=None, prompt_template=None, **kwargs) -> Any:
-            Performs a chat conversation with an LLM, processing instructions and system messages,
-            and optionally invoking tools.
-    """
-
-    def __init__(self, branch) -> None:
-        """
-        Initializes the MonoChat instance.
-
-        Args:
-            branch: The Branch instance to perform chat operations.
-        """
-        super().__init__(branch)
-
-    async def chat(
-        self,
-        instruction=None,
-        context=None,
-        sender=None,
-        system=None,
-        tools=False,
-        out: bool = True,
-        invoke: bool = True,
-        output_fields=None,
-        prompt_template=None,
-        **kwargs,
-    ) -> Any:
-        """
-        Performs a chat conversation with an LLM, processing instructions and system messages,
-        and optionally invoking tools.
-
-        Args:
-            instruction (Union[Instruction, str]): The instruction for the chat.
-            context (Optional[Any]): Additional context for the chat.
-            sender (Optional[str]): The sender of the chat message.
-            system (Optional[Union[System, str, Dict[str, Any]]]): System message to be processed.
-            tools (Union[bool, Tool, List[Tool], str, List[str]]): Specifies tools to be invoked.
-            out (bool): If True, outputs the chat response.
-            invoke (bool): If True, invokes tools as part of the chat.
-            output_fields (Optional[Any]): The output fields for the chat.
-            prompt_template (Optional[Any]): The prompt template for the chat.
-            **kwargs: Arbitrary keyword arguments for chat completion.
-
-        Returns:
-            Any: The result of the chat conversation.
-
-        Examples:
-            >>> await MonoChat.chat(branch, "Ask about user preferences")
-        """
-
-        config = self._create_chat_config(
-            instruction=instruction,
-            context=context,
-            sender=sender,
-            system=system,
-            prompt_template=prompt_template,
-            tools=tools,
-            output_fields=output_fields,
-            **kwargs,
-        )
-
-        await self._call_chatcompletion(**config)
-
-        return await self._output(
-            invoke=invoke,
-            out=out,
-            output_fields=output_fields,
-            prompt_template=prompt_template,
-        )
lionagi/core/flow/monoflow/chat_mixin.py
DELETED
@@ -1,263 +0,0 @@
-"""
-This module contains mixins for configuring and invoking chatbots.
-"""
-
-from abc import ABC
-from typing import Any
-
-from lionagi.core.messages.schema import Instruction
-from lionagi.core.schema.base_node import TOOL_TYPE
-from lionagi.libs import (
-    ln_nested as nested,
-    ln_func_call as func_call,
-    ln_convert as convert,
-)
-from lionagi.libs.ln_parse import ParseUtil, StringMatch
-
-
-class MonoChatConfigMixin(ABC):
-    """
-    Mixin class for configuring chatbots.
-
-    Methods:
-        _create_chat_config(self, instruction=None, context=None, sender=None, system=None,
-            output_fields=None, prompt_template=None, tools=False, **kwargs) -> Any:
-            Creates a chat configuration based on the provided parameters.
-    """
-
-    def _create_chat_config(
-        self,
-        instruction: Instruction | str | dict[str, Any] = None,
-        context: Any | None = None,
-        sender: str | None = None,
-        system: str | dict[str, Any] | None = None,
-        output_fields=None,
-        prompt_template=None,
-        tools: TOOL_TYPE = False,
-        **kwargs,
-    ) -> Any:
-        """
-        Creates a chat configuration based on the provided parameters.
-
-        Args:
-            instruction (Instruction | str | dict[str, Any]): The instruction for the chatbot (optional).
-            context (Any): The context for the chatbot (optional).
-            sender (str): The sender of the message (optional).
-            system (str | dict[str, Any]): The system message for the chatbot (optional).
-            output_fields: The output fields for the chatbot (optional).
-            prompt_template: The prompt template for the chatbot (optional).
-            tools (TOOL_TYPE): The tools for the chatbot (default: False).
-            **kwargs: Additional keyword arguments for the chat configuration.
-
-        Returns:
-            Any: The chat configuration.
-        """
-        if system:
-            self.branch.change_first_system_message(system)
-
-        if not prompt_template:
-            self.branch.add_message(
-                instruction=instruction,
-                context=context,
-                sender=sender,
-                output_fields=output_fields,
-            )
-        else:
-            instruct_ = Instruction.from_prompt_template(prompt_template)
-            self.branch.add_message(instruction=instruct_)
-
-        if "tool_parsed" in kwargs:
-            kwargs.pop("tool_parsed")
-            tool_kwarg = {"tools": tools}
-            kwargs = tool_kwarg | kwargs
-        elif tools and self.branch.has_tools:
-            kwargs = self.branch.tool_manager.parse_tool(tools=tools, **kwargs)
-
-        config = {**self.branch.llmconfig, **kwargs}
-        if sender is not None:
-            config["sender"] = sender
-
-        return config
-
-
-class MonoChatInvokeMixin(ABC):
-    """
-    Mixin class for invoking chatbots.
-
-    Methods:
-        async _output(self, invoke, out, output_fields, func_calls_=None, prompt_template=None,
-            return_template=True):
-            Processes the output of the chatbot.
-
-        _return_response(content_, output_fields) -> Any:
-            Returns the response from the chatbot.
-
-        async _invoke_tools(self, content_=None, func_calls_=None):
-            Invokes the tools associated with the chatbot.
-
-        _process_chatcompletion(self, payload, completion, sender):
-            Processes the chat completion.
-
-        async _call_chatcompletion(self, sender=None, with_sender=False, **kwargs):
-            Calls the chat completion API.
-    """
-
-    async def _output(
-        self,
-        invoke,
-        out,
-        output_fields,
-        func_calls_=None,
-        prompt_template=None,
-        return_template=True,
-    ):
-        """
-        Processes the output of the chatbot.
-
-        Args:
-            invoke: Flag indicating whether to invoke the tools.
-            out: Flag indicating whether to return the output.
-            output_fields: The output fields for the chatbot.
-            func_calls_: The function calls for invoking the tools (optional).
-            prompt_template: The prompt template for the chatbot (optional).
-            return_template (bool): Flag indicating whether to return the prompt template (default: True).
-        """
-
-        content_ = self.branch.last_message_content
-
-        if invoke:
-            try:
-                await self._invoke_tools(content_, func_calls_=func_calls_)
-            except Exception:
-                pass
-
-        response_ = self._return_response(content_, output_fields)
-        if prompt_template:
-            prompt_template._process_response(response_)
-            return prompt_template if return_template else prompt_template.out
-
-        if out:
-            return response_
-
-    @staticmethod
-    def _return_response(content_, output_fields):
-        """
-        Returns the response from the chatbot.
-
-        Args:
-            content_: The content of the last message.
-            output_fields: The output fields for the chatbot.
-
-        Returns:
-            Any: The response from the chatbot.
-        """
-        out_ = ""
-
-        if len(content_.items()) == 1 and len(nested.get_flattened_keys(content_)) == 1:
-            key = nested.get_flattened_keys(content_)[0]
-            out_ = content_[key]
-
-        if output_fields:
-            try:
-                if isinstance(out_, dict):
-                    out_ = convert.to_str(out_.values())
-
-                if isinstance(out_, str):
-                    try:
-                        out_ = ParseUtil.md_to_json(out_)
-                    except Exception:
-                        out_ = ParseUtil.md_to_json(out_.replace("'", '"'))
-
-                out_ = StringMatch.correct_keys(output_fields=output_fields, out_=out_)
-            except Exception:
-                pass
-
-        if isinstance(out_, str):
-            try:
-                out_ = ParseUtil.md_to_json(out_)
-                out_ = StringMatch.correct_keys(output_fields=output_fields, out_=out_)
-                return out_
-            except Exception:
-                pass
-
-        return out_
-
-    async def _invoke_tools(self, content_=None, func_calls_=None):
-        """
-        Invokes the tools associated with the chatbot.
-
-        Args:
-            content_: The content of the last message (optional).
-            func_calls_: The function calls for invoking the tools (optional).
-
-        Returns:
-            list: The results of invoking the tools.
-        """
-        if func_calls_ is None and content_ is not None:
-            tool_uses = content_
-            func_calls_ = func_call.lcall(
-                [convert.to_dict(i) for i in tool_uses["action_request"]],
-                self.branch.tool_manager.get_function_call,
-            )
-
-        outs = await func_call.alcall(func_calls_, self.branch.tool_manager.invoke)
-        outs = convert.to_list(outs, flatten=True)
-
-        a = []
-        for out_, f in zip(outs, func_calls_):
-            res = {
-                "function": f[0],
-                "arguments": f[1],
-                "output": out_,
-            }
-            self.branch.add_message(response=res)
-            a.append(res)
-
-        return a
-
-    def _process_chatcompletion(self, payload, completion, sender):
-        """
-        Processes the chat completion.
-
-        Args:
-            payload: The payload for the chat completion.
-            completion: The completed chat response.
-            sender: The sender of the message.
-        """
-        if "choices" in completion:
-            add_msg_config = {"response": completion["choices"][0]}
-            if sender is not None:
-                add_msg_config["sender"] = sender
-
-            self.branch.datalogger.append(input_data=payload, output_data=completion)
-            self.branch.add_message(**add_msg_config)
-            self.branch.status_tracker.num_tasks_succeeded += 1
-        else:
-            self.branch.status_tracker.num_tasks_failed += 1
-
-    async def _call_chatcompletion(self, sender=None, with_sender=False, **kwargs):
-        """
-        Calls the chat completion API.
-
-        Args:
-            sender: The sender of the message (optional).
-            with_sender (bool): Flag indicating whether to include the sender in the chat messages (default: False).
-            **kwargs: Additional keyword arguments for the chat completion API.
-        """
-        messages = (
-            self.branch.chat_messages_with_sender
-            if with_sender
-            else self.branch.chat_messages
-        )
-        payload, completion = await self.branch.service.serve_chat(
-            messages=messages, **kwargs
-        )
-        self._process_chatcompletion(payload, completion, sender)
-
-
-class MonoChatMixin(MonoChatConfigMixin, MonoChatInvokeMixin, ABC):
-    """
-    Mixin class that combines MonoChatConfigMixin and MonoChatInvokeMixin.
-    """
-
-    pass