lionagi 0.0.312__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/__init__.py +61 -3
- lionagi/core/__init__.py +0 -14
- lionagi/core/_setting/_setting.py +59 -0
- lionagi/core/action/__init__.py +14 -0
- lionagi/core/action/function_calling.py +136 -0
- lionagi/core/action/manual.py +1 -0
- lionagi/core/action/node.py +109 -0
- lionagi/core/action/tool.py +114 -0
- lionagi/core/action/tool_manager.py +356 -0
- lionagi/core/agent/__init__.py +0 -3
- lionagi/core/agent/base_agent.py +45 -36
- lionagi/core/agent/eval/evaluator.py +1 -0
- lionagi/core/agent/eval/vote.py +40 -0
- lionagi/core/agent/learn/learner.py +59 -0
- lionagi/core/agent/plan/unit_template.py +1 -0
- lionagi/core/collections/__init__.py +17 -0
- lionagi/core/collections/_logger.py +319 -0
- lionagi/core/collections/abc/__init__.py +53 -0
- lionagi/core/collections/abc/component.py +615 -0
- lionagi/core/collections/abc/concepts.py +297 -0
- lionagi/core/collections/abc/exceptions.py +150 -0
- lionagi/core/collections/abc/util.py +45 -0
- lionagi/core/collections/exchange.py +161 -0
- lionagi/core/collections/flow.py +426 -0
- lionagi/core/collections/model.py +419 -0
- lionagi/core/collections/pile.py +913 -0
- lionagi/core/collections/progression.py +236 -0
- lionagi/core/collections/util.py +64 -0
- lionagi/core/director/direct.py +314 -0
- lionagi/core/director/director.py +2 -0
- lionagi/core/engine/branch_engine.py +333 -0
- lionagi/core/engine/instruction_map_engine.py +204 -0
- lionagi/core/engine/sandbox_.py +14 -0
- lionagi/core/engine/script_engine.py +99 -0
- lionagi/core/executor/base_executor.py +90 -0
- lionagi/core/executor/graph_executor.py +330 -0
- lionagi/core/executor/neo4j_executor.py +384 -0
- lionagi/core/generic/__init__.py +7 -0
- lionagi/core/generic/edge.py +112 -0
- lionagi/core/generic/edge_condition.py +16 -0
- lionagi/core/generic/graph.py +236 -0
- lionagi/core/generic/hyperedge.py +1 -0
- lionagi/core/generic/node.py +220 -0
- lionagi/core/generic/tree.py +48 -0
- lionagi/core/generic/tree_node.py +79 -0
- lionagi/core/mail/__init__.py +7 -3
- lionagi/core/mail/mail.py +25 -0
- lionagi/core/mail/mail_manager.py +142 -58
- lionagi/core/mail/package.py +45 -0
- lionagi/core/mail/start_mail.py +36 -0
- lionagi/core/message/__init__.py +19 -0
- lionagi/core/message/action_request.py +133 -0
- lionagi/core/message/action_response.py +135 -0
- lionagi/core/message/assistant_response.py +95 -0
- lionagi/core/message/instruction.py +234 -0
- lionagi/core/message/message.py +101 -0
- lionagi/core/message/system.py +86 -0
- lionagi/core/message/util.py +283 -0
- lionagi/core/report/__init__.py +4 -0
- lionagi/core/report/base.py +217 -0
- lionagi/core/report/form.py +231 -0
- lionagi/core/report/report.py +166 -0
- lionagi/core/report/util.py +28 -0
- lionagi/core/rule/__init__.py +0 -0
- lionagi/core/rule/_default.py +16 -0
- lionagi/core/rule/action.py +99 -0
- lionagi/core/rule/base.py +238 -0
- lionagi/core/rule/boolean.py +56 -0
- lionagi/core/rule/choice.py +47 -0
- lionagi/core/rule/mapping.py +96 -0
- lionagi/core/rule/number.py +71 -0
- lionagi/core/rule/rulebook.py +109 -0
- lionagi/core/rule/string.py +52 -0
- lionagi/core/rule/util.py +35 -0
- lionagi/core/session/__init__.py +0 -3
- lionagi/core/session/branch.py +431 -0
- lionagi/core/session/directive_mixin.py +287 -0
- lionagi/core/session/session.py +230 -902
- lionagi/core/structure/__init__.py +1 -0
- lionagi/core/structure/chain.py +1 -0
- lionagi/core/structure/forest.py +1 -0
- lionagi/core/structure/graph.py +1 -0
- lionagi/core/structure/tree.py +1 -0
- lionagi/core/unit/__init__.py +5 -0
- lionagi/core/unit/parallel_unit.py +245 -0
- lionagi/core/unit/template/__init__.py +0 -0
- lionagi/core/unit/template/action.py +81 -0
- lionagi/core/unit/template/base.py +51 -0
- lionagi/core/unit/template/plan.py +84 -0
- lionagi/core/unit/template/predict.py +109 -0
- lionagi/core/unit/template/score.py +124 -0
- lionagi/core/unit/template/select.py +104 -0
- lionagi/core/unit/unit.py +362 -0
- lionagi/core/unit/unit_form.py +305 -0
- lionagi/core/unit/unit_mixin.py +1168 -0
- lionagi/core/unit/util.py +71 -0
- lionagi/core/validator/__init__.py +0 -0
- lionagi/core/validator/validator.py +364 -0
- lionagi/core/work/__init__.py +0 -0
- lionagi/core/work/work.py +76 -0
- lionagi/core/work/work_function.py +101 -0
- lionagi/core/work/work_queue.py +103 -0
- lionagi/core/work/worker.py +258 -0
- lionagi/core/work/worklog.py +120 -0
- lionagi/experimental/__init__.py +0 -0
- lionagi/experimental/compressor/__init__.py +0 -0
- lionagi/experimental/compressor/base.py +46 -0
- lionagi/experimental/compressor/llm_compressor.py +247 -0
- lionagi/experimental/compressor/llm_summarizer.py +61 -0
- lionagi/experimental/compressor/util.py +70 -0
- lionagi/experimental/directive/__init__.py +19 -0
- lionagi/experimental/directive/parser/__init__.py +0 -0
- lionagi/experimental/directive/parser/base_parser.py +282 -0
- lionagi/experimental/directive/template/__init__.py +0 -0
- lionagi/experimental/directive/template/base_template.py +79 -0
- lionagi/experimental/directive/template/schema.py +36 -0
- lionagi/experimental/directive/tokenizer.py +73 -0
- lionagi/experimental/evaluator/__init__.py +0 -0
- lionagi/experimental/evaluator/ast_evaluator.py +131 -0
- lionagi/experimental/evaluator/base_evaluator.py +218 -0
- lionagi/experimental/knowledge/__init__.py +0 -0
- lionagi/experimental/knowledge/base.py +10 -0
- lionagi/experimental/knowledge/graph.py +0 -0
- lionagi/experimental/memory/__init__.py +0 -0
- lionagi/experimental/strategies/__init__.py +0 -0
- lionagi/experimental/strategies/base.py +1 -0
- lionagi/integrations/bridge/autogen_/__init__.py +0 -0
- lionagi/integrations/bridge/autogen_/autogen_.py +124 -0
- lionagi/integrations/bridge/langchain_/documents.py +4 -0
- lionagi/integrations/bridge/llamaindex_/index.py +30 -0
- lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +6 -0
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +227 -0
- lionagi/integrations/bridge/llamaindex_/node_parser.py +6 -9
- lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +1 -0
- lionagi/integrations/bridge/transformers_/__init__.py +0 -0
- lionagi/integrations/bridge/transformers_/install_.py +36 -0
- lionagi/integrations/chunker/__init__.py +0 -0
- lionagi/integrations/chunker/chunk.py +312 -0
- lionagi/integrations/config/oai_configs.py +38 -7
- lionagi/integrations/config/ollama_configs.py +1 -1
- lionagi/integrations/config/openrouter_configs.py +14 -2
- lionagi/integrations/loader/__init__.py +0 -0
- lionagi/integrations/loader/load.py +253 -0
- lionagi/integrations/loader/load_util.py +195 -0
- lionagi/integrations/provider/_mapping.py +46 -0
- lionagi/integrations/provider/litellm.py +2 -1
- lionagi/integrations/provider/mlx_service.py +16 -9
- lionagi/integrations/provider/oai.py +91 -4
- lionagi/integrations/provider/ollama.py +7 -6
- lionagi/integrations/provider/openrouter.py +115 -8
- lionagi/integrations/provider/services.py +2 -2
- lionagi/integrations/provider/transformers.py +18 -22
- lionagi/integrations/storage/__init__.py +3 -0
- lionagi/integrations/storage/neo4j.py +665 -0
- lionagi/integrations/storage/storage_util.py +287 -0
- lionagi/integrations/storage/structure_excel.py +285 -0
- lionagi/integrations/storage/to_csv.py +63 -0
- lionagi/integrations/storage/to_excel.py +83 -0
- lionagi/libs/__init__.py +26 -1
- lionagi/libs/ln_api.py +78 -23
- lionagi/libs/ln_context.py +37 -0
- lionagi/libs/ln_convert.py +21 -9
- lionagi/libs/ln_func_call.py +69 -28
- lionagi/libs/ln_image.py +107 -0
- lionagi/libs/ln_knowledge_graph.py +405 -0
- lionagi/libs/ln_nested.py +26 -11
- lionagi/libs/ln_parse.py +110 -14
- lionagi/libs/ln_queue.py +117 -0
- lionagi/libs/ln_tokenize.py +164 -0
- lionagi/{core/prompt/field_validator.py → libs/ln_validate.py} +79 -14
- lionagi/libs/special_tokens.py +172 -0
- lionagi/libs/sys_util.py +107 -2
- lionagi/lions/__init__.py +0 -0
- lionagi/lions/coder/__init__.py +0 -0
- lionagi/lions/coder/add_feature.py +20 -0
- lionagi/lions/coder/base_prompts.py +22 -0
- lionagi/lions/coder/code_form.py +13 -0
- lionagi/lions/coder/coder.py +168 -0
- lionagi/lions/coder/util.py +96 -0
- lionagi/lions/researcher/__init__.py +0 -0
- lionagi/lions/researcher/data_source/__init__.py +0 -0
- lionagi/lions/researcher/data_source/finhub_.py +191 -0
- lionagi/lions/researcher/data_source/google_.py +199 -0
- lionagi/lions/researcher/data_source/wiki_.py +96 -0
- lionagi/lions/researcher/data_source/yfinance_.py +21 -0
- lionagi/tests/integrations/__init__.py +0 -0
- lionagi/tests/libs/__init__.py +0 -0
- lionagi/tests/libs/test_field_validators.py +353 -0
- lionagi/tests/{test_libs → libs}/test_func_call.py +23 -21
- lionagi/tests/{test_libs → libs}/test_nested.py +36 -21
- lionagi/tests/{test_libs → libs}/test_parse.py +1 -1
- lionagi/tests/libs/test_queue.py +67 -0
- lionagi/tests/test_core/collections/__init__.py +0 -0
- lionagi/tests/test_core/collections/test_component.py +206 -0
- lionagi/tests/test_core/collections/test_exchange.py +138 -0
- lionagi/tests/test_core/collections/test_flow.py +145 -0
- lionagi/tests/test_core/collections/test_pile.py +171 -0
- lionagi/tests/test_core/collections/test_progression.py +129 -0
- lionagi/tests/test_core/generic/__init__.py +0 -0
- lionagi/tests/test_core/generic/test_edge.py +67 -0
- lionagi/tests/test_core/generic/test_graph.py +96 -0
- lionagi/tests/test_core/generic/test_node.py +106 -0
- lionagi/tests/test_core/generic/test_tree_node.py +73 -0
- lionagi/tests/test_core/test_branch.py +115 -292
- lionagi/tests/test_core/test_form.py +46 -0
- lionagi/tests/test_core/test_report.py +105 -0
- lionagi/tests/test_core/test_validator.py +111 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/LICENSE +12 -11
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/METADATA +19 -118
- lionagi-0.2.1.dist-info/RECORD +240 -0
- lionagi/core/branch/__init__.py +0 -4
- lionagi/core/branch/base_branch.py +0 -654
- lionagi/core/branch/branch.py +0 -471
- lionagi/core/branch/branch_flow_mixin.py +0 -96
- lionagi/core/branch/executable_branch.py +0 -347
- lionagi/core/branch/util.py +0 -323
- lionagi/core/direct/__init__.py +0 -6
- lionagi/core/direct/predict.py +0 -161
- lionagi/core/direct/score.py +0 -278
- lionagi/core/direct/select.py +0 -169
- lionagi/core/direct/utils.py +0 -87
- lionagi/core/direct/vote.py +0 -64
- lionagi/core/flow/base/baseflow.py +0 -23
- lionagi/core/flow/monoflow/ReAct.py +0 -238
- lionagi/core/flow/monoflow/__init__.py +0 -9
- lionagi/core/flow/monoflow/chat.py +0 -95
- lionagi/core/flow/monoflow/chat_mixin.py +0 -263
- lionagi/core/flow/monoflow/followup.py +0 -214
- lionagi/core/flow/polyflow/__init__.py +0 -1
- lionagi/core/flow/polyflow/chat.py +0 -248
- lionagi/core/mail/schema.py +0 -56
- lionagi/core/messages/__init__.py +0 -3
- lionagi/core/messages/schema.py +0 -533
- lionagi/core/prompt/prompt_template.py +0 -316
- lionagi/core/schema/__init__.py +0 -22
- lionagi/core/schema/action_node.py +0 -29
- lionagi/core/schema/base_mixin.py +0 -296
- lionagi/core/schema/base_node.py +0 -199
- lionagi/core/schema/condition.py +0 -24
- lionagi/core/schema/data_logger.py +0 -354
- lionagi/core/schema/data_node.py +0 -93
- lionagi/core/schema/prompt_template.py +0 -67
- lionagi/core/schema/structure.py +0 -910
- lionagi/core/tool/__init__.py +0 -3
- lionagi/core/tool/tool_manager.py +0 -280
- lionagi/integrations/bridge/pydantic_/base_model.py +0 -7
- lionagi/tests/test_core/test_base_branch.py +0 -427
- lionagi/tests/test_core/test_chat_flow.py +0 -63
- lionagi/tests/test_core/test_mail_manager.py +0 -75
- lionagi/tests/test_core/test_prompts.py +0 -51
- lionagi/tests/test_core/test_session.py +0 -254
- lionagi/tests/test_core/test_session_base_util.py +0 -312
- lionagi/tests/test_core/test_tool_manager.py +0 -95
- lionagi-0.0.312.dist-info/RECORD +0 -111
- /lionagi/core/{branch/base → _setting}/__init__.py +0 -0
- /lionagi/core/{flow → agent/eval}/__init__.py +0 -0
- /lionagi/core/{flow/base → agent/learn}/__init__.py +0 -0
- /lionagi/core/{prompt → agent/plan}/__init__.py +0 -0
- /lionagi/core/{tool/manual.py → agent/plan/plan.py} +0 -0
- /lionagi/{tests/test_integrations → core/director}/__init__.py +0 -0
- /lionagi/{tests/test_libs → core/engine}/__init__.py +0 -0
- /lionagi/{tests/test_libs/test_async.py → core/executor/__init__.py} +0 -0
- /lionagi/tests/{test_libs → libs}/test_api.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_convert.py +0 -0
- /lionagi/tests/{test_libs → libs}/test_sys_util.py +0 -0
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/WHEEL +0 -0
- {lionagi-0.0.312.dist-info → lionagi-0.2.1.dist-info}/top_level.txt +0 -0
lionagi/core/direct/predict.py
DELETED
@@ -1,161 +0,0 @@
"""
This module contains the PredictTemplate class for predicting the next sentence(s) based on a given sentence.

The PredictTemplate class is a subclass of ScoredTemplate and provides functionality for predicting the next sentence(s)
using a language model. It includes fields for the input sentence, number of sentences to predict, predicted answer,
confidence score, and reason for the prediction.
"""

from pydantic import Field
from lionagi.libs import func_call
from ..prompt.prompt_template import ScoredTemplate
from ..branch import Branch


class PredictTemplate(ScoredTemplate):
    """
    A class for predicting the next sentence(s) based on a given sentence.

    Attributes:
        template_name (str): The name of the predict template (default: "default_predict_template").
        sentence (str | list | dict): The given sentence(s) to predict.
        num_sentences (int): The number of sentences to predict.
        answer (str | list): The predicted sentence(s).
        signature (str): The signature indicating the input and output fields (default: "sentence -> answer").

    Methods:
        __init__(self, sentence=None, num_sentences=None, confidence_score=False, reason=False, **kwargs):
            Initializes a new instance of the PredictTemplate class.

        async predict(sentence=None, num_sentences=1, confidence_score=False, reason=False, retries=2,
            delay=0.5, backoff_factor=2, default_value=None, timeout=None, branch_name=None,
            system=None, messages=None, service=None, sender=None, llmconfig=None, tools=None,
            datalogger=None, persist_path=None, tool_manager=None, **kwargs) -> PredictTemplate:
            Predicts the next sentence(s) based on the given sentence using a language model.
    """

    template_name: str = "default_predict"
    sentence: str | list | dict = Field(
        default_factory=str, description="the given sentence(s) to predict"
    )
    num_sentences: int = Field(
        default_factory=int, description="the number of sentences to predict"
    )
    answer: str | list = Field(
        default_factory=str, description="the predicted sentence(s)"
    )
    signature: str = "sentence -> answer"

    def __init__(
        self,
        sentence=None,
        num_sentences=None,
        confidence_score=False,
        reason=False,
        **kwargs,
    ):
        """
        Initializes a new instance of the PredictTemplate class.

        Args:
            sentence (Optional[str | list | dict]): The given sentence(s) to predict.
            num_sentences (Optional[int]): The number of sentences to predict.
            confidence_score (bool): Whether to include the confidence score in the output (default: False).
            reason (bool): Whether to include the reason for the prediction in the output (default: False).
            **kwargs: Additional keyword arguments.
        """
        super().__init__(**kwargs)

        self.sentence = sentence
        self.num_sentences = num_sentences
        self.task = f"predict the next {self.num_sentences} sentence(s)"

        if reason:
            self.output_fields.append("reason")

        if confidence_score:
            self.output_fields.append("confidence_score")


async def predict(
    sentence=None,
    num_sentences=1,
    confidence_score=False,
    reason=False,
    retries=2,
    delay=0.5,
    backoff_factor=2,
    default_value=None,
    timeout=None,
    branch_name=None,
    system=None,
    messages=None,
    service=None,
    sender=None,
    llmconfig=None,
    tools=None,
    datalogger=None,
    persist_path=None,
    tool_manager=None,
    **kwargs,
) -> "PredictTemplate":
    """
    Predicts the next sentence(s) based on the given sentence using a language model.

    Args:
        sentence (Optional[str | list | dict]): The given sentence(s) to predict.
        num_sentences (int): The number of sentences to predict (default: 1).
        confidence_score (bool): Whether to include the confidence score in the output (default: False).
        reason (bool): Whether to include the reason for the prediction in the output (default: False).
        retries (int): The number of retries for the API call (default: 2).
        delay (float): The initial delay between retries in seconds (default: 0.5).
        backoff_factor (float): The backoff factor for exponential delay between retries (default: 2).
        default_value (Optional[Any]): The default value to return if the API call fails (default: None).
        timeout (Optional[float]): The timeout for the API call in seconds (default: None).
        branch_name (Optional[str]): The name of the branch to use for prediction.
        system (Optional[Any]): The system configuration for the branch.
        messages (Optional[Any]): The messages to initialize the branch with.
        service (Optional[Any]): The service to use for prediction.
        sender (Optional[str]): The sender of the prediction request.
        llmconfig (Optional[Any]): The configuration for the language model.
        tools (Optional[Any]): The tools to use for prediction.
        datalogger (Optional[Any]): The data logger for the branch.
        persist_path (Optional[str]): The path to persist the branch data.
        tool_manager (Optional[Any]): The tool manager for the branch.
        **kwargs: Additional keyword arguments for the API call.

    Returns:
        PredictTemplate: The predict template with the predicted sentence(s).
    """
    branch = Branch(
        name=branch_name,
        system=system,
        messages=messages,
        service=service,
        sender=sender,
        llmconfig=llmconfig,
        tools=tools,
        datalogger=datalogger,
        persist_path=persist_path,
        tool_manager=tool_manager,
    )

    predict_template = PredictTemplate(
        sentence=sentence,
        num_sentences=num_sentences,
        confidence_score=confidence_score,
        reason=reason,
    )

    await func_call.rcall(
        branch.chat,
        prompt_template=predict_template,
        retries=retries,
        delay=delay,
        backoff_factor=backoff_factor,
        default=default_value,
        timeout=timeout,
        **kwargs,
    )

    return predict_template
lionagi/core/direct/score.py
DELETED
@@ -1,278 +0,0 @@
"""
This module contains the ScoreTemplate class and related functions for scoring a given context using a language model.

The ScoreTemplate class is a subclass of ScoredTemplate and provides functionality for scoring a given context
based on specified instructions, score range, and other parameters. It includes fields for the input sentence,
score range, inclusive flag, number of digits, confidence score, and reason for the score.

The module also includes functions for scoring a single instance or multiple instances of the context using the
ScoreTemplate class and a language model.
"""

from pydantic import Field
import numpy as np
from lionagi.libs import func_call, convert
from ..prompt.prompt_template import ScoredTemplate
from ..branch import Branch


class ScoreTemplate(ScoredTemplate):
    """
    A class for scoring a given context using a language model.

    Attributes:
        template_name (str): The name of the score template (default: "default_score").
        sentence (str | list | dict): The given context to score.
        answer (float): The numeric score for the context.
        signature (str): The signature indicating the input and output fields (default: "sentence -> answer").

    Methods:
        __init__(self, sentence=None, instruction=None, score_range=(1, 10), inclusive=True, num_digit=0,
            confidence_score=False, reason=False, **kwargs):
            Initializes a new instance of the ScoreTemplate class.
    """

    template_name: str = "default_score"
    sentence: str | list | dict = Field(
        default_factory=str, description="the given context to score"
    )
    answer: float = Field(default_factory=float, description=f"a numeric score")
    signature: str = "sentence -> answer"

    def __init__(
        self,
        sentence=None,
        instruction=None,
        score_range=(1, 10),
        inclusive=True,
        num_digit=0,
        confidence_score=False,
        reason=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.sentence = sentence

        return_precision = ""
        if num_digit == 0:
            return_precision = "integer"
        else:
            return_precision = f"num:{convert.to_str(num_digit)}f"

        self.task = f"""
        score context according to the following constraints
        1. objective, {convert.to_str(instruction)}
        2. score range, {convert.to_str(score_range)}
        3. include_endpoints, {"yes" if inclusive else "no"}
        4. format the score in {return_precision}
        """

        if reason:
            self.output_fields.append("reason")

        if confidence_score:
            self.output_fields.append("confidence_score")

        self.out_validation_kwargs["answer"] = {
            "upper_bound": score_range[1],
            "lower_bound": score_range[0],
            "num_type": int if num_digit == 0 else float,
            "precision": num_digit if num_digit != 0 else None,
        }


async def _score(
    sentence,
    instruction=None,
    score_range=(1, 10),
    inclusive=True,
    num_digit=0,
    confidence_score=False,
    reason=False,
    retries=2,
    delay=0.5,
    backoff_factor=2,
    default_value=None,
    timeout=None,
    branch_name=None,
    system=None,
    messages=None,
    service=None,
    sender=None,
    llmconfig=None,
    tools=None,
    datalogger=None,
    persist_path=None,
    tool_manager=None,
    **kwargs,
):
    """
    Scores a given context using a language model.

    Args:
        sentence (str | list | dict): The given context to score.
        instruction (Optional[str]): The instruction for scoring the context.
        score_range (tuple): The range of valid scores (default: (1, 10)).
        inclusive (bool): Whether to include the endpoints of the score range (default: True).
        num_digit (int): The number of digits after the decimal point for the score (default: 0).
        confidence_score (bool): Whether to include the confidence score in the output (default: False).
        reason (bool): Whether to include the reason for the score in the output (default: False).
        retries (int): The number of retries for the API call (default: 2).
        delay (float): The initial delay between retries in seconds (default: 0.5).
        backoff_factor (float): The backoff factor for exponential delay between retries (default: 2).
        default_value (Optional[Any]): The default value to return if the API call fails (default: None).
        timeout (Optional[float]): The timeout for the API call in seconds (default: None).
        branch_name (Optional[str]): The name of the branch to use for scoring.
        system (Optional[Any]): The system configuration for the branch.
        messages (Optional[Any]): The messages to initialize the branch with.
        service (Optional[Any]): The service to use for scoring.
        sender (Optional[str]): The sender of the scoring request.
        llmconfig (Optional[Any]): The configuration for the language model.
        tools (Optional[Any]): The tools to use for scoring.
        datalogger (Optional[Any]): The data logger for the branch.
        persist_path (Optional[str]): The path to persist the branch data.
        tool_manager (Optional[Any]): The tool manager for the branch.
        **kwargs: Additional keyword arguments for the API call.

    Returns:
        ScoreTemplate: The score template with the scored context.
    """

    if "temperature" not in kwargs:
        kwargs["temperature"] = 0.1

    instruction = instruction or ""

    branch = Branch(
        name=branch_name,
        system=system,
        messages=messages,
        service=service,
        sender=sender,
        llmconfig=llmconfig,
        tools=tools,
        datalogger=datalogger,
        persist_path=persist_path,
        tool_manager=tool_manager,
    )

    _template = ScoreTemplate(
        sentence=sentence,
        instruction=instruction,
        score_range=score_range,
        inclusive=inclusive,
        num_digit=num_digit,
        confidence_score=confidence_score,
        reason=reason,
    )

    await func_call.rcall(
        branch.chat,
        prompt_template=_template,
        retries=retries,
        delay=delay,
        backoff_factor=backoff_factor,
        default=default_value,
        timeout=timeout,
        **kwargs,
    )

    return _template


async def score(
    sentence,
    num_instances=1,
    instruction=None,
    score_range=(1, 10),
    inclusive=True,
    num_digit=0,
    confidence_score=False,
    reason=False,
    retries=2,
    delay=0.5,
    backoff_factor=2,
    default_value=None,
    timeout=None,
    branch_name=None,
    system=None,
    messages=None,
    service=None,
    sender=None,
    llmconfig=None,
    tools=None,
    datalogger=None,
    persist_path=None,
    tool_manager=None,
    return_template=True,
    **kwargs,
) -> ScoreTemplate | float:
    """
    Scores a given context using a language model, with the option to score multiple instances.

    Args:
        sentence (str | list | dict): The given context to score.
        num_instances (int): The number of instances to score (default: 1).
        instruction (Optional[str]): The instruction for scoring the context.
        score_range (tuple): The range of valid scores (default: (1, 10)).
        inclusive (bool): Whether to include the endpoints of the score range (default: True).
        num_digit (int): The number of digits after the decimal point for the score (default: 0).
        confidence_score (bool): Whether to include the confidence score in the output (default: False).
        reason (bool): Whether to include the reason for the score in the output (default: False).
        retries (int): The number of retries for the API call (default: 2).
        delay (float): The initial delay between retries in seconds (default: 0.5).
        backoff_factor (float): The backoff factor for exponential delay between retries (default: 2).
        default_value (Optional[Any]): The default value to return if the API call fails (default: None).
        timeout (Optional[float]): The timeout for the API call in seconds (default: None).
        branch_name (Optional[str]): The name of the branch to use for scoring.
        system (Optional[Any]): The system configuration for the branch.
        messages (Optional[Any]): The messages to initialize the branch with.
        service (Optional[Any]): The service to use for scoring.
        sender (Optional[str]): The sender of the scoring request.
        llmconfig (Optional[Any]): The configuration for the language model.
        tools (Optional[Any]): The tools to use for scoring.
        datalogger (Optional[Any]): The data logger for the branch.
        persist_path (Optional[str]): The path to persist the branch data.
        tool_manager (Optional[Any]): The tool manager for the branch.
        return_template (bool): Whether to return the score template or only the score (default: True).
        **kwargs: Additional keyword arguments for the API call.

    Returns:
        ScoreTemplate | float: The score template with the scored context or the average score if `return_template` is False.
    """

    async def _inner(i=0):
        return await _score(
            sentence=sentence,
            instruction=instruction,
            score_range=score_range,
            inclusive=inclusive,
            num_digit=num_digit,
            confidence_score=confidence_score,
            reason=reason,
            retries=retries,
            delay=delay,
            backoff_factor=backoff_factor,
            default_value=default_value,
            timeout=timeout,
            branch_name=branch_name,
            system=system,
            messages=messages,
            service=service,
            sender=sender,
            llmconfig=llmconfig,
            tools=tools,
            datalogger=datalogger,
            persist_path=persist_path,
            tool_manager=tool_manager,
            **kwargs,
        )

    if num_instances == 1:
        _out = await _inner()
        return _out if return_template else _out.answer

    elif num_instances > 1:
        _outs = await func_call.alcall(range(num_instances), _inner)
        return _outs if return_template else np.mean([i.answer for i in _outs])
lionagi/core/direct/select.py
DELETED
@@ -1,169 +0,0 @@
"""
This module contains the SelectTemplate class for selecting an item from given choices based on a given context.

The SelectTemplate class is a subclass of ScoredTemplate and provides functionality for selecting an item from
given choices using a language model. It includes fields for the input sentence, choices, selected answer,
confidence score, and reason for the selection.

The module also includes a function for selecting an item from given choices using the SelectTemplate class
and a language model.
"""

from enum import Enum
from pydantic import Field

from lionagi.libs import func_call, StringMatch
from ..prompt.prompt_template import ScoredTemplate
from ..branch import Branch


class SelectTemplate(ScoredTemplate):
    """
    A class for selecting an item from given choices based on a given context.

    Attributes:
        template_name (str): The name of the select template (default: "default_select").
        sentence (str | list | dict): The given context.
        answer (Enum | str): The selected item from the given choices.
        signature (str): The signature indicating the input and output fields (default: "sentence -> answer").

    Methods:
        __init__(self, sentence=None, choices=None, instruction=None, reason=False, confidence_score=False, **kwargs):
            Initializes a new instance of the SelectTemplate class.
    """

    template_name: str = "default_select"
    sentence: str | list | dict = Field(
        default_factory=str, description="the given context"
    )
    answer: Enum | str = Field(
        default_factory=str, description="selection from given choices"
    )

    signature: str = "sentence -> answer"

    def __init__(
        self,
        sentence=None,
        choices=None,
        instruction=None,
        reason=False,
        confidence_score=False,
        **kwargs,
    ):
        """
        Initializes a new instance of the SelectTemplate class.

        Args:
            sentence (Optional[str | list | dict]): The given context.
            choices (Optional[list]): The list of choices to select from.
            instruction (Optional[str]): The instruction for selection.
            reason (bool): Whether to include the reason for the selection in the output (default: False).
            confidence_score (bool): Whether to include the confidence score in the output (default: False).
            **kwargs: Additional keyword arguments.
        """
        super().__init__(**kwargs)

        self.sentence = sentence
        self.choices = choices
        self.task = f"select 1 item, from provided choices {choices}."
        if instruction:
            self.task += f"objetive {instruction}."

        if reason:
            self.output_fields.append("reason")

        if confidence_score:
            self.output_fields.append("confidence_score")


async def select(
    sentence,
    choices=None,
    instruction=None,
    confidence_score=False,
    reason=False,
    retries=2,
    delay=0.5,
    backoff_factor=2,
    default_value=None,
    timeout=None,
    branch_name=None,
    system=None,
    messages=None,
    service=None,
    sender=None,
    llmconfig=None,
    tools=None,
    datalogger=None,
    persist_path=None,
    tool_manager=None,
    **kwargs,
):
    """
    Selects an item from given choices based on a given context using a language model.

    Args:
        sentence (str | list | dict): The given context.
        choices (Optional[list]): The list of choices to select from.
        instruction (Optional[str]): The instruction for selection.
        confidence_score (bool): Whether to include the confidence score in the output (default: False).
        reason (bool): Whether to include the reason for the selection in the output (default: False).
        retries (int): The number of retries for the API call (default: 2).
        delay (float): The initial delay between retries in seconds (default: 0.5).
        backoff_factor (float): The backoff factor for exponential delay between retries (default: 2).
        default_value (Optional[Any]): The default value to return if the API call fails (default: None).
        timeout (Optional[float]): The timeout for the API call in seconds (default: None).
        branch_name (Optional[str]): The name of the branch to use for selection.
        system (Optional[Any]): The system configuration for the branch.
        messages (Optional[Any]): The messages to initialize the branch with.
        service (Optional[Any]): The service to use for selection.
        sender (Optional[str]): The sender of the selection request.
        llmconfig (Optional[Any]): The configuration for the language model.
        tools (Optional[Any]): The tools to use for selection.
        datalogger (Optional[Any]): The data logger for the branch.
        persist_path (Optional[str]): The path to persist the branch data.
        tool_manager (Optional[Any]): The tool manager for the branch.
        **kwargs: Additional keyword arguments for the API call.

    Returns:
        SelectTemplate: The select template with the selected item.
    """
    branch = Branch(
        name=branch_name,
        system=system,
        messages=messages,
        service=service,
        sender=sender,
        llmconfig=llmconfig,
        tools=tools,
        datalogger=datalogger,
        persist_path=persist_path,
        tool_manager=tool_manager,
    )

    _template = SelectTemplate(
        sentence=sentence,
        choices=choices,
        instruction=instruction,
        confidence_score=confidence_score,
        reason=reason,
    )

    await func_call.rcall(
        branch.chat,
        prompt_template=_template,
        retries=retries,
        delay=delay,
        backoff_factor=backoff_factor,
        default=default_value,
        timeout=timeout,
        **kwargs,
    )

    ans = _template.answer

    if ans not in _template.choices:
        _template.answer = StringMatch.choose_most_similar(ans, _template.choices)

    return _template
lionagi/core/direct/utils.py
DELETED
@@ -1,87 +0,0 @@
# import contextlib
# from lionagi.libs import ParseUtil, StringMatch, convert, func_call


# def _parse_out(out_):
#     if isinstance(out_, str):
#         try:
#             out_ = ParseUtil.md_to_json(out_)
#         except Exception:
#             with contextlib.suppress(Exception):
#                 out_ = ParseUtil.fuzzy_parse_json(out_.strip("```json").strip("```"))
#     return out_


# def _handle_single_out(
#     out_,
#     default_key="answer",
#     choices=None,
#     to_type="dict",
#     to_type_kwargs=None,
#     to_default=True,
# ):

#     if to_type_kwargs is None:
#         to_type_kwargs = {}
#     out_ = _parse_out(out_)

#     if default_key not in out_:
#         raise ValueError(f"Key {default_key} not found in output")

#     answer = out_[default_key]

#     if (
#         choices is not None
#         and answer not in choices
#         and convert.strip_lower(out_) in ["", "none", "null", "na", "n/a"]
#     ):
#         raise ValueError(f"Answer {answer} not in choices {choices}")

#     if to_type == "str":
#         answer = convert.to_str(answer, **to_type_kwargs)

#     elif to_type == "num":
#         answer = convert.to_num(answer, **to_type_kwargs)

#     out_[default_key] = answer

#     return answer if to_default else out_


# def _handle_multi_out(
#     out_,
#     default_key="answer",
#     choices=None,
#     to_type="dict",
#     to_type_kwargs=None,
#     to_default=True,
#     include_mapping=False,
# ):
#     if to_type_kwargs is None:
#         to_type_kwargs = {}

#     if include_mapping:
#         for i in out_:
#             i[default_key] = _handle_single_out(
#                 i[default_key],
#                 choices=choices,
#                 default_key=default_key,
#                 to_type=to_type,
#                 to_type_kwargs=to_type_kwargs,
#                 to_default=to_default,
#             )
#     else:
#         _out = []
#         for i in out_:
#             i = _handle_single_out(
#                 i,
#                 choices=choices,
#                 default_key=default_key,
#                 to_type=to_type,
#                 to_type_kwargs=to_type_kwargs,
#                 to_default=to_default,
#             )
#             _out.append(i)
#         return _out

#     return out_ if len(out_) > 1 else out_[0]