lionagi 0.0.306__py3-none-any.whl → 0.0.308__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/__init__.py +2 -5
- lionagi/core/__init__.py +7 -5
- lionagi/core/agent/__init__.py +3 -0
- lionagi/core/agent/base_agent.py +10 -12
- lionagi/core/branch/__init__.py +4 -0
- lionagi/core/branch/base_branch.py +81 -81
- lionagi/core/branch/branch.py +16 -28
- lionagi/core/branch/branch_flow_mixin.py +3 -7
- lionagi/core/branch/executable_branch.py +86 -56
- lionagi/core/branch/util.py +77 -162
- lionagi/core/{flow/direct → direct}/__init__.py +1 -1
- lionagi/core/{flow/direct/predict.py → direct/parallel_predict.py} +39 -17
- lionagi/core/direct/parallel_react.py +0 -0
- lionagi/core/direct/parallel_score.py +0 -0
- lionagi/core/direct/parallel_select.py +0 -0
- lionagi/core/direct/parallel_sentiment.py +0 -0
- lionagi/core/direct/predict.py +174 -0
- lionagi/core/{flow/direct → direct}/react.py +2 -2
- lionagi/core/{flow/direct → direct}/score.py +28 -23
- lionagi/core/{flow/direct → direct}/select.py +48 -45
- lionagi/core/direct/utils.py +83 -0
- lionagi/core/flow/monoflow/ReAct.py +6 -5
- lionagi/core/flow/monoflow/__init__.py +9 -0
- lionagi/core/flow/monoflow/chat.py +10 -10
- lionagi/core/flow/monoflow/chat_mixin.py +11 -10
- lionagi/core/flow/monoflow/followup.py +6 -5
- lionagi/core/flow/polyflow/__init__.py +1 -0
- lionagi/core/flow/polyflow/chat.py +15 -3
- lionagi/core/mail/mail_manager.py +18 -19
- lionagi/core/mail/schema.py +5 -4
- lionagi/core/messages/schema.py +18 -20
- lionagi/core/prompt/__init__.py +0 -0
- lionagi/core/prompt/prompt_template.py +0 -0
- lionagi/core/schema/__init__.py +2 -2
- lionagi/core/schema/action_node.py +11 -3
- lionagi/core/schema/base_mixin.py +56 -59
- lionagi/core/schema/base_node.py +34 -37
- lionagi/core/schema/condition.py +24 -0
- lionagi/core/schema/data_logger.py +96 -99
- lionagi/core/schema/data_node.py +19 -19
- lionagi/core/schema/prompt_template.py +0 -0
- lionagi/core/schema/structure.py +171 -169
- lionagi/core/session/__init__.py +1 -3
- lionagi/core/session/session.py +196 -214
- lionagi/core/tool/tool_manager.py +95 -103
- lionagi/integrations/__init__.py +1 -3
- lionagi/integrations/bridge/langchain_/documents.py +17 -18
- lionagi/integrations/bridge/langchain_/langchain_bridge.py +14 -14
- lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +22 -22
- lionagi/integrations/bridge/llamaindex_/node_parser.py +12 -12
- lionagi/integrations/bridge/llamaindex_/reader.py +11 -11
- lionagi/integrations/bridge/llamaindex_/textnode.py +7 -7
- lionagi/integrations/config/openrouter_configs.py +0 -1
- lionagi/integrations/provider/oai.py +26 -26
- lionagi/integrations/provider/services.py +38 -38
- lionagi/libs/__init__.py +34 -1
- lionagi/libs/ln_api.py +211 -221
- lionagi/libs/ln_async.py +53 -60
- lionagi/libs/ln_convert.py +118 -120
- lionagi/libs/ln_dataframe.py +32 -33
- lionagi/libs/ln_func_call.py +334 -342
- lionagi/libs/ln_nested.py +99 -107
- lionagi/libs/ln_parse.py +161 -165
- lionagi/libs/sys_util.py +52 -52
- lionagi/tests/test_core/test_session.py +254 -266
- lionagi/tests/test_core/test_session_base_util.py +299 -300
- lionagi/tests/test_core/test_tool_manager.py +70 -74
- lionagi/tests/test_libs/test_nested.py +2 -7
- lionagi/tests/test_libs/test_parse.py +2 -2
- lionagi/version.py +1 -1
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/METADATA +4 -2
- lionagi-0.0.308.dist-info/RECORD +115 -0
- lionagi/core/flow/direct/utils.py +0 -43
- lionagi-0.0.306.dist-info/RECORD +0 -106
- /lionagi/core/{flow/direct → direct}/sentiment.py +0 -0
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/LICENSE +0 -0
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/WHEEL +0 -0
- {lionagi-0.0.306.dist-info → lionagi-0.0.308.dist-info}/top_level.txt +0 -0
lionagi/core/{flow/direct/predict.py → direct/parallel_predict.py}

@@ -1,19 +1,22 @@
-from lionagi.libs import …
-from …
-from …
+from lionagi.libs import func_call
+from ..branch import Branch
+from ..session import Session
+from .utils import _handle_single_out, _handle_multi_out


-async def …
+async def parallel_predict(
     sentence,
     *,
     num_sentences=1,
     default_key="answer",
     confidence_score=False,
     reason=False,
-    retry_kwargs=…
+    retry_kwargs=None,
     **kwargs,
 ):
-
+    if retry_kwargs is None:
+        retry_kwargs = {}
+    return await _force_parallel_predict(
         sentence,
         num_sentences,
         default_key,
@@ -24,19 +27,26 @@ async def predict(
     )


-async def …
+async def _force_parallel_predict(
     sentence,
     num_sentences,
     default_key="answer",
     confidence_score=False,
     reason=False,
     retry_kwargs={},
+    include_mapping=False,
     **kwargs,
 ):

     async def _inner():
-        out_ = await …
-            sentence,
+        out_ = await _parallel_predict(
+            sentence=sentence,
+            num_sentences=num_sentences,
+            default_key=default_key,
+            confidence_score=confidence_score,
+            reason=reason,
+            include_mapping=include_mapping,
+            **kwargs,
         )
         if out_ is None:
             raise ValueError("No output from the model")
@@ -81,25 +91,37 @@ def _create_predict_config(
     return instruct, output_fields, kwargs


-async def …
+async def _parallel_predict(
     sentence,
     num_sentences,
     default_key="answer",
     confidence_score=False,
     reason=False,
+    include_mapping=False,
     **kwargs,
 ):
     _instruct, _output_fields, _kwargs = _create_predict_config(
-        num_sentences=num_sentences,
-
+        num_sentences=num_sentences,
+        default_key=default_key,
+        confidence_score=confidence_score,
+        reason=reason,
+        **kwargs,
     )

-
+    session = Session()

-    out_ = await …
-        _instruct,
+    out_ = await session.parallel_chat(
+        _instruct,
+        context=sentence,
+        output_fields=_output_fields,
+        include_mapping=include_mapping,
+        **_kwargs,
     )

-    return …
-        out_,
+    return _handle_multi_out(
+        out_,
+        default_key=default_key,
+        to_type="str",
+        to_default=True,
+        include_mapping=include_mapping,
     )
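Below is a minimal usage sketch for the renamed helper; it is not taken from the package docs. The import path simply mirrors the new file location, the keyword names follow the signature visible in the hunks above, passing a list of inputs is an assumption based on the "parallel" naming, and running it requires an LLM provider configured for lionagi (for example an OpenAI API key).

```python
import asyncio

# Hypothetical import path inferred from the new module location.
from lionagi.core.direct.parallel_predict import parallel_predict


async def main():
    # Keyword names follow the signature shown in the diff above; the list
    # input and the shape of the printed result are assumptions.
    out = await parallel_predict(
        ["The sky darkened quickly, and", "She opened the envelope and"],
        num_sentences=1,
        reason=True,
    )
    print(out)


asyncio.run(main())  # assumes provider credentials are already configured
```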
lionagi/core/direct/parallel_react.py: File without changes
lionagi/core/direct/parallel_score.py: File without changes
lionagi/core/direct/parallel_select.py: File without changes
lionagi/core/direct/parallel_sentiment.py: File without changes
lionagi/core/direct/predict.py (new file)

@@ -0,0 +1,174 @@
+from lionagi.libs import func_call
+from ..branch import Branch
+from ..session import Session
+from .utils import _handle_single_out, _handle_multi_out
+
+
+async def predict(
+    sentence,
+    *,
+    num_sentences=1,
+    default_key="answer",
+    confidence_score=False,
+    reason=False,
+    retry_kwargs=None,
+    include_mapping=False,
+    **kwargs,
+):
+    if retry_kwargs is None:
+        retry_kwargs = {}
+    return await _force_predict(
+        sentence=sentence,
+        num_sentences=num_sentences,
+        default_key=default_key,
+        confidence_score=confidence_score,
+        reason=reason,
+        retry_kwargs=retry_kwargs,
+        include_mapping=include_mapping,
+        **kwargs,
+    )
+
+
+async def _force_predict(
+    sentence,
+    num_sentences,
+    default_key="answer",
+    confidence_score=False,
+    reason=False,
+    retry_kwargs={},
+    include_mapping=False,
+    **kwargs,
+):
+
+    async def _inner1():
+        out_ = await _predict(
+            sentence=sentence,
+            num_sentences=num_sentences,
+            default_key=default_key,
+            confidence_score=confidence_score,
+            reason=reason,
+            **kwargs,
+        )
+        if out_ is None:
+            raise ValueError("No output from the model")
+
+        return out_
+
+    async def _inner2():
+        out_ = await _parallel_predict(
+            sentence=sentence,
+            num_sentences=num_sentences,
+            default_key=default_key,
+            confidence_score=confidence_score,
+            reason=reason,
+            include_mapping=include_mapping,
+            **kwargs,
+        )
+
+        if out_ is None:
+            raise ValueError("No output from the model")
+
+        return out_
+
+    if "retries" not in retry_kwargs:
+        retry_kwargs["retries"] = 2
+
+    if "delay" not in retry_kwargs:
+        retry_kwargs["delay"] = 0.5
+
+    if (isinstance(sentence, (list, tuple)) and len(sentence) > 1) or include_mapping:
+        return await func_call.rcall(_inner2, **retry_kwargs)
+
+    return await func_call.rcall(_inner1, **retry_kwargs)
+
+
+def _create_predict_config(
+    num_sentences,
+    default_key="answer",
+    confidence_score=False,
+    reason=False,
+    **kwargs,
+):
+    instruct = {
+        "task": f"predict the next {num_sentences} sentence(s)",
+    }
+    extra_fields = kwargs.pop("output_fields", {})
+
+    output_fields = {default_key: "the predicted sentence(s)"}
+    output_fields = {**output_fields, **extra_fields}
+
+    if reason:
+        output_fields["reason"] = "brief reason for the prediction"
+
+    if confidence_score:
+        output_fields["confidence_score"] = (
+            "a numeric score between 0 to 1 formatted in num:0.2f"
+        )
+
+    if "temperature" not in kwargs:
+        kwargs["temperature"] = 0.1
+
+    return instruct, output_fields, kwargs
+
+
+async def _predict(
+    sentence,
+    num_sentences,
+    default_key="answer",
+    confidence_score=False,
+    reason=False,
+    **kwargs,
+):
+    _instruct, _output_fields, _kwargs = _create_predict_config(
+        num_sentences=num_sentences,
+        default_key=default_key,
+        confidence_score=confidence_score,
+        reason=reason,
+        **kwargs,
+    )
+
+    branch = Branch()
+
+    out_ = await branch.chat(
+        _instruct, context=sentence, output_fields=_output_fields, **_kwargs
+    )
+
+    return _handle_single_out(
+        out_, default_key=default_key, to_type="str", to_default=True
+    )
+
+
+async def _parallel_predict(
+    sentence,
+    num_sentences,
+    default_key="answer",
+    confidence_score=False,
+    reason=False,
+    include_mapping=False,
+    **kwargs,
+):
+    _instruct, _output_fields, _kwargs = _create_predict_config(
+        num_sentences=num_sentences,
+        default_key=default_key,
+        confidence_score=confidence_score,
+        reason=reason,
+        **kwargs,
+    )
+
+    session = Session()
+
+    out_ = await session.parallel_chat(
+        _instruct,
+        context=sentence,
+        output_fields=_output_fields,
+        include_mapping=include_mapping,
+        **_kwargs,
+    )
+
+    return _handle_multi_out(
+        out_,
+        default_key=default_key,
+        to_type="str",
+        to_default=True,
+        include_mapping=include_mapping,
+    )
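The new module keeps a single public entry point, `predict`, and chooses an execution path at call time: a single string goes through the Branch-based `_predict`, while a list or tuple of more than one input (or `include_mapping=True`) goes through `Session.parallel_chat` via `_parallel_predict`; both paths are retried with `func_call.rcall` using defaults of `retries=2` and `delay=0.5`. A hedged usage sketch, assuming a configured LLM provider:

```python
import asyncio

from lionagi.core.direct.predict import predict  # module path of the file added above


async def main():
    # A single string takes the Branch-based path (_inner1 / _predict).
    single = await predict("The deployment finished, and", num_sentences=2)

    # A list of inputs (or include_mapping=True) takes the parallel path
    # (_inner2 / _parallel_predict via Session.parallel_chat).
    batch = await predict(
        ["It began to rain, so", "The tests went green, then"],
        include_mapping=True,
    )
    print(single)
    print(batch)


asyncio.run(main())  # output depends on the configured model; values here are illustrative
```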
lionagi/core/{flow/direct → direct}/score.py

@@ -1,7 +1,5 @@
-from lionagi.…
-from …
-import lionagi.libs.ln_convert as convert
-
+from lionagi.libs import func_call, convert
+from ..branch import Branch
 from .utils import _handle_single_out


@@ -16,20 +14,22 @@ async def score(
     method="llm",
     reason=False,
     confidence_score=False,
-    retry_kwargs=…
+    retry_kwargs=None,
     **kwargs,
 ):
+    if retry_kwargs is None:
+        retry_kwargs = {}
     return await _force_score(
-        context…
-        instruction…
-        score_range…
-        inclusive…
-        num_digit…
-        default_key…
-        method…
-        reason…
-        confidence_score…
-        retry_kwargs…
+        context=context,
+        instruction=instruction,
+        score_range=score_range,
+        inclusive=inclusive,
+        num_digit=num_digit,
+        default_key=default_key,
+        method=method,
+        reason=reason,
+        confidence_score=confidence_score,
+        retry_kwargs=retry_kwargs,
         **kwargs,
     )

@@ -65,7 +65,7 @@ async def _force_score(
             raise ValueError("No output from the model")

         return out_
-
+
     if "retries" not in retry_kwargs:
         retry_kwargs["retries"] = 2

@@ -86,28 +86,28 @@ def _create_score_config(
     **kwargs,
 ):
     instruct = {
-        "task": …
+        "task": "score context according to the following constraints",
         "instruction": convert.to_str(instruction),
         "score_range": convert.to_str(score_range),
         "include_endpoints": "yes" if inclusive else "no",
     }

-    return_precision = …
+    return_precision = ""
     if num_digit == 0:
         return_precision = "integer"
     else:
         return_precision = f"num:{convert.to_str(num_digit)}f"
-
+
     extra_fields = kwargs.pop("output_fields", {})
     output_fields = {default_key: f"""a numeric score as {return_precision}"""}
     output_fields = {**output_fields, **extra_fields}

     if reason:
-        output_fields…
+        output_fields["reason"] = "brief reason for the score"

     if confidence_score:
-        output_fields…
-        …
+        output_fields["confidence_score"] = (
+            "a numeric score between 0 to 1 formatted in num:0.2f"
         )

     if "temperature" not in kwargs:
@@ -143,7 +143,12 @@ async def _score(
     out_ = ""

     if method == "llm":
-        out_ = await branch.chat(…
+        out_ = await branch.chat(
+            _instruct,
+            tools=None,
+            context=context,
+            output_fields=_output_fields,
+            **_kwargs,
         )

     to_num_kwargs = {
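A hedged sketch of calling the updated `score` helper. The keyword names mirror the `_force_score` forwarding call above; the argument values are illustrative and a configured LLM provider is assumed.

```python
import asyncio

from lionagi.core.direct.score import score  # module path after the flow/direct → direct move


async def main():
    result = await score(
        "Replacing bare `except:` with `except Exception:` in MonoReAct.",
        instruction="rate how risky this change is",
        score_range=[0, 10],  # illustrative; the default range is not visible in this hunk
        num_digit=1,
        reason=True,
    )
    print(result)


asyncio.run(main())
```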
lionagi/core/{flow/direct → direct}/select.py

@@ -1,22 +1,23 @@
-from lionagi.libs import …
-from …
-from lionagi.libs.ln_parse import StringMatch
-
+from lionagi.libs import StringMatch, func_call
+from ..branch.branch import Branch
 from .utils import _handle_single_out

+
 async def select(
-    context,
-    choices,
+    context,
+    choices,
     *,
     num_choices=1,
-    method=…
+    method="llm",
     objective=None,
-    default_key=…
-    reason=False,
-    confidence_score=False,
-    retry_kwargs=…
+    default_key="answer",
+    reason=False,
+    confidence_score=False,
+    retry_kwargs=None,
     **kwargs,
 ):
+    if retry_kwargs is None:
+        retry_kwargs = {}
     return await _force_select(
         context=context,
         choices=choices,
@@ -32,19 +33,18 @@ async def select(


 async def _force_select(
-    context,
-    choices,
+    context,
+    choices,
     num_choices=1,
-    method=…
+    method="llm",
     objective=None,
-    default_key=…
-    reason=False,
-    confidence_score=False,
+    default_key="answer",
+    reason=False,
+    confidence_score=False,
     retry_kwargs={},
     **kwargs,
 ):
-
-
+
     async def _inner():
         out_ = await _select(
             context=context,
@@ -61,10 +61,9 @@ async def _force_select(
         if out_ is None:
             raise ValueError("No output from the model")

-        if isinstance(out_, dict):
-
-
-            out_[default_key] = v
+        if isinstance(out_, dict) and out_[default_key] not in choices:
+            v = StringMatch.choose_most_similar(out_.pop(default_key, ""), choices)
+            out_[default_key] = v

         return out_

@@ -78,15 +77,15 @@ async def _force_select(


 def _create_select_config(
-
-
-
-
-
-
-
+    choices,
+    num_choices=1,
+    objective=None,
+    default_key="answer",
+    reason=False,
+    confidence_score=False,
+    **kwargs,
 ):
-
+
     instruct = {"task": f"select {num_choices} from provided", "choices": choices}
     if objective is not None:
         instruct["objective"] = objective
@@ -96,31 +95,31 @@ def _create_select_config(
     output_fields = {**output_fields, **extra_fields}

     if reason:
-        output_fields…
+        output_fields["reason"] = "brief reason for the selection"

     if confidence_score:
-        output_fields…
-        …
+        output_fields["confidence_score"] = (
+            "a numeric score between 0 to 1 formatted in num:0.2f"
         )

     if "temperature" not in kwargs:
         kwargs["temperature"] = 0.1
-
+
     return instruct, output_fields, kwargs


 async def _select(
-    context,
-    choices,
+    context,
+    choices,
     num_choices=1,
-    method=…
+    method="llm",
     objective=None,
-    default_key=…
-    reason=False,
-    confidence_score=False,
+    default_key="answer",
+    reason=False,
+    confidence_score=False,
     **kwargs,
 ):
-
+
     _instruct, _output_fields, _kwargs = _create_select_config(
         choices=choices,
         num_choices=num_choices,
@@ -130,12 +129,16 @@ async def _select(
         confidence_score=confidence_score,
         **kwargs,
     )
-
+
     branch = Branch()
     out_ = ""
     if method == "llm":
         out_ = await branch.chat(
-            _instruct,…
+            _instruct,
+            tools=None,
+            context=context,
+            output_fields=_output_fields,
+            **_kwargs,
         )
-
+
     return _handle_single_out(out_, default_key)
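A hedged sketch of the updated `select` helper. The snapping of out-of-choice answers onto the closest option via `StringMatch.choose_most_similar` is the behaviour added above; the context and choices here are made up, and a configured LLM provider is assumed.

```python
import asyncio

from lionagi.core.direct.select import select  # module path after the flow/direct → direct move


async def main():
    answer = await select(
        "The suite passes locally but fails only on Windows CI runners.",
        choices=["os-specific bug", "flaky test", "dependency mismatch"],
        reason=True,
    )
    # Even a slightly off-choice reply is mapped back onto `choices`
    # by StringMatch.choose_most_similar inside _force_select.
    print(answer)


asyncio.run(main())
```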
lionagi/core/direct/utils.py (new file)

@@ -0,0 +1,83 @@
+import contextlib
+from lionagi.libs import ParseUtil, StringMatch, convert, func_call
+
+
+def _parse_out(out_):
+    if isinstance(out_, str):
+        try:
+            out_ = ParseUtil.md_to_json(out_)
+        except Exception:
+            with contextlib.suppress(Exception):
+                out_ = ParseUtil.fuzzy_parse_json(out_.strip("```json").strip("```"))
+    return out_
+
+
+def _handle_single_out(
+    out_,
+    default_key,
+    choices=None,
+    to_type="dict",
+    to_type_kwargs=None,
+    to_default=True,
+):
+
+    if to_type_kwargs is None:
+        to_type_kwargs = {}
+    out_ = _parse_out(out_)
+
+    if default_key not in out_:
+        raise ValueError(f"Key {default_key} not found in output")
+
+    answer = out_[default_key]
+
+    if (
+        choices is not None
+        and answer not in choices
+        and convert.strip_lower(out_) in ["", "none", "null", "na", "n/a"]
+    ):
+        raise ValueError(f"Answer {answer} not in choices {choices}")
+
+    if to_type == "str":
+        out_[default_key] = convert.to_str(answer, **to_type_kwargs)
+
+    elif to_type == "num":
+        out_[default_key] = convert.to_num(answer, **to_type_kwargs)
+
+    return out_[default_key] if to_default and len(out_.keys()) == 1 else out_
+
+
+def _handle_multi_out(
+    out_,
+    default_key,
+    choices=None,
+    to_type="dict",
+    to_type_kwargs=None,
+    to_default=True,
+    include_mapping=False,
+):
+    if to_type_kwargs is None:
+        to_type_kwargs = {}
+    if include_mapping:
+        for i in out_:
+            i[default_key] = _handle_single_out(
+                i[default_key],
+                choices=choices,
+                default_key=default_key,
+                to_type=to_type,
+                to_type_kwargs=to_type_kwargs,
+                to_default=to_default,
+            )
+    else:
+        _out = []
+        for i in out_:
+            i = _handle_single_out(
+                i,
+                choices=choices,
+                default_key=default_key,
+                to_type=to_type,
+                to_type_kwargs=to_type_kwargs,
+                to_default=to_default,
+            )
+            _out.append(i)
+
+    return out_ if len(out_) > 1 else out_[0]
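A rough illustration of the new output helpers. Assumptions: `ParseUtil.md_to_json` (or the fuzzy fallback) can read a fenced JSON reply, and `convert.to_num` coerces the string to a number; these are private helpers, so importing them directly is for illustration only.

```python
from lionagi.core.direct.utils import _handle_single_out

# A model reply wrapped in a fenced JSON block, the shape _parse_out expects.
raw = '```json\n{"answer": "0.75"}\n```'

# With a single key and to_default=True the value is unwrapped and converted.
value = _handle_single_out(raw, default_key="answer", to_type="num")
print(value)  # expected 0.75, assuming the parse helpers behave as described above
```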
lionagi/core/flow/monoflow/ReAct.py

@@ -42,16 +42,17 @@ class MonoReAct(MonoChat):
         try:
             try:
                 return default.format(num_steps=num_steps)
-            except:
+            except Exception:
                 return default.format(instruction=instruction)
-        except:
+        except Exception:
             return default

     def _create_followup_config(self, tools, auto=True, **kwargs):

-        if tools is not None …
-
-
+        if tools is not None and (
+            isinstance(tools, list) and isinstance(tools[0], Tool)
+        ):
+            self.branch.tool_manager.register_tools(tools)

         if not self.branch.tool_manager.has_tools:
             raise ValueError("No tools found, You need to register tools")