lionagi 0.0.305__py3-none-any.whl → 0.0.307__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (84) hide show
  1. lionagi/__init__.py +2 -5
  2. lionagi/core/__init__.py +7 -4
  3. lionagi/core/agent/__init__.py +3 -0
  4. lionagi/core/agent/base_agent.py +46 -0
  5. lionagi/core/branch/__init__.py +4 -0
  6. lionagi/core/branch/base/__init__.py +0 -0
  7. lionagi/core/branch/base_branch.py +100 -78
  8. lionagi/core/branch/branch.py +22 -34
  9. lionagi/core/branch/branch_flow_mixin.py +3 -7
  10. lionagi/core/branch/executable_branch.py +192 -0
  11. lionagi/core/branch/util.py +77 -162
  12. lionagi/core/direct/__init__.py +13 -0
  13. lionagi/core/direct/parallel_predict.py +127 -0
  14. lionagi/core/direct/parallel_react.py +0 -0
  15. lionagi/core/direct/parallel_score.py +0 -0
  16. lionagi/core/direct/parallel_select.py +0 -0
  17. lionagi/core/direct/parallel_sentiment.py +0 -0
  18. lionagi/core/direct/predict.py +174 -0
  19. lionagi/core/direct/react.py +33 -0
  20. lionagi/core/direct/score.py +163 -0
  21. lionagi/core/direct/select.py +144 -0
  22. lionagi/core/direct/sentiment.py +51 -0
  23. lionagi/core/direct/utils.py +83 -0
  24. lionagi/core/flow/__init__.py +0 -3
  25. lionagi/core/flow/monoflow/{mono_react.py → ReAct.py} +52 -9
  26. lionagi/core/flow/monoflow/__init__.py +9 -0
  27. lionagi/core/flow/monoflow/{mono_chat.py → chat.py} +11 -11
  28. lionagi/core/flow/monoflow/{mono_chat_mixin.py → chat_mixin.py} +33 -27
  29. lionagi/core/flow/monoflow/{mono_followup.py → followup.py} +7 -6
  30. lionagi/core/flow/polyflow/__init__.py +1 -0
  31. lionagi/core/flow/polyflow/{polychat.py → chat.py} +15 -3
  32. lionagi/core/mail/__init__.py +8 -0
  33. lionagi/core/mail/mail_manager.py +88 -40
  34. lionagi/core/mail/schema.py +32 -6
  35. lionagi/core/messages/__init__.py +3 -0
  36. lionagi/core/messages/schema.py +56 -25
  37. lionagi/core/prompt/__init__.py +0 -0
  38. lionagi/core/prompt/prompt_template.py +0 -0
  39. lionagi/core/schema/__init__.py +7 -5
  40. lionagi/core/schema/action_node.py +29 -0
  41. lionagi/core/schema/base_mixin.py +56 -59
  42. lionagi/core/schema/base_node.py +35 -38
  43. lionagi/core/schema/condition.py +24 -0
  44. lionagi/core/schema/data_logger.py +98 -98
  45. lionagi/core/schema/data_node.py +19 -19
  46. lionagi/core/schema/prompt_template.py +0 -0
  47. lionagi/core/schema/structure.py +293 -190
  48. lionagi/core/session/__init__.py +1 -3
  49. lionagi/core/session/session.py +196 -214
  50. lionagi/core/tool/tool_manager.py +95 -103
  51. lionagi/integrations/__init__.py +1 -3
  52. lionagi/integrations/bridge/langchain_/documents.py +17 -18
  53. lionagi/integrations/bridge/langchain_/langchain_bridge.py +14 -14
  54. lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +22 -22
  55. lionagi/integrations/bridge/llamaindex_/node_parser.py +12 -12
  56. lionagi/integrations/bridge/llamaindex_/reader.py +11 -11
  57. lionagi/integrations/bridge/llamaindex_/textnode.py +7 -7
  58. lionagi/integrations/config/openrouter_configs.py +0 -1
  59. lionagi/integrations/provider/oai.py +26 -26
  60. lionagi/integrations/provider/services.py +38 -38
  61. lionagi/libs/__init__.py +34 -1
  62. lionagi/libs/ln_api.py +211 -221
  63. lionagi/libs/ln_async.py +53 -60
  64. lionagi/libs/ln_convert.py +118 -120
  65. lionagi/libs/ln_dataframe.py +32 -33
  66. lionagi/libs/ln_func_call.py +334 -342
  67. lionagi/libs/ln_nested.py +99 -107
  68. lionagi/libs/ln_parse.py +175 -158
  69. lionagi/libs/sys_util.py +52 -52
  70. lionagi/tests/test_core/test_base_branch.py +427 -427
  71. lionagi/tests/test_core/test_branch.py +292 -292
  72. lionagi/tests/test_core/test_mail_manager.py +57 -57
  73. lionagi/tests/test_core/test_session.py +254 -266
  74. lionagi/tests/test_core/test_session_base_util.py +299 -300
  75. lionagi/tests/test_core/test_tool_manager.py +70 -74
  76. lionagi/tests/test_libs/test_nested.py +2 -7
  77. lionagi/tests/test_libs/test_parse.py +2 -2
  78. lionagi/version.py +1 -1
  79. {lionagi-0.0.305.dist-info → lionagi-0.0.307.dist-info}/METADATA +4 -2
  80. lionagi-0.0.307.dist-info/RECORD +115 -0
  81. lionagi-0.0.305.dist-info/RECORD +0 -94
  82. {lionagi-0.0.305.dist-info → lionagi-0.0.307.dist-info}/LICENSE +0 -0
  83. {lionagi-0.0.305.dist-info → lionagi-0.0.307.dist-info}/WHEEL +0 -0
  84. {lionagi-0.0.305.dist-info → lionagi-0.0.307.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,127 @@
1
+ from lionagi.libs import func_call
2
+ from ..branch import Branch
3
+ from ..session import Session
4
+ from .utils import _handle_single_out, _handle_multi_out
5
+
6
+
7
async def parallel_predict(
    sentence,
    *,
    num_sentences=1,
    default_key="answer",
    confidence_score=False,
    reason=False,
    retry_kwargs=None,
    include_mapping=False,
    **kwargs,
):
    """Predict the next sentence(s) for one or many inputs in parallel, with retries.

    Args:
        sentence: the input text(s) to continue; a list/tuple fans out in parallel.
        num_sentences: how many sentences to ask the model to predict.
        default_key: key under which the prediction is returned.
        confidence_score: if True, also request a 0-1 confidence score.
        reason: if True, also request a brief reason for the prediction.
        retry_kwargs: options forwarded to ``func_call.rcall`` (retries/delay).
        include_mapping: if True, keep the input->output mapping in the result.
            (Previously this flag could not be reached from the public entry
            point even though the downstream call supports it.)
        **kwargs: extra chat parameters (e.g. temperature, output_fields).

    Returns:
        The normalized prediction output from ``_force_parallel_predict``.
    """
    if retry_kwargs is None:
        retry_kwargs = {}
    # Forward by keyword so a signature reorder downstream cannot silently
    # misbind arguments.
    return await _force_parallel_predict(
        sentence=sentence,
        num_sentences=num_sentences,
        default_key=default_key,
        confidence_score=confidence_score,
        reason=reason,
        retry_kwargs=retry_kwargs,
        include_mapping=include_mapping,
        **kwargs,
    )
28
+
29
+
30
async def _force_parallel_predict(
    sentence,
    num_sentences,
    default_key="answer",
    confidence_score=False,
    reason=False,
    retry_kwargs=None,
    include_mapping=False,
    **kwargs,
):
    """Retry wrapper around ``_parallel_predict``.

    Raises ValueError (and therefore retries via ``func_call.rcall``) when the
    model produces no output.

    Note: ``retry_kwargs`` previously defaulted to a mutable ``{}``, which is
    shared across calls in Python; it now uses the ``None`` sentinel idiom.
    """
    if retry_kwargs is None:
        retry_kwargs = {}

    async def _inner():
        out_ = await _parallel_predict(
            sentence=sentence,
            num_sentences=num_sentences,
            default_key=default_key,
            confidence_score=confidence_score,
            reason=reason,
            include_mapping=include_mapping,
            **kwargs,
        )
        if out_ is None:
            raise ValueError("No output from the model")

        return out_

    # Default retry policy: 2 retries with a 0.5s delay, unless overridden.
    retry_kwargs.setdefault("retries", 2)
    retry_kwargs.setdefault("delay", 0.5)

    return await func_call.rcall(_inner, **retry_kwargs)
63
+
64
+
65
+ def _create_predict_config(
66
+ num_sentences,
67
+ default_key="answer",
68
+ confidence_score=False,
69
+ reason=False,
70
+ **kwargs,
71
+ ):
72
+ instruct = {
73
+ "task": f"predict the next {num_sentences} sentence(s)",
74
+ }
75
+ extra_fields = kwargs.pop("output_fields", {})
76
+
77
+ output_fields = {default_key: "the predicted sentence(s)"}
78
+ output_fields = {**output_fields, **extra_fields}
79
+
80
+ if reason:
81
+ output_fields.update({"reason": "brief reason for the prediction"})
82
+
83
+ if confidence_score:
84
+ output_fields.update(
85
+ {"confidence_score": "a numeric score between 0 to 1 formatted in num:0.2f"}
86
+ )
87
+
88
+ if "temperature" not in kwargs:
89
+ kwargs["temperature"] = 0.1
90
+
91
+ return instruct, output_fields, kwargs
92
+
93
+
94
async def _parallel_predict(
    sentence,
    num_sentences,
    default_key="answer",
    confidence_score=False,
    reason=False,
    include_mapping=False,
    **kwargs,
):
    """Fan the predict instruction out over a Session and normalize the results."""
    instruct_, fields_, chat_kwargs = _create_predict_config(
        num_sentences=num_sentences,
        default_key=default_key,
        confidence_score=confidence_score,
        reason=reason,
        **kwargs,
    )

    replies = await Session().parallel_chat(
        instruct_,
        context=sentence,
        output_fields=fields_,
        include_mapping=include_mapping,
        **chat_kwargs,
    )

    return _handle_multi_out(
        replies,
        default_key=default_key,
        to_type="str",
        to_default=True,
        include_mapping=include_mapping,
    )
File without changes
File without changes
File without changes
File without changes
@@ -0,0 +1,174 @@
1
+ from lionagi.libs import func_call
2
+ from ..branch import Branch
3
+ from ..session import Session
4
+ from .utils import _handle_single_out, _handle_multi_out
5
+
6
+
7
async def predict(
    sentence,
    *,
    num_sentences=1,
    default_key="answer",
    confidence_score=False,
    reason=False,
    retry_kwargs=None,
    include_mapping=False,
    **kwargs,
):
    """Public entry point: predict the next sentence(s), retrying on empty output.

    All keyword options are forwarded unchanged to ``_force_predict``, which
    chooses between the single-branch and the parallel path.
    """
    return await _force_predict(
        sentence=sentence,
        num_sentences=num_sentences,
        default_key=default_key,
        confidence_score=confidence_score,
        reason=reason,
        retry_kwargs={} if retry_kwargs is None else retry_kwargs,
        include_mapping=include_mapping,
        **kwargs,
    )
30
+
31
+
32
async def _force_predict(
    sentence,
    num_sentences,
    default_key="answer",
    confidence_score=False,
    reason=False,
    retry_kwargs=None,
    include_mapping=False,
    **kwargs,
):
    """Retry wrapper for prediction.

    Uses the parallel path when *sentence* is a multi-element list/tuple or a
    mapping of outputs was requested; otherwise a single Branch chat.

    Note: ``retry_kwargs`` previously defaulted to a mutable ``{}`` (shared
    across calls); it now uses the ``None`` sentinel idiom.
    """
    if retry_kwargs is None:
        retry_kwargs = {}

    async def _inner1():
        # Single-input path.
        out_ = await _predict(
            sentence=sentence,
            num_sentences=num_sentences,
            default_key=default_key,
            confidence_score=confidence_score,
            reason=reason,
            **kwargs,
        )
        if out_ is None:
            raise ValueError("No output from the model")

        return out_

    async def _inner2():
        # Multi-input / mapping path.
        out_ = await _parallel_predict(
            sentence=sentence,
            num_sentences=num_sentences,
            default_key=default_key,
            confidence_score=confidence_score,
            reason=reason,
            include_mapping=include_mapping,
            **kwargs,
        )
        if out_ is None:
            raise ValueError("No output from the model")

        return out_

    # Default retry policy: 2 retries with a 0.5s delay, unless overridden.
    retry_kwargs.setdefault("retries", 2)
    retry_kwargs.setdefault("delay", 0.5)

    if (isinstance(sentence, (list, tuple)) and len(sentence) > 1) or include_mapping:
        return await func_call.rcall(_inner2, **retry_kwargs)

    return await func_call.rcall(_inner1, **retry_kwargs)
83
+
84
+
85
+ def _create_predict_config(
86
+ num_sentences,
87
+ default_key="answer",
88
+ confidence_score=False,
89
+ reason=False,
90
+ **kwargs,
91
+ ):
92
+ instruct = {
93
+ "task": f"predict the next {num_sentences} sentence(s)",
94
+ }
95
+ extra_fields = kwargs.pop("output_fields", {})
96
+
97
+ output_fields = {default_key: "the predicted sentence(s)"}
98
+ output_fields = {**output_fields, **extra_fields}
99
+
100
+ if reason:
101
+ output_fields["reason"] = "brief reason for the prediction"
102
+
103
+ if confidence_score:
104
+ output_fields["confidence_score"] = (
105
+ "a numeric score between 0 to 1 formatted in num:0.2f"
106
+ )
107
+
108
+ if "temperature" not in kwargs:
109
+ kwargs["temperature"] = 0.1
110
+
111
+ return instruct, output_fields, kwargs
112
+
113
+
114
async def _predict(
    sentence,
    num_sentences,
    default_key="answer",
    confidence_score=False,
    reason=False,
    **kwargs,
):
    """Single-branch prediction: one chat call, normalized to a plain answer."""
    instruct_, fields_, chat_kwargs = _create_predict_config(
        num_sentences=num_sentences,
        default_key=default_key,
        confidence_score=confidence_score,
        reason=reason,
        **kwargs,
    )

    reply = await Branch().chat(
        instruct_, context=sentence, output_fields=fields_, **chat_kwargs
    )

    return _handle_single_out(
        reply, default_key=default_key, to_type="str", to_default=True
    )
139
+
140
+
141
async def _parallel_predict(
    sentence,
    num_sentences,
    default_key="answer",
    confidence_score=False,
    reason=False,
    include_mapping=False,
    **kwargs,
):
    """Parallel prediction over a Session, one chat per input sentence."""
    instruct_, fields_, chat_kwargs = _create_predict_config(
        num_sentences=num_sentences,
        default_key=default_key,
        confidence_score=confidence_score,
        reason=reason,
        **kwargs,
    )

    replies = await Session().parallel_chat(
        instruct_,
        context=sentence,
        output_fields=fields_,
        include_mapping=include_mapping,
        **chat_kwargs,
    )

    return _handle_multi_out(
        replies,
        default_key=default_key,
        to_type="str",
        to_default=True,
        include_mapping=include_mapping,
    )
@@ -0,0 +1,33 @@
1
+ from ..branch import Branch
2
+ from ..flow.monoflow import MonoReAct
3
+
4
+
5
async def react(
    instruction=None,
    system=None,
    context=None,
    output_fields=None,
    tools=None,
    reason_prompt=None,
    action_prompt=None,
    output_prompt=None,
    **kwargs,
):
    """Run a ReAct flow on a fresh branch, then request one integrated answer.

    The ReAct pass produces intermediate fields; a follow-up chat collapses
    them into a final ``answer`` merged into the returned dict.
    """
    flow = MonoReAct(Branch(system=system, tools=tools))

    result = await flow._react(
        instruction=instruction,
        context=context,
        output_fields=output_fields,
        reason_prompt=reason_prompt,
        action_prompt=action_prompt,
        **kwargs,
    )

    final_prompt = output_prompt or "integrate everything, present final output"
    final = await flow.chat(final_prompt, output_fields={"answer": "..."})

    result["answer"] = final["answer"]
    return result
@@ -0,0 +1,163 @@
1
+ from lionagi.libs import func_call, convert
2
+ from ..branch import Branch
3
+ from .utils import _handle_single_out
4
+
5
+
6
async def score(
    context,
    instruction=None,
    *,
    score_range=(1, 10),
    inclusive=True,
    num_digit=0,
    default_key="score",
    method="llm",
    reason=False,
    confidence_score=False,
    retry_kwargs=None,
    **kwargs,
):
    """Public entry point: score *context* against constraints, retrying on failure.

    All options are forwarded unchanged to ``_force_score``.
    """
    return await _force_score(
        context=context,
        instruction=instruction,
        score_range=score_range,
        inclusive=inclusive,
        num_digit=num_digit,
        default_key=default_key,
        method=method,
        reason=reason,
        confidence_score=confidence_score,
        retry_kwargs={} if retry_kwargs is None else retry_kwargs,
        **kwargs,
    )
35
+
36
+
37
async def _force_score(
    context,
    instruction=None,
    score_range=(1, 10),
    inclusive=True,
    num_digit=1,
    default_key="score",
    method="llm",
    reason=False,
    confidence_score=False,
    retry_kwargs=None,
    **kwargs,
):
    """Retry wrapper around ``_score``; raises (and retries) on empty model output.

    Note: ``retry_kwargs`` previously defaulted to a mutable ``{}`` (shared
    across calls); it now uses the ``None`` sentinel idiom. The ``num_digit=1``
    default is kept as-is for compatibility (the public ``score`` always
    passes it explicitly).
    """
    if retry_kwargs is None:
        retry_kwargs = {}

    async def _inner():
        out_ = await _score(
            instruction=instruction,
            context=context,
            score_range=score_range,
            inclusive=inclusive,
            num_digit=num_digit,
            reason=reason,
            default_key=default_key,
            confidence_score=confidence_score,
            method=method,
            **kwargs,
        )
        if out_ is None:
            raise ValueError("No output from the model")

        return out_

    # Default retry policy: 2 retries with a 0.5s delay, unless overridden.
    retry_kwargs.setdefault("retries", 2)
    retry_kwargs.setdefault("delay", 0.5)

    return await func_call.rcall(_inner, **retry_kwargs)
76
+
77
+
78
def _create_score_config(
    instruction,
    score_range=(1, 10),
    inclusive=True,
    num_digit=0,
    reason=False,
    default_key="score",
    confidence_score=False,
    **kwargs,
):
    """Build the (instruction, output_fields, chat kwargs) triple for a score call.

    ``num_digit == 0`` requests an integer; otherwise a float formatted to
    that many digits. Caller-supplied ``output_fields`` override the default
    score field.
    """
    instruct = {
        "task": "score context according to the following constraints",
        "instruction": convert.to_str(instruction),
        "score_range": convert.to_str(score_range),
        "include_endpoints": "yes" if inclusive else "no",
    }

    precision = "integer" if num_digit == 0 else f"num:{convert.to_str(num_digit)}f"

    output_fields = {default_key: f"a numeric score as {precision}"}
    output_fields.update(kwargs.pop("output_fields", {}))

    if reason:
        output_fields["reason"] = "brief reason for the score"

    if confidence_score:
        output_fields["confidence_score"] = (
            "a numeric score between 0 to 1 formatted in num:0.2f"
        )

    kwargs.setdefault("temperature", 0.1)

    return instruct, output_fields, kwargs
117
+
118
+
119
async def _score(
    context,
    instruction=None,
    score_range=(1, 10),
    inclusive=True,
    num_digit=0,
    default_key="score",
    method="llm",
    reason=False,
    confidence_score=False,
    **kwargs,
):
    """Ask a fresh branch for a score and coerce the reply to a bounded number."""
    instruct_, fields_, chat_kwargs = _create_score_config(
        instruction=instruction,
        score_range=score_range,
        inclusive=inclusive,
        num_digit=num_digit,
        reason=reason,
        default_key=default_key,
        confidence_score=confidence_score,
        **kwargs,
    )

    out_ = ""
    if method == "llm":
        out_ = await Branch().chat(
            instruct_,
            tools=None,
            context=context,
            output_fields=fields_,
            **chat_kwargs,
        )

    # Numeric-coercion options: clamp to the requested range; integer when
    # num_digit is 0, otherwise float with that precision.
    bounds = {
        "upper_bound": score_range[1],
        "lower_bound": score_range[0],
        "num_type": int if num_digit == 0 else float,
        "precision": num_digit if num_digit != 0 else None,
    }

    return _handle_single_out(out_, default_key, to_type="num", to_type_kwargs=bounds)
@@ -0,0 +1,144 @@
1
+ from lionagi.libs import StringMatch, func_call
2
+ from ..branch.branch import Branch
3
+ from .utils import _handle_single_out
4
+
5
+
6
async def select(
    context,
    choices,
    *,
    num_choices=1,
    method="llm",
    objective=None,
    default_key="answer",
    reason=False,
    confidence_score=False,
    retry_kwargs=None,
    **kwargs,
):
    """Public entry point: pick from *choices* given *context*, retrying on failure.

    All options are forwarded unchanged to ``_force_select``.
    """
    return await _force_select(
        context=context,
        choices=choices,
        num_choices=num_choices,
        method=method,
        objective=objective,
        default_key=default_key,
        reason=reason,
        confidence_score=confidence_score,
        retry_kwargs={} if retry_kwargs is None else retry_kwargs,
        **kwargs,
    )
33
+
34
+
35
async def _force_select(
    context,
    choices,
    num_choices=1,
    method="llm",
    objective=None,
    default_key="answer",
    reason=False,
    confidence_score=False,
    retry_kwargs=None,
    **kwargs,
):
    """Retry wrapper around ``_select``; snaps off-list answers to the closest choice.

    Fixes two defects: ``retry_kwargs`` previously defaulted to a mutable
    ``{}`` (shared across calls), and it was forwarded into ``_select`` —
    which has no such parameter — so it leaked through ``**kwargs`` into the
    chat call.
    """
    if retry_kwargs is None:
        retry_kwargs = {}

    async def _inner():
        out_ = await _select(
            context=context,
            choices=choices,
            num_choices=num_choices,
            method=method,
            objective=objective,
            default_key=default_key,
            reason=reason,
            confidence_score=confidence_score,
            **kwargs,
        )
        if out_ is None:
            raise ValueError("No output from the model")

        # If the model answered with something outside the choice list,
        # replace it with the most similar valid choice.
        if isinstance(out_, dict) and out_[default_key] not in choices:
            out_[default_key] = StringMatch.choose_most_similar(
                out_.pop(default_key, ""), choices
            )

        return out_

    # Default retry policy: 2 retries with a 0.5s delay, unless overridden.
    retry_kwargs.setdefault("retries", 2)
    retry_kwargs.setdefault("delay", 0.5)

    return await func_call.rcall(_inner, **retry_kwargs)
77
+
78
+
79
+ def _create_select_config(
80
+ choices,
81
+ num_choices=1,
82
+ objective=None,
83
+ default_key="answer",
84
+ reason=False,
85
+ confidence_score=False,
86
+ **kwargs,
87
+ ):
88
+
89
+ instruct = {"task": f"select {num_choices} from provided", "choices": choices}
90
+ if objective is not None:
91
+ instruct["objective"] = objective
92
+
93
+ extra_fields = kwargs.pop("output_fields", {})
94
+ output_fields = {default_key: "..."}
95
+ output_fields = {**output_fields, **extra_fields}
96
+
97
+ if reason:
98
+ output_fields["reason"] = "brief reason for the selection"
99
+
100
+ if confidence_score:
101
+ output_fields["confidence_score"] = (
102
+ "a numeric score between 0 to 1 formatted in num:0.2f"
103
+ )
104
+
105
+ if "temperature" not in kwargs:
106
+ kwargs["temperature"] = 0.1
107
+
108
+ return instruct, output_fields, kwargs
109
+
110
+
111
async def _select(
    context,
    choices,
    num_choices=1,
    method="llm",
    objective=None,
    default_key="answer",
    reason=False,
    confidence_score=False,
    **kwargs,
):
    """Run one selection chat on a fresh branch and normalize the reply."""
    instruct_, fields_, chat_kwargs = _create_select_config(
        choices=choices,
        num_choices=num_choices,
        objective=objective,
        default_key=default_key,
        reason=reason,
        confidence_score=confidence_score,
        **kwargs,
    )

    out_ = ""
    if method == "llm":
        out_ = await Branch().chat(
            instruct_,
            tools=None,
            context=context,
            output_fields=fields_,
            **chat_kwargs,
        )

    return _handle_single_out(out_, default_key)
@@ -0,0 +1,51 @@
1
+ from .select import select
2
+ from .score import score
3
+
4
+
5
async def sentiment(
    context,
    choices=None,
    instruction=None,
    score_range=(0, 1),
    inclusive=True,
    num_digit=2,
    reason=False,
    method="llm",
    objective=None,
    default_key="answer",
    retries=2,
    to_type="str",
    **kwargs,
):
    """Classify sentiment as a label (``to_type="str"``) or a score (``to_type="num"``).

    Fix: previously this passed ``retries=retries`` to ``select``/``score``,
    but neither accepts a ``retries`` parameter — the value leaked through
    ``**kwargs`` toward the chat call and the retry count was ignored. It is
    now translated into the ``retry_kwargs`` dict those functions do accept.
    Returns None for any other ``to_type`` (unchanged behavior).
    """
    retry_kwargs = {"retries": retries}

    if to_type == "str":
        if choices is None:
            choices = ["positive", "negative", "neutral"]

        if objective is None:
            objective = "classify sentiment"

        return await select(
            context=context,
            choices=choices,
            method=method,
            objective=objective,
            default_key=default_key,
            retry_kwargs=retry_kwargs,
            reason=reason,
            out_str=True,
            **kwargs,
        )

    elif to_type == "num":
        return await score(
            context=context,
            instruction=instruction,
            score_range=score_range,
            inclusive=inclusive,
            num_digit=num_digit,
            reason=reason,
            method=method,
            default_key=default_key,
            retry_kwargs=retry_kwargs,
            **kwargs,
        )