lionagi 0.0.315__py3-none-any.whl → 0.0.316__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
@@ -3,5 +3,8 @@ from .select import select
3
3
  from .score import score
4
4
  from .react import react
5
5
  from .vote import vote
6
+ from .plan import plan
7
+ from .cot import chain_of_thoughts, chain_of_react
6
8
 
7
- __all__ = ["predict", "select", "score", "vote", "react"]
9
+
10
+ __all__ = ["predict", "select", "score", "vote", "react", "plan", "chain_of_thoughts", "chain_of_react"]
@@ -1 +1,88 @@
1
- # TODO: chain of thoughts
1
+ from typing import Callable
2
+ from lionagi.libs import convert
3
+ from ..tool import func_to_tool
4
+ from ..schema import Tool
5
+ from .predict import predict
6
+ from .plan import plan
7
+ from .react import react
8
+
9
+ from .utils import _process_tools
10
+
11
+
12
async def chain_of_thoughts(
    sentence=None,
    branch=None,
    instruction=None,
    reason=False,
    confidence_score=False,
    num_steps=3,
    directive_kwargs=None,
    return_branch=False,
    **kwargs,
):
    """Generate a step-by-step plan, then run ``predict`` on each step.

    Per-step outputs are aggregated onto the plan template as
    ``chain_output`` / ``chain_answer`` (and optionally ``chain_reasons`` /
    ``chain_confidence_score``).

    Args:
        sentence: Context to plan over (forwarded to ``plan``).
        branch: Existing branch to reuse; when omitted, ``plan`` creates one.
        instruction: Instruction forwarded to ``plan``.
        reason: If True, collect per-step reasons as ``chain_reasons``.
        confidence_score: If True, average per-step confidence scores into
            ``chain_confidence_score``.
        num_steps: Number of plan steps to generate.
        directive_kwargs: Extra keyword arguments for each ``predict`` call.
        return_branch: If True, also return the branch used.
        **kwargs: Forwarded to ``plan``.

    Returns:
        The plan template with chained outputs attached, or
        ``(template, branch)`` when ``return_branch`` is True.
    """
    # None default instead of a mutable {} shared across calls.
    directive_kwargs = directive_kwargs or {}

    outs, answer, reasons = [], "", []
    # Separate accumulator: the original reassigned the boolean
    # `confidence_score` parameter to 0 in a tuple unpack and then passed the
    # running total to `predict` where the flag was expected.
    confidence_total = 0

    if branch is not None:
        out_ = await plan(
            sentence,
            branch=branch,
            instruction=instruction,
            num_steps=num_steps,
            **kwargs,
        )
    else:
        out_, branch = await plan(
            sentence,
            instruction=instruction,
            branch=branch,
            num_steps=num_steps,
            return_branch=True,
            **kwargs,
        )

    # presumably out_.plan maps "step_1".."step_n" to step instructions —
    # TODO confirm against PlanTemplate's prompt contract
    for i in range(len(out_.plan)):
        _out = await predict(
            branch=branch,
            instruction=out_.plan[f"step_{i + 1}"],
            reason=reason,
            confidence_score=confidence_score,
            **directive_kwargs,
        )
        answer += _out.answer
        if reason:
            reasons.append(_out.reason)
        if confidence_score:
            confidence_total += _out.confidence_score
        outs.append(_out)

    setattr(out_, "chain_output", outs)
    setattr(out_, "chain_answer", answer)

    if reason:
        setattr(out_, "chain_reasons", reasons)
    # Guard against an empty plan to avoid ZeroDivisionError.
    if confidence_score and outs:
        setattr(out_, "chain_confidence_score", confidence_total / len(outs))

    if return_branch:
        return out_, branch

    return out_
51
+
52
+
53
async def chain_of_react(
    sentence=None,
    branch=None,
    instruction=None,
    num_steps=3,
    tools=None,
    directive_system=None,
    directive_kwargs=None,
    return_branch=False,
    **kwargs,
):
    """Generate a step-by-step plan, then run ``react`` on each step.

    Aggregates per-step outputs, reasons, actions, and action responses onto
    the plan template as ``chain_output`` / ``chain_reason`` /
    ``chain_actions`` / ``chain_action_response``.

    Args:
        sentence: Context to plan over (forwarded to ``plan``).
        branch: Existing branch to reuse; when omitted, ``plan`` creates one.
        instruction: Instruction forwarded to ``plan``.
        num_steps: Number of plan steps to generate.
        tools: Tool(s) to register on the branch before reacting.
        directive_system: System message for each ``react`` call.
        directive_kwargs: Extra keyword arguments for each ``react`` call.
        return_branch: If True, also return the branch used.
        **kwargs: Forwarded to ``plan``.

    Returns:
        The plan template with chained outputs attached, or
        ``(template, branch)`` when ``return_branch`` is True.
    """
    # None default instead of a mutable {} shared across calls.
    directive_kwargs = directive_kwargs or {}

    outs, reasons, actions, action_responses = [], [], [], []
    if branch is not None:
        out_ = await plan(
            sentence,
            branch=branch,
            instruction=instruction,
            num_steps=num_steps,
            **kwargs,
        )
    else:
        out_, branch = await plan(
            sentence,
            instruction=instruction,
            branch=branch,
            num_steps=num_steps,
            return_branch=True,
            **kwargs,
        )

    # Guard: the original called _process_tools(None, branch) when no tools
    # were given, which iterates None and raises TypeError.
    if tools:
        _process_tools(tools, branch)

    for i in range(len(out_.plan)):
        _out = await react(
            branch=branch,
            system=directive_system,
            instruction=out_.plan[f"step_{i + 1}"],
            **directive_kwargs,
        )
        outs.append(_out)
        reasons.append(_out.reason)
        actions.append(_out.actions)
        if _out.action_needed:
            action_responses.append(_out.action_response)

    setattr(out_, "chain_output", convert.to_list(outs))
    setattr(out_, "chain_reason", convert.to_list(reasons))
    setattr(out_, "chain_actions", convert.to_list(actions))
    setattr(out_, "chain_action_response", convert.to_list(action_responses))

    if return_branch:
        return out_, branch

    return out_
@@ -0,0 +1,162 @@
1
+ # plan.py
2
+
3
+ from lionagi.libs import func_call, ParseUtil
4
+ from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
5
+ from ..prompt.scored_template import ScoredTemplate
6
+ from ..branch import Branch
7
+
8
+
9
class PlanTemplate(ScoredTemplate):
    """Scored prompt template that asks the model for a step-by-step plan.

    The model is expected to fill ``plan`` as a dictionary of the form
    ``{step_n: {plan: ..., reason: ...}}`` (see the field description).
    """

    template_name: str = "default_plan"
    # Context the plan is generated from.
    sentence: str | list | dict = Field(
        default_factory=str,
        description="the given sentence(s) or context to generate a plan for",
    )
    # May come back from the model as a dict or a JSON-ish string.
    plan: dict | str= Field(
        default_factory=dict, description="the generated step by step plan, return as a dictionary following {step_n: {plan: ..., reason: ...}} format")
    signature: str = "sentence -> plan"

    def __init__(
        self,
        sentence=None,
        instruction=None,
        confidence_score=False,
        reason=False,
        num_step=3,
        **kwargs,
    ):
        """Initialize the template.

        Args:
            sentence: Context to generate the plan from.
            instruction: Extra instruction embedded into the task prompt.
            confidence_score: If True, request a ``confidence_score`` field.
            reason: If True, request a ``reason`` field.
            num_step: Number of steps the plan should contain.
            **kwargs: Forwarded to ``ScoredTemplate.__init__``.
        """
        super().__init__(**kwargs)

        self.sentence = sentence
        self.task = f"Generate a {num_step}_step plan based on the given context. Instruction: {instruction}."

        if reason:
            # presumably output_fields drives which fields the model must
            # fill — TODO confirm against ScoredTemplate
            self.output_fields.append("reason")

        if confidence_score:
            self.output_fields.append("confidence_score")
+
39
+
40
async def _plan(
    sentence,
    *,
    instruction=None,
    branch=None,
    confidence_score=False,
    reason=False,
    retries=2,
    delay=0.5,
    backoff_factor=2,
    default_value=None,
    timeout=None,
    branch_name=None,
    system=None,
    messages=None,
    service=None,
    sender=None,
    llmconfig=None,
    tools=None,
    datalogger=None,
    persist_path=None,
    tool_manager=None,
    return_branch=False,
    **kwargs,
):
    """Run a single plan generation.

    Builds (or reuses) a ``Branch``, fills a ``PlanTemplate`` via
    ``branch.chat`` with retry/backoff semantics, then parses the plan.

    Args:
        sentence: Context to generate the plan from.
        instruction: Extra instruction embedded into the task prompt.
        branch: Existing branch to reuse; a new one is built when omitted.
        confidence_score: If True, request a confidence score output field.
        reason: If True, request a reason output field.
        retries / delay / backoff_factor / default_value / timeout:
            Retry parameters forwarded to ``func_call.rcall``.
        branch_name / system / messages / service / sender / llmconfig /
        tools / datalogger / persist_path / tool_manager:
            Branch construction parameters (used only when ``branch`` is None).
        return_branch: If True, also return the branch used.
        **kwargs: Forwarded to ``branch.chat``.

    Returns:
        The filled ``PlanTemplate``, or ``(template, branch)`` when
        ``return_branch`` is True.
    """
    # Low temperature by default: plan generation should be near-deterministic.
    if "temperature" not in kwargs:
        kwargs["temperature"] = 0.1

    instruction = instruction or ""

    branch = branch or Branch(
        name=branch_name,
        system=system,
        messages=messages,
        service=service,
        sender=sender,
        llmconfig=llmconfig,
        tools=tools,
        datalogger=datalogger,
        persist_path=persist_path,
        tool_manager=tool_manager,
    )

    _template = PlanTemplate(
        sentence=sentence,
        instruction=instruction,
        confidence_score=confidence_score,
        reason=reason,
    )

    # branch.chat fills the template in place; rcall adds retry/backoff.
    await func_call.rcall(
        branch.chat,
        prompt_template=_template,
        retries=retries,
        delay=delay,
        backoff_factor=backoff_factor,
        default=default_value,
        timeout=timeout,
        **kwargs,
    )

    # The plan field is declared `dict | str`; only parse when it came back
    # as a string (the original parsed unconditionally, which breaks when
    # the model already returned a dict).
    if isinstance(_template.plan, str):
        _template.plan = ParseUtil.fuzzy_parse_json(_template.plan)

    return (_template, branch) if return_branch else _template
104
+
105
+
106
async def plan(
    sentence,
    *,
    instruction=None,
    num_instances=1,
    branch=None,
    confidence_score=False,
    reason=False,
    retries=2,
    delay=0.5,
    backoff_factor=2,
    default_value=None,
    timeout=None,
    branch_name=None,
    system=None,
    messages=None,
    service=None,
    sender=None,
    llmconfig=None,
    tools=None,
    datalogger=None,
    persist_path=None,
    tool_manager=None,
    return_branch=False,
    **kwargs,
):
    """Generate one or more plans from the given context.

    With ``num_instances == 1`` a single ``_plan`` result is returned; with
    more than one, the instances run concurrently and a list is returned.

    Args:
        sentence: Context to generate the plan(s) from.
        num_instances: How many independent plan generations to run.
        (Remaining parameters are forwarded unchanged to ``_plan``.)

    Returns:
        A single ``_plan`` result, or a list of results when
        ``num_instances > 1``.

    Raises:
        ValueError: If ``num_instances`` is less than 1 (the original
            silently returned None in that case).
    """
    if num_instances < 1:
        raise ValueError("num_instances must be a positive integer")

    async def _inner(i=0):
        # `i` is required by alcall, which passes each element of the range;
        # it is otherwise unused.
        return await _plan(
            sentence=sentence,
            instruction=instruction,
            branch=branch,
            confidence_score=confidence_score,
            reason=reason,
            retries=retries,
            delay=delay,
            backoff_factor=backoff_factor,
            default_value=default_value,
            timeout=timeout,
            branch_name=branch_name,
            system=system,
            messages=messages,
            service=service,
            sender=sender,
            llmconfig=llmconfig,
            tools=tools,
            datalogger=datalogger,
            persist_path=persist_path,
            tool_manager=tool_manager,
            return_branch=return_branch,
            **kwargs,
        )

    if num_instances == 1:
        return await _inner()

    # NOTE(review): all instances share the same `branch` object when one is
    # supplied — confirm concurrent use of a branch is safe.
    return await func_call.alcall(range(num_instances), _inner)
@@ -43,14 +43,15 @@ class PredictTemplate(ScoredTemplate):
43
43
  default_factory=int, description="the number of sentences to predict"
44
44
  )
45
45
  answer: str | list = Field(
46
- default_factory=str, description="the predicted sentence(s)"
46
+ default_factory=str, description="the predicted sentence(s) or desired output"
47
47
  )
48
48
  signature: str = "sentence -> answer"
49
49
 
50
50
  def __init__(
51
51
  self,
52
52
  sentence=None,
53
- num_sentences=None,
53
+ instruction=None,
54
+ num_sentences=1,
54
55
  confidence_score=False,
55
56
  reason=False,
56
57
  **kwargs,
@@ -67,9 +68,9 @@ class PredictTemplate(ScoredTemplate):
67
68
  """
68
69
  super().__init__(**kwargs)
69
70
 
70
- self.sentence = sentence
71
+ self.sentence = sentence or ''
71
72
  self.num_sentences = num_sentences
72
- self.task = f"predict the next {self.num_sentences} sentence(s)"
73
+ self.task = f"follow instruction to predict the next {self.num_sentences} sentence(s). Instruction: {instruction}."
73
74
 
74
75
  if reason:
75
76
  self.output_fields.append("reason")
@@ -82,6 +83,8 @@ async def predict(
82
83
  sentence=None,
83
84
  num_sentences=1,
84
85
  confidence_score=False,
86
+ instruction=None,
87
+ branch=None,
85
88
  reason=False,
86
89
  retries=2,
87
90
  delay=0.5,
@@ -128,7 +131,7 @@ async def predict(
128
131
  Returns:
129
132
  PredictTemplate: The predict template with the predicted sentence(s).
130
133
  """
131
- branch = Branch(
134
+ branch = branch or Branch(
132
135
  name=branch_name,
133
136
  system=system,
134
137
  messages=messages,
@@ -142,6 +145,7 @@ async def predict(
142
145
  )
143
146
 
144
147
  predict_template = PredictTemplate(
148
+ instruction=instruction,
145
149
  sentence=sentence,
146
150
  num_sentences=num_sentences,
147
151
  confidence_score=confidence_score,
@@ -3,11 +3,12 @@ from lionagi.libs import func_call, convert, AsyncUtil
3
3
  from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
4
4
  from ..prompt.action_template import ActionedTemplate
5
5
  from ..branch import Branch
6
+ from .utils import _process_tools
6
7
 
7
8
 
8
9
  class ReactTemplate(ActionedTemplate):
9
10
  template_name: str = "default_react"
10
- sentence: str | list | dict = Field(
11
+ sentence: str | list | dict | None= Field(
11
12
  default_factory=str,
12
13
  description="the given sentence(s) to reason and take actions on",
13
14
  )
@@ -29,7 +30,7 @@ class ReactTemplate(ActionedTemplate):
29
30
 
30
31
 
31
32
  async def _react(
32
- sentence,
33
+ sentence=None,
33
34
  *,
34
35
  instruction=None,
35
36
  branch=None,
@@ -58,6 +59,9 @@ async def _react(
58
59
 
59
60
  instruction = instruction or ""
60
61
 
62
+ if branch and tools:
63
+ _process_tools(tools, branch)
64
+
61
65
  branch = branch or Branch(
62
66
  name=branch_name,
63
67
  system=system,
@@ -109,7 +113,7 @@ async def _react(
109
113
 
110
114
 
111
115
  async def react(
112
- sentence,
116
+ sentence=None,
113
117
  *,
114
118
  instruction=None,
115
119
  num_instances=1,
@@ -1,3 +1,6 @@
1
+ from typing import Callable
2
+ from ..tool import func_to_tool
3
+ from ..schema import Tool
1
4
  # import contextlib
2
5
  # from lionagi.libs import ParseUtil, StringMatch, convert, func_call
3
6
 
@@ -85,3 +88,20 @@
85
88
  # return _out
86
89
 
87
90
  # return out_ if len(out_) > 1 else out_[0]
91
+
92
+
93
def _process_tools(tool_obj, branch):
    """Register a tool or an iterable of tools on the branch.

    Args:
        tool_obj: A ``Tool``, a bare callable, or an iterable of either.
        branch: The branch whose tool manager receives the tools.
    """
    # Treat a lone Tool like a lone callable: the original only special-cased
    # Callable, so a single Tool instance fell into the else branch and was
    # iterated field-by-field, silently registering nothing.
    if isinstance(tool_obj, (Tool, Callable)):
        _process_tool(tool_obj, branch)
    else:
        for item in tool_obj:
            _process_tool(item, branch)
99
+
100
+
101
def _process_tool(tool_obj, branch):
    """Register a single tool on the branch if not already registered.

    Args:
        tool_obj: A ``Tool`` instance or a bare callable to wrap.
        branch: The branch whose tool manager receives the tool.
    """
    if isinstance(tool_obj, Tool):
        # Already a Tool: register it directly, skipping duplicates.
        # `elif` below prevents a callable Tool from also being re-wrapped
        # by func_to_tool (the original used two independent `if`s).
        if tool_obj.schema_["function"]["name"] not in branch.tool_manager.registry:
            branch.register_tools(tool_obj)
    elif isinstance(tool_obj, Callable):
        # Wrap a bare function into a Tool before registering.
        tool = func_to_tool(tool_obj)[0]
        if tool.schema_["function"]["name"] not in branch.tool_manager.registry:
            branch.register_tools(tool)
lionagi/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.0.315"
1
+ __version__ = "0.0.316"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lionagi
3
- Version: 0.0.315
3
+ Version: 0.0.316
4
4
  Summary: Towards automated general intelligence.
5
5
  Author: HaiyangLi
6
6
  Author-email: Haiyang Li <ocean@lionagi.ai>
@@ -1,5 +1,5 @@
1
1
  lionagi/__init__.py,sha256=i6Ci7FebU2s4EVVnBFj1Dsi5RvP80JqeSqW-iripRPg,418
2
- lionagi/version.py,sha256=Zazlk4sxt5cxFTrUeqVNVrVkGcIAkFTm-b9a6VLDqkw,24
2
+ lionagi/version.py,sha256=Qy3TjUfD4-wZt6o0kQq4iMofgfH5V1ldDvANr5qAYN8,24
3
3
  lionagi/core/__init__.py,sha256=M5YXmJJiLcR5QB1VRmYvec14cHT6pKvxZOEs737BmP8,322
4
4
  lionagi/core/agent/__init__.py,sha256=IVcw9yn_QMBJGBou1Atck98Us9uwPGFs-gERTv0RWew,59
5
5
  lionagi/core/agent/base_agent.py,sha256=CRUpl7Zc5d2H9uCa17nMiFAnhKM_UH5Ujo1NHo3JAxg,3371
@@ -10,15 +10,15 @@ lionagi/core/branch/branch_flow_mixin.py,sha256=yXEfpxTaJ1aoDQQnCBYx5wShn9zt1ki8
10
10
  lionagi/core/branch/executable_branch.py,sha256=Yi0t4fDNMa5UaHo15sX-zBchr5auvXOtSc0RnSpG2a8,12151
11
11
  lionagi/core/branch/util.py,sha256=os7Qp7HpDfyyCvdkbBTyIQ3AYHfzUP0M684W4XMDHN4,11813
12
12
  lionagi/core/branch/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
- lionagi/core/direct/__init__.py,sha256=P17UfY3tLFgu0ncxMy4FRoVDlvOGUc7jzeowN41akBk,188
14
- lionagi/core/direct/cot.py,sha256=3hz0CjFN2Bw5IW1tOh26fzd1UVrV_41KKIS7pzCd6ok,26
15
- lionagi/core/direct/plan.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
16
- lionagi/core/direct/predict.py,sha256=tkxvN9m_XOf3SW8xTi5yanXylV8xVNRn9a8DeGd9xgs,6355
17
- lionagi/core/direct/react.py,sha256=IJ6sKgajCjhQ_UpJHf-j71tnVehEtIXFnyeB6bNlZwk,4196
13
+ lionagi/core/direct/__init__.py,sha256=nnA1v-pXDiypH9TvnhKW2igwHTYvnNFvu9l5PMupZwk,310
14
+ lionagi/core/direct/cot.py,sha256=gfitm365i8m7Ov2YXWJyF0HbAceloLfUqEaj7Wkw45k,2819
15
+ lionagi/core/direct/plan.py,sha256=1qK19Q63pGMcaByxNoxjQifg3WtpuyEtzcU8ptoZKfU,4022
16
+ lionagi/core/direct/predict.py,sha256=xjO0o2OsibL_FEHg8aC70gUanqp514AJ6L0yQwQIwLs,6535
17
+ lionagi/core/direct/react.py,sha256=rd2iXn6_VF5WBLOruQk9saUnafpo3qgu2Kc3pFvSpjU,4318
18
18
  lionagi/core/direct/score.py,sha256=QHO11WtAUfMEdfa1K-SRyn5uqf6_N0UmyCbEJsiqcQw,10328
19
19
  lionagi/core/direct/select.py,sha256=pPwesq29C3JZ5J3piwjBHqjOCsEM4uChPKMGBRxtSTE,6127
20
20
  lionagi/core/direct/sentiment.py,sha256=rNwBs-I2XICOwsXxFvfM1Tlc_afsVcRCNCXCxfxm_2k,27
21
- lionagi/core/direct/utils.py,sha256=yqu4qv9aaU4qzUD9QovtN2m21QySzdMLmcBp5recWC0,2333
21
+ lionagi/core/direct/utils.py,sha256=Jszxx7QFIgC97YOwEhDStZC1g_9Zj-RhAxWaRqfeeJ8,3003
22
22
  lionagi/core/direct/vote.py,sha256=tjs-EYDGlGB3J6d_nSl1oIuJYHtxncjustBbU_pXDqQ,2449
23
23
  lionagi/core/flow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
24
24
  lionagi/core/flow/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -114,8 +114,8 @@ lionagi/tests/test_libs/test_func_call.py,sha256=xvs19YBNxqh3RbWLjQXY19L06b1_uZY
114
114
  lionagi/tests/test_libs/test_nested.py,sha256=eEcE4BXJEkjoPZsd9-0rUxOJHjmu8W2hgVClUTwXEFY,13106
115
115
  lionagi/tests/test_libs/test_parse.py,sha256=aa74kfOoJwDU7L7-59EcgBGYc5-OtafPIP2oGTI3Zrk,6814
116
116
  lionagi/tests/test_libs/test_sys_util.py,sha256=Y-9jxLGxgbFNp78Z0PJyGUjRROMuRAG3Vo3i5LAH8Hs,7849
117
- lionagi-0.0.315.dist-info/LICENSE,sha256=vfczrx-xFNkybZ7Ef-lGUnA1Vorky6wL4kwb1Fd5o3I,1089
118
- lionagi-0.0.315.dist-info/METADATA,sha256=FVnSivifINUlYoYjEh7s01WKZ3h1Hn1AW_uKW3KfdLg,7934
119
- lionagi-0.0.315.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
120
- lionagi-0.0.315.dist-info/top_level.txt,sha256=szvch_d2jE1Lu9ZIKsl26Ll6BGfYfbOgt5lm-UpFSo4,8
121
- lionagi-0.0.315.dist-info/RECORD,,
117
+ lionagi-0.0.316.dist-info/LICENSE,sha256=vfczrx-xFNkybZ7Ef-lGUnA1Vorky6wL4kwb1Fd5o3I,1089
118
+ lionagi-0.0.316.dist-info/METADATA,sha256=StvgG7rD-1_VdC1-ftaT_DKrO4VZP8QvRl29xPnnIAM,7934
119
+ lionagi-0.0.316.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
120
+ lionagi-0.0.316.dist-info/top_level.txt,sha256=szvch_d2jE1Lu9ZIKsl26Ll6BGfYfbOgt5lm-UpFSo4,8
121
+ lionagi-0.0.316.dist-info/RECORD,,